[med-svn] [python-skbio] 01/06: Imported Upstream version 0.4.1

Kevin Murray daube-guest at moszumanska.debian.org
Mon Dec 21 06:01:20 UTC 2015


This is an automated email from the git hooks/post-receive script.

daube-guest pushed a commit to branch master
in repository python-skbio.

commit 9f5733952eefa4a7d3f056c54ee12f2b41e5cc8e
Author: Kevin Murray <spam at kdmurray.id.au>
Date:   Mon Dec 21 16:42:21 2015 +1100

    Imported Upstream version 0.4.1
---
 .gitignore                                         |    3 +
 .travis.yml                                        |   16 +-
 CHANGELOG.md                                       |  111 +
 CONTRIBUTING.md                                    |   67 +-
 MANIFEST.in                                        |    6 +
 Makefile                                           |   13 +-
 README.rst                                         |    4 +-
 RELEASE.md                                         |   28 +-
 asv.conf.json                                      |   19 +
 benchmarks/__init__.py                             |    0
 benchmarks/benchmarks.py                           |   64 +
 checklist.py                                       |    2 +-
 ci/conda_requirements.txt                          |   14 +
 ci/pip_requirements.txt                            |   11 +
 doc/README.md                                      |   23 +-
 doc/source/_static/copybutton.js                   |   44 +-
 doc/source/_static/style.css                       |   27 +-
 doc/source/_templates/autosummary/class.rst        |   14 +-
 doc/source/conf.py                                 |   83 +-
 doc/source/draw.rst                                |    1 -
 doc/source/index.rst                               |    1 -
 setup.py                                           |   18 +-
 skbio/__init__.py                                  |   10 +-
 skbio/_base.py                                     |  834 +++-
 skbio/alignment/__init__.py                        |  152 +-
 skbio/alignment/_alignment.py                      | 1374 ------
 skbio/alignment/_exception.py                      |   19 -
 skbio/alignment/_indexing.py                       |  219 +
 skbio/alignment/_pairwise.py                       |  302 +-
 skbio/alignment/_repr.py                           |   68 +
 skbio/alignment/_ssw_wrapper.c                     | 4514 ++++++++++++--------
 skbio/alignment/_ssw_wrapper.pyx                   |    1 -
 skbio/alignment/_tabular_msa.py                    | 2342 ++++++++++
 skbio/alignment/tests/test_alignment.py            |  735 ----
 skbio/alignment/tests/test_pairwise.py             |  666 +--
 skbio/alignment/tests/test_ssw.py                  |   65 +-
 skbio/alignment/tests/test_tabular_msa.py          | 3676 ++++++++++++++++
 skbio/diversity/__init__.py                        |  382 +-
 skbio/diversity/_driver.py                         |  277 ++
 .../__subsample.c => diversity/_phylogenetic.c}    | 4053 ++++++++++++------
 skbio/diversity/_phylogenetic.pyx                  |  215 +
 skbio/diversity/_util.py                           |  143 +
 skbio/diversity/alpha/__init__.py                  |  117 +-
 skbio/diversity/alpha/_ace.py                      |   30 +-
 skbio/diversity/alpha/_base.py                     |  331 +-
 skbio/diversity/alpha/_chao1.py                    |   13 +-
 skbio/diversity/alpha/_faith_pd.py                 |  143 +
 skbio/diversity/alpha/_gini.py                     |   12 +-
 skbio/diversity/alpha/_lladser.py                  |    6 +-
 .../alpha/tests/data/qiime-191-tt/README.md        |    1 +
 .../alpha/tests/data/qiime-191-tt/faith-pd.txt     |    9 +
 .../alpha/tests/data/qiime-191-tt/otu-table.tsv    |   12 +
 .../alpha/tests/data/qiime-191-tt/tree.nwk         |    1 +
 skbio/diversity/alpha/tests/test_base.py           |   96 +-
 skbio/diversity/alpha/tests/test_faith_pd.py       |  209 +
 skbio/diversity/beta/__init__.py                   |  166 +-
 skbio/diversity/beta/_base.py                      |  105 -
 skbio/diversity/beta/_unifrac.py                   |  579 +++
 .../beta/tests/data/qiime-191-tt/README.md         |   26 +
 .../beta/tests/data/qiime-191-tt/otu-table.tsv     |   12 +
 .../beta/tests/data/qiime-191-tt/tree.nwk          |    1 +
 .../data/qiime-191-tt/unweighted_unifrac_dm.txt    |   10 +
 .../weighted_normalized_unifrac_dm.txt             |   10 +
 .../data/qiime-191-tt/weighted_unifrac_dm.txt      |   10 +
 skbio/diversity/beta/tests/test_base.py            |  165 -
 skbio/diversity/beta/tests/test_unifrac.py         |  690 +++
 skbio/{draw => diversity}/tests/__init__.py        |    0
 skbio/diversity/tests/test_driver.py               |  632 +++
 skbio/diversity/tests/test_util.py                 |  240 ++
 skbio/draw/__init__.py                             |   39 -
 skbio/draw/_distributions.py                       |  711 ---
 skbio/draw/tests/test_distributions.py             |  595 ---
 skbio/io/__init__.py                               |   33 +-
 skbio/io/_exception.py                             |   12 +-
 skbio/io/_iosources.py                             |   15 +-
 skbio/io/format/_base.py                           |   19 +-
 skbio/io/format/_blast.py                          |   44 +
 skbio/io/format/blast6.py                          |  276 ++
 skbio/io/format/blast7.py                          |  384 ++
 skbio/io/format/clustal.py                         |  155 +-
 skbio/io/format/fasta.py                           |  402 +-
 skbio/io/format/fastq.py                           |  141 +-
 skbio/io/format/genbank.py                         |  890 ++++
 skbio/io/format/newick.py                          |    6 +-
 skbio/io/format/ordination.py                      |   54 +-
 skbio/io/format/phylip.py                          |  255 +-
 skbio/io/format/qseq.py                            |   96 +-
 skbio/io/format/tests/data/blast6_custom_minimal   |    1 +
 .../io/format/tests/data/blast6_custom_mixed_nans  |    2 +
 .../io/format/tests/data/blast6_custom_multi_line  |    3 +
 .../io/format/tests/data/blast6_custom_single_line |    1 +
 .../io/format/tests/data/blast6_default_multi_line |    3 +
 .../format/tests/data/blast6_default_single_line   |    1 +
 .../format/tests/data/blast6_invalid_column_types  |    3 +
 .../tests/data/blast6_invalid_number_of_columns    |    1 +
 .../tests/data/blast6_invalid_type_in_column       |    2 +
 skbio/io/format/tests/data/blast7_custom_minimal   |    6 +
 .../io/format/tests/data/blast7_custom_mixed_nans  |   16 +
 .../io/format/tests/data/blast7_custom_multi_line  |   16 +
 .../io/format/tests/data/blast7_custom_single_line |   15 +
 .../io/format/tests/data/blast7_default_multi_line |   12 +
 .../format/tests/data/blast7_default_single_line   |   10 +
 .../tests/data/blast7_invalid_differing_fields     |   21 +
 .../format/tests/data/blast7_invalid_for_sniffer   |    4 +
 .../format/tests/data/blast7_invalid_for_sniffer_2 |    4 +
 .../io/format/tests/data/blast7_invalid_gibberish  |    3 +
 skbio/io/format/tests/data/blast7_invalid_no_data  |   24 +
 .../tests/data/blast7_invalid_too_many_columns     |   21 +
 .../tests/data/blast7_invalid_unrecognized_field   |   10 +
 ...erent_type => fasta_tabular_msa_different_type} |    0
 .../tests/data/genbank_5_blanks_start_of_file      |    6 +
 .../tests/data/genbank_6_blanks_start_of_file      |    7 +
 .../format/tests/data/genbank_missing_locus_name   |    1 +
 skbio/io/format/tests/data/genbank_multi_records   |   46 +
 skbio/io/format/tests/data/genbank_single_record   |   27 +
 .../format/tests/data/genbank_single_record_lower  |    4 +
 .../format/tests/data/genbank_single_record_upper  |    4 +
 .../tests/data/genbank_w_beginning_whitespace      |    1 +
 .../format/tests/data/legacy9_and_blast7_default   |   12 +
 .../tests/data/legacy9_invalid_differing_fields    |   20 +
 .../tests/data/legacy9_invalid_too_many_columns    |    6 +
 skbio/io/format/tests/data/legacy9_mixed_nans      |    7 +
 skbio/io/format/tests/data/legacy9_multi_line      |    8 +
 skbio/io/format/tests/data/legacy9_single_line     |    6 +
 .../data/phylip_invalid_empty_line_after_header    |    4 +
 .../data/phylip_invalid_empty_line_before_header   |    4 +
 .../data/phylip_invalid_empty_line_between_seqs    |    4 +
 .../tests/data/phylip_invalid_header_too_long      |    3 +
 .../tests/data/phylip_invalid_header_too_short     |    3 +
 .../io/format/tests/data/phylip_invalid_no_header  |    2 +
 .../format/tests/data/phylip_invalid_seq_too_long  |    3 +
 .../format/tests/data/phylip_invalid_seq_too_short |    3 +
 .../format/tests/data/phylip_invalid_too_few_seqs  |    2 +
 .../format/tests/data/phylip_invalid_too_many_seqs |    3 +
 .../format/tests/data/phylip_invalid_zero_seq_len  |    2 +
 .../io/format/tests/data/phylip_invalid_zero_seqs  |    1 +
 .../format/tests/data/phylip_variable_length_ids   |   10 +-
 .../tests/data/phylip_varied_whitespace_in_seqs    |    3 +
 .../tests/data/phylip_whitespace_in_header_1       |    3 +
 .../tests/data/phylip_whitespace_in_header_2       |    3 +
 .../tests/data/phylip_whitespace_in_header_3       |    3 +
 ...ferent_type => qual_tabular_msa_different_type} |    0
 skbio/io/format/tests/test_blast6.py               |  123 +
 skbio/io/format/tests/test_blast7.py               |  212 +
 skbio/io/format/tests/test_clustal.py              |   77 +-
 skbio/io/format/tests/test_fasta.py                |  274 +-
 skbio/io/format/tests/test_fastq.py                |  116 +-
 skbio/io/format/tests/test_genbank.py              |  503 +++
 skbio/io/format/tests/test_ordination.py           |  174 +-
 skbio/io/format/tests/test_phylip.py               |  212 +-
 skbio/io/format/tests/test_qseq.py                 |   47 +-
 skbio/io/registry.py                               |   18 +-
 skbio/io/tests/test_registry.py                    |    3 +-
 skbio/io/tests/test_util.py                        |   13 +-
 skbio/io/util.py                                   |    6 +-
 skbio/sequence/__init__.py                         |   58 +-
 skbio/sequence/_base.py                            |   43 -
 skbio/sequence/_dna.py                             |   25 +-
 skbio/sequence/_genetic_code.py                    |    6 +-
 skbio/sequence/_iupac_sequence.py                  |  150 +-
 skbio/sequence/_nucleotide_mixin.py                |    8 +-
 skbio/sequence/_protein.py                         |   18 +-
 skbio/sequence/_repr.py                            |  108 +
 skbio/sequence/_rna.py                             |   85 +-
 skbio/sequence/_sequence.py                        | 1056 +++--
 skbio/sequence/tests/test_base.py                  |   48 -
 skbio/sequence/tests/test_iupac_sequence.py        |   45 +-
 skbio/sequence/tests/test_rna.py                   |   45 +
 skbio/sequence/tests/test_sequence.py              |  993 ++---
 skbio/stats/__init__.py                            |    1 -
 skbio/stats/__subsample.c                          | 1733 +++++---
 skbio/stats/composition.py                         |  640 ++-
 skbio/stats/distance/__init__.py                   |    4 +-
 skbio/stats/distance/_base.py                      |  102 +-
 skbio/stats/distance/_mantel.py                    |    6 +-
 skbio/stats/distance/tests/test_base.py            |   76 +
 skbio/stats/gradient.py                            |   10 +-
 skbio/stats/ordination/__init__.py                 |   95 +-
 skbio/stats/ordination/_base.py                    |  402 --
 .../_canonical_correspondence_analysis.py          |  345 +-
 skbio/stats/ordination/_correspondence_analysis.py |  295 +-
 .../ordination/_principal_coordinate_analysis.py   |  181 +-
 skbio/stats/ordination/_redundancy_analysis.py     |  364 +-
 skbio/stats/ordination/_utils.py                   |   60 +-
 skbio/stats/ordination/tests/data/example2_Y       |   22 +-
 .../ordination/tests/data/example2_biplot_scaling1 |    4 +
 .../ordination/tests/data/example2_biplot_scaling2 |    4 +
 .../data/example2_sample_constraints_scaling1      |   10 +
 .../data/example2_sample_constraints_scaling2      |   10 +
 .../ordination/tests/data/example3_biplot_scaling1 |    3 +
 .../ordination/tests/data/example3_biplot_scaling2 |    3 +
 .../data/example3_sample_constraints_scaling1      |   10 +
 .../data/example3_sample_constraints_scaling2      |   10 +
 .../test_canonical_correspondence_analysis.py      |  174 +
 .../tests/test_correspondence_analysis.py          |  194 +
 skbio/stats/ordination/tests/test_ordination.py    |  887 ----
 .../tests/test_principal_coordinate_analysis.py    |  132 +
 .../ordination/tests/test_redundancy_analysis.py   |  171 +
 skbio/stats/ordination/tests/test_util.py          |   69 +
 skbio/stats/power.py                               |  141 +-
 skbio/stats/spatial.py                             |  201 -
 skbio/stats/tests/test_composition.py              |  666 ++-
 skbio/stats/tests/test_gradient.py                 |   12 +-
 skbio/stats/tests/test_power.py                    |   18 -
 skbio/stats/tests/test_spatial.py                  |  146 -
 skbio/tests/test_base.py                           |  377 +-
 skbio/tree/__init__.py                             |   93 +-
 skbio/tree/_majority_rule.py                       |   75 +-
 skbio/tree/_tree.py                                |  313 +-
 skbio/tree/_trie.py                                |  266 --
 skbio/tree/tests/test_tree.py                      |  242 +-
 skbio/tree/tests/test_trie.py                      |  216 -
 skbio/util/__init__.py                             |   25 +-
 skbio/util/_decorator.py                           |   34 +-
 skbio/util/_metadata_repr.py                       |  168 +
 skbio/util/_misc.py                                |   64 +-
 skbio/util/_testing.py                             | 1069 ++++-
 skbio/util/_warning.py                             |   12 +
 skbio/util/tests/test_decorator.py                 |   60 +-
 skbio/util/tests/test_misc.py                      |   37 +-
 skbio/util/tests/test_testing.py                   |  179 +-
 skbio/workflow.py                                  |   26 +-
 222 files changed, 29506 insertions(+), 14176 deletions(-)

diff --git a/.gitignore b/.gitignore
index 257d39b..b7406cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -44,3 +44,6 @@ nosetests.xml
 
 # Sphinx builds
 doc/source/generated
+
+# OSX files
+.DS_Store
diff --git a/.travis.yml b/.travis.yml
index 9ea3927..bc23772 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,10 +1,10 @@
 # Check on http://lint.travis-ci.org/ after modifying it!  Originally
 # modified from https://gist.github.com/dan-blanchard/7045057
+sudo: false
 language: python
 env:
-  - PYTHON_VERSION=3.4
-  - PYTHON_VERSION=3.3
-  - PYTHON_VERSION=2.7 USE_CYTHON=TRUE
+  - PYTHON_VERSION=3.5 MAKE_DOC=TRUE
+  - PYTHON_VERSION=3.4 USE_CYTHON=TRUE
   - PYTHON_VERSION=2.7
 before_install:
   - "export DISPLAY=:99.0"
@@ -16,14 +16,14 @@ before_install:
   # Update conda itself
   - conda update --yes conda
 install:
-  - conda create --yes -n env_name python=$PYTHON_VERSION pip numpy scipy matplotlib pandas nose pep8 Sphinx=1.2.2 IPython
+  - conda create --yes -n env_name python=$PYTHON_VERSION --file ci/conda_requirements.txt
   - if [ ${USE_CYTHON} ]; then conda install --yes -n env_name cython; fi
   - source activate env_name
-  - pip install sphinx-bootstrap-theme HTTPretty future six bz2file contextlib2 coveralls natsort pyflakes flake8 python-dateutil  decorator 'CacheControl[FileCache]' git+git://github.com/numpy/numpydoc.git
-  - pip install -e . --no-deps
+  - pip install -r ci/pip_requirements.txt
+  - pip install . --no-deps
 script:
-  - PYTHONWARNINGS=ignore WITH_COVERAGE=TRUE make test
-  - make -C doc clean html
+  - WITH_COVERAGE=TRUE make test
+  - if [ ${MAKE_DOC} ]; then make -C doc clean html; fi
 after_success:
   - coveralls
 notifications:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 45cc1a0..7c5a901 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,116 @@
 # scikit-bio changelog
 
+## Version 0.4.1 (2015-12-09)
+
+### Features
+* The ``TabularMSA`` object was added to represent and operate on tabular multiple sequence alignments. This satisfies [RFC 1](https://github.com/biocore/scikit-bio-rfcs/blob/master/active/001-tabular-msa.md). See the ``TabularMSA`` docs for full details.
+* Added phylogenetic diversity metrics, including weighted UniFrac, unweighted UniFrac, and Faith's Phylogenetic Diversity. These are accessible as ``skbio.diversity.beta.unweighted_unifrac``, ``skbio.diversity.beta.weighted_unifrac``, and ``skbio.diversity.alpha.faith_pd``, respectively.
+* Addition of the function ``skbio.diversity.alpha_diversity`` to support applying an alpha diversity metric to multiple samples in one call.
+* Addition of the functions ``skbio.diversity.get_alpha_diversity_metrics`` and ``skbio.diversity.get_beta_diversity_metrics`` to support discovery of the alpha and beta diversity metrics implemented in scikit-bio.
+* Added `skbio.stats.composition.ancom` function, a test for OTU differential abundance across sample categories. ([#1054](https://github.com/biocore/scikit-bio/issues/1054))
+* Added `skbio.io.format.blast7` for reading BLAST+ output format 7 or BLAST output format 9 files into a `pd.DataFrame`. ([#1110](https://github.com/biocore/scikit-bio/issues/1110))
+* Added `skbio.DissimilarityMatrix.to_data_frame` method for creating a ``pandas.DataFrame`` from a `DissimilarityMatrix` or `DistanceMatrix`. ([#757](https://github.com/biocore/scikit-bio/issues/757))
+* Added support for a one-dimensional vector of dissimilarities in the `skbio.stats.distance.DissimilarityMatrix`
+constructor. ([#624](https://github.com/biocore/scikit-bio/issues/624))
+* Added `skbio.io.format.blast6` for reading BLAST+ output format 6 or BLAST output format 8 files into a `pd.DataFrame`. ([#1110](https://github.com/biocore/scikit-bio/issues/1110))
+* Added `inner`, `ilr`, `ilr_inv` and `clr_inv` to ``skbio.stats.composition``, which enable linear transformations on compositions. ([#892](https://github.com/biocore/scikit-bio/issues/892))
+* Added ``skbio.diversity.alpha.pielou_e`` function as an evenness metric of alpha diversity. ([#1068](https://github.com/biocore/scikit-bio/issues/1068))
+* Added `to_regex` method to `skbio.sequence._iupac_sequence` ABC - it returns a regex object that matches all non-degenerate versions of the sequence.
+* Added ``skbio.util.assert_ordination_results_equal`` function for comparing ``OrdinationResults`` objects in unit tests.
+* Added ``skbio.io.format.genbank`` for reading and writing GenBank/GenPept for ``DNA``, ``RNA``, ``Protein`` and ``Sequence`` classes.
+* Added ``skbio.util.RepresentationWarning`` for warning about substitutions, assumptions, or particular alterations that were made for the successful completion of a process.
+* ``TreeNode.tip_tip_distances`` now supports nodes without an associated length. In this case, a length of 0.0 is assumed and an ``skbio.util.RepresentationWarning`` is raised. Previous behavior was to raise a ``NoLengthError``. ([#791](https://github.com/biocore/scikit-bio/issues/791))
+* ``DistanceMatrix`` now has a new constructor method called `from_iterable`.
+* ``Sequence`` now accepts ``lowercase`` keyword like ``DNA`` and others. Updated ``fasta``, ``fastq``, and ``qseq`` readers/writers for ``Sequence`` to reflect this.
+* The ``lowercase`` method has been moved up to ``Sequence`` meaning all sequence objects now have a ``lowercase`` method.
+* Added ``reverse_transcribe`` class method to ``RNA``.
+* Added `Sequence.observed_chars` property for obtaining the set of observed characters in a sequence. ([#1075](https://github.com/biocore/scikit-bio/issues/1075))
+* Added `Sequence.frequencies` method for computing character frequencies in a sequence. ([#1074](https://github.com/biocore/scikit-bio/issues/1074))
+* Added experimental class-method ``Sequence.concat`` which will produce a new sequence from an iterable of existing sequences. Parameters control how positional metadata is propagated during a concatenation.
+* ``TreeNode.to_array`` now supports replacing ``nan`` branch lengths in the resulting branch length vector with the value provided as ``nan_length_value``.
+* ``skbio.io.format.phylip`` now supports sniffing and reading strict, sequential PHYLIP-formatted files into ``skbio.Alignment`` objects. ([#1006](https://github.com/biocore/scikit-bio/issues/1006))
+* Added `default_gap_char` class property to ``DNA``, ``RNA``, and ``Protein`` for representing gap characters in a new sequence.
+
+### Backward-incompatible changes [stable]
+* `Sequence.kmer_frequencies` now returns a `dict`. Previous behavior was to return a `collections.Counter` if `relative=False` was passed, and a `collections.defaultdict` if `relative=True` was passed. In the case of a missing key, the `Counter` would return 0 and the `defaultdict` would return 0.0. Because the return type is now always a `dict`, attempting to access a missing key will raise a `KeyError`. This change *may* break backwards-compatibility depending on how the `Counter`/`de [...]
+
+   If the previous behavior is desired, convert the `dict` into a `Counter`/`defaultdict`:
+
+    ```python
+    import collections
+    from skbio import Sequence
+    seq = Sequence('ACCGAGTTTAACCGAATA')
+
+    # Counter
+    freqs_dict = seq.kmer_frequencies(k=8)
+    freqs_counter = collections.Counter(freqs_dict)
+
+    # defaultdict
+    freqs_dict = seq.kmer_frequencies(k=8, relative=True)
+    freqs_default_dict = collections.defaultdict(float, freqs_dict)
+    ```
+
+   **Rationale:** We believe it is safer to return `dict` instead of `Counter`/`defaultdict` as this may prevent error-prone usage of the return value. Previous behavior allowed accessing missing kmers, returning 0 or 0.0 depending on the `relative` parameter. This is convenient in many cases but also potentially misleading. For example, consider the following code:
+
+    ```python
+    from skbio import Sequence
+    seq = Sequence('ACCGAGTTTAACCGAATA')
+    freqs = seq.kmer_frequencies(k=8)
+    freqs['ACCGA']
+    ```
+
+    Previous behavior would return 0 because the kmer `'ACCGA'` is not present in the `Counter`. In one respect this is the correct answer because we asked for kmers of length 8; `'ACCGA'` is a different length so it is not included in the results. However, we believe it is safer to avoid this implicit behavior in case the user assumes there are no `'ACCGA'` kmers in the sequence (which there are!). A `KeyError` in this case is more explicit and forces the user to consider their query. R [...]
+
+### Backward-incompatible changes [experimental]
+* Replaced ``PCoA``, ``CCA``, ``CA`` and ``RDA`` in ``skbio.stats.ordination`` with equivalent functions ``pcoa``, ``cca``, ``ca`` and ``rda``. These functions now take ``pd.DataFrame`` objects.
+* Changed ``OrdinationResults`` to have its attributes based on ``pd.DataFrame`` and ``pd.Series`` objects, instead of pairs of identifiers and values. The changes are as follows:
+    - ``species`` and ``species_ids`` have been replaced by a ``pd.DataFrame`` named ``features``.
+    - ``site`` and ``site_ids`` have been replaced by a ``pd.DataFrame`` named ``samples``.
+    - ``eigvals`` is now a ``pd.Series`` object.
+    - ``proportion_explained`` is now a ``pd.Series`` object.
+    - ``biplot`` is now a ``pd.DataFrame`` object named ``biplot_scores``.
+    - ``site_constraints`` is now a ``pd.DataFrame`` object named ``sample_constraints``.
+* ``short_method_name`` and ``long_method_name`` are now required arguments of the ``OrdinationResults`` object.
+* Removed `skbio.diversity.alpha.equitability`. Please use `skbio.diversity.alpha.pielou_e`, which is more accurately named and better documented. Note that `equitability` by default used logarithm base 2 while `pielou_e` uses logarithm base `e` as described in Heip 1974.
+* ``skbio.diversity.beta.pw_distances`` is now called ``skbio.diversity.beta_diversity``. This function no longer defines a default metric, and ``metric`` is now the first argument to this function. This function can also now take a pairwise distances function as ``pairwise_func``.
+* Deprecated function ``skbio.diversity.beta.pw_distances_from_table`` has been removed from scikit-bio as scheduled. Code that used this should be adapted to use ``skbio.diversity.beta_diversity``.
+* ``TreeNode.index_tree`` now returns a 2-D numpy array as its second return value (the child node index) instead of a 1-D numpy array.
+* Deprecated functions `skbio.draw.boxplots` and `skbio.draw.grouped_distributions` have been removed from scikit-bio as scheduled. These functions generated plots that were not specific to bioinformatics. These types of plots can be generated with seaborn or another general-purpose plotting package.
+* Deprecated function `skbio.stats.power.bootstrap_power_curve` has been removed from scikit-bio as scheduled. Use `skbio.stats.power.subsample_power` or `skbio.stats.power.subsample_paired_power` followed by `skbio.stats.power.confidence_bound`.
+* Deprecated function `skbio.stats.spatial.procrustes` has been removed from scikit-bio as scheduled in favor of `scipy.spatial.procrustes`.
+* Deprecated class `skbio.tree.CompressedTrie` and function `skbio.tree.fasta_to_pairlist` have been removed from scikit-bio as scheduled in favor of existing general-purpose Python trie packages.
+* Deprecated function `skbio.util.flatten` has been removed from scikit-bio as scheduled in favor of solutions available in the Python standard library (see [here](http://stackoverflow.com/a/952952/3639023) and [here](http://stackoverflow.com/a/406199/3639023) for examples).
+* Pairwise alignment functions in `skbio.alignment` now return a tuple containing the `TabularMSA` alignment, alignment score, and start/end positions. The returned `TabularMSA`'s `index` is always the default integer index; sequence IDs are no longer propagated to the MSA. Additionally, the pairwise alignment functions now accept the following input types to align:
+    - `local_pairwise_align_nucleotide`: `DNA` or `RNA`
+    - `local_pairwise_align_protein`: `Protein`
+    - `local_pairwise_align`: `IUPACSequence`
+    - `global_pairwise_align_nucleotide`: `DNA`, `RNA`, or `TabularMSA[DNA|RNA]`
+    - `global_pairwise_align_protein`: `Protein` or `TabularMSA[Protein]`
+    - `global_pairwise_align`: `IUPACSequence` or `TabularMSA`
+    - `local_pairwise_align_ssw`: `DNA`, `RNA`, or `Protein`. Additionally, this function now overrides the `protein` kwarg based on input type. `constructor` parameter was removed because the function now determines the return type based on input type.
+* Removed `skbio.alignment.SequenceCollection` in favor of using a list or other standard library containers to store scikit-bio sequence objects (most `SequenceCollection` operations were simple list comprehensions). Use `DistanceMatrix.from_iterable` instead of `SequenceCollection.distances` (pass `key="id"` to exactly match original behavior).
+* Removed `skbio.alignment.Alignment` in favor of `skbio.alignment.TabularMSA`.
+* Removed `skbio.alignment.SequenceCollectionError` and `skbio.alignment.AlignmentError` exceptions as their corresponding classes no longer exist.
+
+### Bug Fixes
+
+* ``Sequence`` objects now handle slicing of empty positional metadata correctly. Any metadata that is empty will no longer be propagated by the internal ``_to`` constructor. ([#1133](https://github.com/biocore/scikit-bio/issues/1133))
+* ``DissimilarityMatrix.plot()`` no longer leaves a white border around the
+  heatmap it plots (PR #1070).
+* ``TreeNode.root_at_midpoint`` no longer fails when a node with two equal length child branches exists in the tree. ([#1077](https://github.com/biocore/scikit-bio/issues/1077))
+* ``TreeNode._set_max_distance``, as called through ``TreeNode.get_max_distance`` or ``TreeNode.root_at_midpoint``, would store distance information as ``list``s in the attribute ``MaxDistTips`` on each node in the tree; however, these distances were only valid for the node in which the call to ``_set_max_distance`` was made. The values contained in ``MaxDistTips`` are now correct across the tree following a call to ``get_max_distance``. The scope of impact of this bug is limited to users [...]
+* Added missing `nose` dependency to setup.py's `install_requires`. ([#1214](https://github.com/biocore/scikit-bio/issues/1214))
+* Fixed issue that resulted in legends of ``OrdinationResult`` plots sometimes being truncated. ([#1210](https://github.com/biocore/scikit-bio/issues/1210))
+
+### Deprecated functionality [stable]
+* `skbio.Sequence.copy` has been deprecated in favor of `copy.copy(seq)` and `copy.deepcopy(seq)`.
+
+### Miscellaneous
+* Doctests are now written in Python 3.
+* ``make test`` now validates MANIFEST.in using [check-manifest](https://github.com/mgedmin/check-manifest). ([#461](https://github.com/biocore/scikit-bio/issues/461))
+* Many new alpha diversity equations added to ``skbio.diversity.alpha`` documentation. ([#321](https://github.com/biocore/scikit-bio/issues/321))
+* Order of ``lowercase`` and ``validate`` keywords swapped in ``DNA``, ``RNA``, and ``Protein``.
+
 ## Version 0.4.0 (2015-07-08)
 
 Initial beta release. In addition to the changes detailed below, the following
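
The new diversity drivers listed in the 0.4.1 changelog above can be exercised with a few lines of Python. This is a minimal sketch assuming the signatures described in the changelog (metric name as the first argument, one row of counts per sample); the specific metric names used here are illustrative assumptions, not taken from this diff.

```python
import numpy as np
from skbio.diversity import alpha_diversity, beta_diversity

# Rows are samples, columns are observation (e.g. OTU) counts.
counts = np.array([[10, 2, 0, 4],
                   [3, 3, 3, 3],
                   [0, 0, 5, 5]])
ids = ['S1', 'S2', 'S3']

# One alpha diversity metric applied to every sample in a single call.
print(alpha_diversity('observed_otus', counts, ids=ids))

# Beta diversity: the metric is now the first argument and a distance
# matrix over the samples is returned.
print(beta_diversity('braycurtis', counts, ids=ids))
```
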
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6da2ccf..f167f1d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -23,7 +23,7 @@ When considering contributing to scikit-bio, you should begin by posting an issu
 
 * For documentation additions, you should first post an issue describing what you propose to add, where you'd like to add it in the documentation, and a description of why you think it's an important addition. For documentation improvements and fixes, you should post an issue describing what is currently wrong or missing and how you propose to address it. For more information about building and contributing to scikit-bio's documentation, see our [documentation guide](doc/README.md).
 
-When you post your issue, the scikit-bio developers will respond to let you know if we agree with the addition or change. It's very important that you go through this step to avoid wasting time working on a feature that we are not interested in including in scikit-bio. **This initial discussion with the developers is particularly important prior to our beta (0.4.0) release, as scikit-bio is rapidly changing. This includes complete re-writes of some of the core objects, so if you don't ge [...]
+When you post your issue, the scikit-bio developers will respond to let you know if we agree with the addition or change. It's very important that you go through this step to avoid wasting time working on a feature that we are not interested in including in scikit-bio. **This initial discussion with the developers is important because scikit-bio is rapidly changing, including complete re-writes of some of the core objects. If you don't get in touch first you could easily waste time by wo [...]
 
 Getting started
 ---------------
@@ -53,7 +53,9 @@ Particularly for big changes, if you'd like feedback on your code in the form of
 Submitting code to scikit-bio
 -----------------------------
 
-scikit-bio is hosted on [GitHub](http://www.github.com), and we use GitHub's [Pull Request](https://help.github.com/articles/using-pull-requests) mechanism for reviewing and accepting submissions. Once You should go through the following steps to submit code to scikit-bio.
+scikit-bio is hosted on [GitHub](http://www.github.com), and we use GitHub's [Pull Request](https://help.github.com/articles/using-pull-requests) mechanism for reviewing and accepting submissions. You should work through the following steps to submit code to scikit-bio.
+
+**Note:** We recommend developing scikit-bio in a Python 3 environment because doctests must be written (and pass) in Python 3. See [Setting up a development environment](#setting-up-a-development-environment).
 
 1. Begin by [creating an issue](https://github.com/biocore/scikit-bio/issues) describing your proposed change (see [Types of contributions](#types-of-contributions) for details).
 
@@ -69,11 +71,7 @@ scikit-bio is hosted on [GitHub](http://www.github.com), and we use GitHub's [Pu
  git pull upstream master
  ```
 
-5. Install scikit-bio in "development mode" so that your changes are reflected in the installed package without having to reinstall the package each time:
-
- ```
- pip install -e .
- ```
+5. Install scikit-bio for development. See [Setting up a development environment](#setting-up-a-development-environment).
 
 6. Create a new topic branch that you will make your changes in with ``git checkout -b``:
 
@@ -108,6 +106,59 @@ scikit-bio is hosted on [GitHub](http://www.github.com), and we use GitHub's [Pu
 
 13. Issue a [pull request](https://help.github.com/articles/using-pull-requests) on the GitHub website to request that we merge your branch's changes into scikit-bio's master branch. Be sure to include a description of your changes in the pull request, as well as any other information that will help the scikit-bio developers involved in reviewing your code. Please include ``fixes #<issue-number>`` in your pull request description or in one of your commit messages so that the correspondin [...]
 
+Setting up a development environment
+------------------------------------
+
+The recommended way to set up a development environment for contributing to scikit-bio is to use [Anaconda](https://store.continuum.io/cshop/anaconda/) by Continuum Analytics, with its associated command-line utility `conda`. The primary benefit of `conda` over `pip` is that on some operating systems (i.e., Linux), `pip` installs packages from source, which can take a very long time for NumPy, SciPy, matplotlib, etc. `conda` installs these packages using pre-built binaries, so the inst [...]
+
+1. Install Anaconda
+
+ See [Continuum's site](https://store.continuum.io/cshop/anaconda/) for instructions. [Miniconda](http://conda.pydata.org/docs/install/quick.html) provides a quick way to get `conda` up and running.
+
+2. Create a new conda environment
+ ```
+ conda create -n env_name python=3.4 pip
+ ```
+
+ Note that `env_name` can be any name desired, for example
+
+ ```
+ conda create -n skbio python=3.4 pip
+ ```
+
+3. Activate the environment
+
+ This may be slightly different depending on the operating system. Refer to the Continuum site to find instructions for your OS.
+ ```
+ source activate env_name
+ ```
+
+4. Navigate to the scikit-bio directory
+ See [the section on submitting code](#submitting-code-to-scikit-bio).
+ ```
+ cd /path/to/scikit-bio
+ ```
+
+5. Install `conda` requirements
+ ```
+ conda install --file ci/conda_requirements.txt
+ ```
+
+6. Install `pip` requirements
+ ```
+ pip install -r ci/pip_requirements.txt
+ ```
+
+7. Install scikit-bio
+ ```
+ pip install --no-deps -e .
+ ```
+
+8. Test the installation
+ ```
+ make test
+ ```
+
 Coding guidelines
 -----------------
 
@@ -119,7 +170,7 @@ Testing guidelines
 All code that is added to scikit-bio must be unit tested, and the unit test code must be submitted in the same pull request as the library code that you are submitting. We will only merge code that is unit tested and that passes the [continuous integration build](https://github.com/biocore/scikit-bio/blob/master/.travis.yml). This build includes, but is not limited to, the following checks:
 
 - Full unit test suite executes without errors in Python 2 and 3.
-- Doctests execute correctly (currently only for Python 2).
+- Doctests execute correctly in Python 3.
 - C code can be correctly compiled.
 - Cython code is correctly generated.
 - All tests import functionality from the appropriate minimally deep API.
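
The Python 3 doctest requirement above is easy to check locally before opening a pull request. Below is a hedged, hypothetical docstring (the function is invented for illustration and is not part of scikit-bio) whose example only passes under Python 3's true division:

```python
def gc_fraction(seq):
    """Return the fraction of G and C characters in ``seq``.

    Examples
    --------
    >>> gc_fraction('ACGT')
    0.5

    """
    return sum(c in 'GC' for c in seq) / len(seq)


if __name__ == '__main__':
    # Run the embedded doctest; under Python 2 integer division would make
    # this example fail, which is exactly why doctests target Python 3.
    import doctest
    doctest.testmod()
```
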
diff --git a/MANIFEST.in b/MANIFEST.in
index 93ee436..7a92a0e 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
+include .coveragerc
 include CHANGELOG.md
 include CONTRIBUTING.md
 include COPYING.txt
@@ -5,14 +6,19 @@ include Makefile
 include README.rst
 include RELEASE.md
 include checklist.py
+include asv.conf.json
 
 graft assets
+graft ci
 graft doc
 graft ipynbs
 graft licenses
 graft skbio
 
 prune doc/build
+prune doc/source/generated
 
 global-exclude *.pyc
 global-exclude *.pyo
+global-exclude *.so
+global-exclude .*.swp
diff --git a/Makefile b/Makefile
index d56ce75..22151eb 100644
--- a/Makefile
+++ b/Makefile
@@ -7,13 +7,20 @@
 # ----------------------------------------------------------------------------
 
 ifeq ($(WITH_COVERAGE), TRUE)
-	TEST_COMMAND = coverage run -m skbio.test
+	TEST_COMMAND = COVERAGE_FILE=../.coverage coverage run \
+	--rcfile ../.coveragerc -m skbio.test
 else
 	TEST_COMMAND = python -m skbio.test
 endif
 
+# cd into a directory that is different from scikit-bio root directory to
+# simulate a user's install and testing of scikit-bio. Running from the root
+# directory will find the `skbio` subpackage (not necessarily the installed
+# one!) because cwd is considered in Python's search path. It is important to
+# simulate a user's install/test process this way to find package data that did
+# not install correctly (for example).
 test:
-	$(TEST_COMMAND)
-	pep8 skbio setup.py checklist.py
+	cd ci && $(TEST_COMMAND)
 	flake8 skbio setup.py checklist.py
 	./checklist.py
+	check-manifest
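
The comment added to the Makefile explains that tests now run from `ci/` so the working copy's `skbio/` directory does not shadow the installed package. A small, standalone Python check of which copy actually gets imported (not part of the Makefile change) looks like this:

```python
import os
import skbio

# If this prints a path inside your git checkout while you meant to test the
# installed package, you are hitting exactly the cwd-on-sys.path problem the
# Makefile comment describes.
print('skbio imported from:', os.path.dirname(skbio.__file__))
```
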
diff --git a/README.rst b/README.rst
index b1633f0..08a8897 100644
--- a/README.rst
+++ b/README.rst
@@ -10,7 +10,7 @@ scikit-bio is an open-source, BSD-licensed Python package providing data structu
 To view scikit-bio's documentation, visit `scikit-bio.org
 <http://scikit-bio.org>`__.
 
-scikit-bio is currently in beta. We are very actively developing it, and **backward-incompatible interface changes can and will arise**. To avoid these types of changes being a surprise to our users, our public APIs are decorated to make it clear to users when an API can be relied upon (stable) and when it may be subject to change (experimental). See the `API stability docs <https://github.com/biocore/scikit-bio/tree/0.4.0/doc/source/user/api_stability.rst>`_ for more details, including  [...]
+scikit-bio is currently in beta. We are very actively developing it, and **backward-incompatible interface changes can and will arise**. To avoid these types of changes being a surprise to our users, our public APIs are decorated to make it clear to users when an API can be relied upon (stable) and when it may be subject to change (experimental). See the `API stability docs <https://github.com/biocore/scikit-bio/blob/master/doc/source/user/api_stability.rst>`_ for more details, including [...]
 
 Installing
 ----------
@@ -21,7 +21,7 @@ To install the latest release of scikit-bio::
 
 Equivalently, you can use the ``conda`` package manager available in `Anaconda <http://continuum.io/downloads>`_ or `miniconda <http://conda.pydata.org/miniconda.html>`_ to install scikit-bio and its dependencies without having to compile them::
 
-    conda install scikit-bio
+    conda install -c https://conda.anaconda.org/biocore scikit-bio
 
 Finally, most of scikit-bio's dependencies (in particular, the ones that are trickier to build) are also available, albeit only for Python 2, in `Canopy Express <https://www.enthought.com/canopy-express/>`_.
 
diff --git a/RELEASE.md b/RELEASE.md
index ef4af61..e436e02 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -106,7 +106,33 @@ Assuming the GitHub release tarball correctly installs and passes its tests, you
         pip install scikit-bio
         python -m skbio.test
 
-If this succeeds, the release appears to be a success!
+    If this succeeds, the PyPI release appears to be a success.
+
+6. Next, we'll prepare and post the release to [anaconda.org](http://www.anaconda.org).
+
+    You'll need to have ``conda-build`` and ``anaconda-client`` installed to perform these steps. Both can be conda-installed. First, log into anaconda using the following command. You should log into the ``biocore`` anaconda account (if you don't have login info, get in touch with [@gregcaporaso](https://github.com/gregcaporaso) who is the owner of that account).
+
+        anaconda login
+
+    Due to its C extensions, releasing scikit-bio packages for different platforms will require you to perform the following steps on each of those platforms. For example, an ``osx-64`` package will need to be built on OS X, and a ``linux-64`` package will need to be built on 64-bit Linux. These steps will be the same on all platforms, so you should repeat them for every platform you want to release for.
+
+        conda skeleton pypi scikit-bio
+        conda build scikit-bio --python 2.7
+        conda build scikit-bio --python 3.5
+
+    At this stage you have built Python 2.7 and 3.5 packages. The absolute path to the packages will be provided as output from each ``conda build`` command. You should now create conda environments for each, and run the tests as described above. You can install these local packages as follows:
+
+        conda install --use-local scikit-bio
+
+    If the tests pass, you're ready to upload.
+
+        anaconda upload <package-filepath>
+
+    ``<package-filepath>`` should be replaced with the path to the package that was created above. Repeat this for each package you created (here, the Python 2.7 and 3.5 packages).
+
+    After uploading, you should create new environments for every package you uploaded, install scikit-bio from each package, and re-run the tests. You can install the packages you uploaded as follows:
+
+        conda install -c https://conda.anaconda.org/biocore scikit-bio
 
 ## Post-release cleanup
 
diff --git a/asv.conf.json b/asv.conf.json
new file mode 100644
index 0000000..d219cbd
--- /dev/null
+++ b/asv.conf.json
@@ -0,0 +1,19 @@
+{
+  "environment_type": "conda",
+  "matrix": {
+    "ipython": [],
+    "matplotlib": [],
+    "nose": [],
+    "numpy": [],
+    "pandas": [],
+    "scipy": []
+  },
+  "project": "scikit-bio",
+  "project_url": "http://scikit-bio.org/",
+  "pythons": [
+    "3.4",
+    "3.3",
+    "2.7"
+  ],
+  "version": 1
+}
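
The new asv configuration is plain JSON, so a quick way to confirm it parses (run from the repository root, where `asv.conf.json` lives) uses only the standard library:

```python
import json

# Load and inspect the benchmark configuration added above.
with open('asv.conf.json') as fh:
    conf = json.load(fh)

print(conf['project'], conf['pythons'])
```
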
diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/benchmarks/benchmarks.py b/benchmarks/benchmarks.py
new file mode 100644
index 0000000..da5f2bd
--- /dev/null
+++ b/benchmarks/benchmarks.py
@@ -0,0 +1,64 @@
+# Write the benchmarking functions here.
+# See "Writing benchmarks" in the asv docs for more information.
+
+from skbio import DNA, RNA
+import numpy as np
+
+num_bases = 1000000
+size = int(num_bases / 4)
+short_len = 100
+
+dna_template_bytes = [ord(x) for x in 'ACGT']
+dna_template_bytes_gapped = [ord(x) for x in 'AC-.']
+rna_template_bytes = [ord(x) for x in 'ACGU']
+
+dna_bytes = np.array(dna_template_bytes * size, dtype=np.uint8)
+dna_bytes_short = dna_bytes[:short_len]
+dna_bytes_gapped = np.array(dna_template_bytes_gapped * size, dtype=np.uint8)
+rna_bytes = np.array(rna_template_bytes * size, dtype=np.uint8)
+
+dna_seq = DNA(dna_bytes)
+dna_seq_short = DNA(dna_bytes_short)
+dna_gapped = DNA(dna_bytes_gapped)
+rna_seq = RNA(rna_bytes)
+
+motif_1 = "GGTGCAAGCCGGTGGAAACA"
+motif_1_regex = '(' + motif_1 + ')'
+
+
+def consume_iterator(iterator):
+    for _ in iterator:
+        pass
+
+
+class BenchmarkSuite:
+
+    def time_object_creation(self):
+        DNA(dna_bytes, validate=False)
+
+    def time_object_creation_validate(self):
+        DNA(dna_bytes)
+
+    def time_reverse_complement(self):
+        dna_seq.reverse_complement()
+
+    def time_degap_all(self):
+        dna_seq.degap()
+
+    def time_translate(self):
+        rna_seq.translate()
+
+    def time_search_for_motif(self):
+        consume_iterator(dna_seq.find_with_regex(motif_1_regex))
+
+    def time_kmer_count_5(self):
+        dna_seq_short.kmer_frequencies(5)
+
+    def time_kmer_count_25(self):
+        dna_seq_short.kmer_frequencies(25)
+
+    def time_gc_content(self):
+        dna_seq.gc_content()
+
+    def time_search_for_motif_in_gapped(self):
+        dna_seq.find_with_regex(motif_1_regex, ignore=dna_seq.gaps())
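
For a quick spot-check of one of these benchmarks outside of asv, the same kind of fixture can be timed with the standard library. The sketch below mirrors `time_reverse_complement` and is only an illustration, not part of the asv suite.

```python
import timeit

import numpy as np
from skbio import DNA

# Same style of fixture as benchmarks.py, but smaller for a quick run.
dna_bytes = np.array([ord(x) for x in 'ACGT'] * 250000, dtype=np.uint8)
dna_seq = DNA(dna_bytes, validate=False)

# Roughly equivalent to BenchmarkSuite.time_reverse_complement.
elapsed = timeit.timeit(dna_seq.reverse_complement, number=10)
print('reverse_complement: %.3f s for 10 runs' % elapsed)
```
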
diff --git a/checklist.py b/checklist.py
index 8c6442d..d5856c2 100755
--- a/checklist.py
+++ b/checklist.py
@@ -347,7 +347,7 @@ class APIRegressionValidator(RepoValidator):
     """Flag tests that import from a non-minimized subpackage hierarchy.
 
     Flags tests that aren't imported from a minimally deep API target. (e.g.
-    skbio.Alignment vs skbio.alignment.Alignment). This should prevent
+    skbio.TabularMSA vs skbio.alignment.TabularMSA). This should prevent
     accidental regression in our API because tests will fail if any alias is
     removed, and this checklist will fail if any test doesn't import from the
     least deep API target.
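
Concretely, the rule this validator enforces looks like the following in a test module; the deep import is the pattern that gets flagged.

```python
# Preferred: import from the minimally deep (top-level) API.
from skbio import TabularMSA

# Flagged by checklist.py: the same object imported from deeper in the
# package hierarchy.
# from skbio.alignment import TabularMSA
```
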
diff --git a/ci/conda_requirements.txt b/ci/conda_requirements.txt
new file mode 100644
index 0000000..5cc2170
--- /dev/null
+++ b/ci/conda_requirements.txt
@@ -0,0 +1,14 @@
+pip
+numpy
+scipy
+matplotlib
+pandas
+nose
+pep8
+ipython
+future
+six
+pyflakes
+flake8
+python-dateutil
+decorator
diff --git a/ci/pip_requirements.txt b/ci/pip_requirements.txt
new file mode 100644
index 0000000..33a7f0b
--- /dev/null
+++ b/ci/pip_requirements.txt
@@ -0,0 +1,11 @@
+HTTPretty
+bz2file
+contextlib2
+coveralls
+natsort
+lockfile
+CacheControl
+git+git://github.com/sphinx-doc/sphinx.git
+sphinx-bootstrap-theme
+git+git://github.com/numpy/numpydoc.git@1a848331c2cf53d4fe356f4607799524bcc577ed
+check-manifest
diff --git a/doc/README.md b/doc/README.md
index c1c0ac7..ff3f3b4 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -10,18 +10,8 @@ visit [scikit-bio.org](http://scikit-bio.org).
 Building the documentation
 --------------------------
 
-To build the documentation, you'll need the following Python packages
-installed:
-
-- [Sphinx](http://sphinx-doc.org/) == 1.2.2
-- [sphinx-bootstrap-theme](https://pypi.python.org/pypi/sphinx-bootstrap-theme/)
-- [numpydoc](https://github.com/numpy/numpydoc) >= v0.6
-
-An easy way to install the dependencies is via pip:
-
-    pip install Sphinx sphinx-bootstrap-theme git+git://github.com/numpy/numpydoc.git
-
-Finally, you will need to install scikit-bio.
+To build the documentation, you'll need a scikit-bio development environment
+set up. See [CONTRIBUTING.md](../CONTRIBUTING.md) for instructions.
 
 **Important:** The documentation will be built for whatever version of
 scikit-bio is *currently installed* on your system (i.e., the version imported
@@ -143,9 +133,9 @@ to each object is inserted into the page for you.
 
 After listing public module members, we encourage a usage example section
 showing how to use some of the module's functionality. Examples should be
-written in [doctest](http://docs.python.org/2/library/doctest.html) format so
+written in [doctest](http://docs.python.org/3/library/doctest.html) format so
 that they can be automatically tested (e.g., using ```make test``` or
-```python -m skbio.test```).
+```python -m skbio.test```). Doctests should be written in Python 3.
 
     Examples
     --------
@@ -161,8 +151,9 @@ documentation with the ```.. plot::``` directive. For example:
 
     .. plot::
 
-       >>> from skbio.draw import boxplots
-       >>> fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])
+       >>> import pandas as pd
+       >>> df = pd.DataFrame({'col1': [1, 2, 3, 4], 'col2': [10, 11, 12, 13]})
+       >>> fig = df.boxplot()
 
 This will include the plot, a link to the source code used to generate the
 plot, and links to different image formats (e.g., PNG and PDF) so that users
diff --git a/doc/source/_static/copybutton.js b/doc/source/_static/copybutton.js
index 168a26c..0f65a72 100644
--- a/doc/source/_static/copybutton.js
+++ b/doc/source/_static/copybutton.js
@@ -2,11 +2,11 @@
 $(document).ready(function() {
     /* Add a [>>>] button on the top-right corner of code samples to hide
      * the >>> and ... prompts and the output and thus make the code
-     * copyable. 
+     * copyable.
      * Note: This JS snippet was taken from the official python.org
      * documentation site.*/
     var div = $('.highlight-python .highlight,' +
-                '.highlight-python3 .highlight,' + 
+                '.highlight-python3 .highlight,' +
                 '.highlight-pycon .highlight')
     var pre = div.find('pre');
 
@@ -21,7 +21,8 @@ $(document).ready(function() {
         'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0',
         'border-color': border_color, 'border-style': border_style,
         'border-width': border_width, 'color': border_color, 'text-size': '75%',
-        'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em'
+        'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em',
+        'display': 'inline'
     }
 
     // create and add the button to all the code blocks that contain >>>
@@ -32,6 +33,25 @@ $(document).ready(function() {
             button.css(button_styles)
             button.attr('title', hide_text);
             jthis.prepend(button);
+
+            var show_output = false;
+            button.bind('click', function() {
+                if (show_output) {
+                    var button = $(this);
+                    button.parent().find('.go, .gp, .gt').show();
+                    button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
+                    button.css('text-decoration', 'none');
+                    button.attr('title', hide_text);
+                    show_output = false;
+                } else {
+                    var button = $(this);
+                    button.parent().find('.go, .gp, .gt').hide();
+                    button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
+                    button.css('text-decoration', 'line-through');
+                    button.attr('title', show_text);
+                    show_output = true;
+                }
+            });
         }
         // tracebacks (.gt) contain bare text elements that need to be
         // wrapped in a span to work with .nextUntil() (see later)
@@ -39,22 +59,4 @@ $(document).ready(function() {
             return ((this.nodeType == 3) && (this.data.trim().length > 0));
         }).wrap('<span>');
     });
-
-    // define the behavior of the button when it's clicked
-    $('.copybutton').toggle(
-        function() {
-            var button = $(this);
-            button.parent().find('.go, .gp, .gt').hide();
-            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
-            button.css('text-decoration', 'line-through');
-            button.attr('title', show_text);
-        },
-        function() {
-            var button = $(this);
-            button.parent().find('.go, .gp, .gt').show();
-            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
-            button.css('text-decoration', 'none');
-            button.attr('title', hide_text);
-        });
 });
-
diff --git a/doc/source/_static/style.css b/doc/source/_static/style.css
index 3ae6991..06016de 100644
--- a/doc/source/_static/style.css
+++ b/doc/source/_static/style.css
@@ -14,9 +14,14 @@ blockquote {
     font-size: 14px !important;
 }
 
-cite, code {
+code {
+    color: #259D57 !important;
+    font-size: 100% !important;
+}
+
+cite, code.docutils.literal:not(.xref) {
     padding: 1px 4px !important;
-    font-size: 90% !important;
+    font-size: 100% !important;
     color: #000 !important;
     background-color: #F5F5F5 !important;
     white-space: nowrap !important;
@@ -26,6 +31,24 @@ cite, code {
     font-family: Menlo,Monaco,Consolas,"Courier New",monospace !important;
 }
 
+a code.docutils.literal:not(.xref) {
+    color: #259D57 !important;
+    background: none !important;
+    border: none !important;
+}
+
+a:active code.docutils.literal:not(.xref),
+a:hover code.docutils.literal:not(.xref),
+a:focus code.docutils.literal:not(.xref) {
+    color: #00B84D !important;
+}
+
+#navbar .dropdown-menu > li > a:active code.docutils.literal:not(.xref),
+#navbar .dropdown-menu > li > a:hover code.docutils.literal:not(.xref),
+#navbar .dropdown-menu > li > a:focus code.docutils.literal:not(.xref) {
+    color: #FFF !important;
+}
+
 .label {
     display: table-cell !important;
     color: #000 !important;
diff --git a/doc/source/_templates/autosummary/class.rst b/doc/source/_templates/autosummary/class.rst
index 7cc1e1c..1a42cc9 100644
--- a/doc/source/_templates/autosummary/class.rst
+++ b/doc/source/_templates/autosummary/class.rst
@@ -8,7 +8,19 @@
       .. autosummary::
          :toctree:
       {% for item in all_methods %}
-         {%- if not item.startswith('_') or item in ['__call__'] %}
+         {# We want to build dunder methods if they exist, but not every kind of dunder. These are the dunders provided by default on `object` #}
+         {%- if not item.startswith('_') or (item not in ['__class__',
+                                                          '__delattr__',
+                                                          '__getattribute__',
+                                                          '__init__',
+                                                          '__dir__',
+                                                          '__new__',
+                                                          '__reduce__',
+                                                          '__reduce_ex__',
+                                                          '__repr__',
+                                                          '__setattr__',
+                                                          '__sizeof__',
+                                                          '__subclasshook__'] and item.startswith('__')) %}
          {{ name }}.{{ item }}
          {%- endif -%}
       {%- endfor %}
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 3905715..30a8a04 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -4,12 +4,62 @@
 import glob
 import sys
 import os
+import types
+import re
 
-# Check that dependencies are installed and the correct version if necessary
-sphinx_version = '1.2.2'
 import sphinx
-if sphinx.__version__ != sphinx_version:
-    raise RuntimeError("Sphinx %s required" % sphinx_version)
+import sphinx.ext.autosummary as autosummary
+
+class NewAuto(autosummary.Autosummary):
+    def get_items(self, names):
+        # Camel to snake case from http://stackoverflow.com/a/1176023/579416
+        first_cap_re = re.compile('(.)([A-Z][a-z]+)')
+        all_cap_re = re.compile('([a-z0-9])([A-Z])')
+        def fix_item(display_name, sig, summary, real_name):
+            class_names = {
+                'TreeNode': 'tree',
+                'TabularMSA': 'msa'
+            }
+
+            class_name = real_name.split('.')[-2]
+            if class_name in class_names:
+                nice_name = class_names[class_name]
+            else:
+                s1 = first_cap_re.sub(r'\1_\2', class_name)
+                nice_name = all_cap_re.sub(r'\1_\2', s1).lower()
+                if len(nice_name) > 10:
+                    nice_name = ''.join([e[0] for e in nice_name.split('_')])
+            def fmt(string):
+                count = string.count('%s')
+                return string % tuple([nice_name] * count)
+
+            specials = {
+                '__eq__': fmt('%s1 == %s2'),
+                '__ne__': fmt('%s1 != %s2'),
+                '__gt__': fmt('%s1 > %s2'),
+                '__lt__': fmt('%s1 < %s2'),
+                '__ge__': fmt('%s1 >= %s2'),
+                '__le__': fmt('%s1 <= %s2'),
+                '__getitem__': fmt('%s[x]'),
+                '__iter__': fmt('iter(%s)'),
+                '__contains__': fmt('x in %s'),
+                '__bool__': fmt('bool(%s)'),
+                '__str__': fmt('str(%s)'),
+                '__reversed__': fmt('reversed(%s)'),
+                '__len__': fmt('len(%s)'),
+                '__copy__': fmt('copy.copy(%s)'),
+                '__deepcopy__': fmt('copy.deepcopy(%s)'),
+            }
+            if display_name in specials:
+                return specials[display_name], '', summary, real_name
+            return display_name, sig, summary, real_name
+
+        skip = ['__nonzero__']
+
+        return [fix_item(*e) for e in super(NewAuto, self).get_items(names)
+                if e[0] not in skip]
+
+autosummary.Autosummary = NewAuto
 
 import sphinx_bootstrap_theme
 
@@ -24,7 +74,28 @@ try:
 except ImportError:
     raise RuntimeError(
         "numpydoc v0.6 or later required. Install it with:\n"
-        "  pip install git+git://github.com/numpy/numpydoc.git")
+        "  pip install git+git://github.com/numpy/numpydoc.git@1a848331c2cf53"
+        "d4fe356f4607799524bcc577ed")
+
+@property
+def _extras(self):
+    # This will be accessed in a for-loop, so memoize to prevent quadratic
+    # behavior.
+    if not hasattr(self, '__memoized_extras'):
+        # We want every dunder that has a function type (not class slot),
+        # meaning we created the dunder, not Python.
+        # We don't ever care about __init__ and the user will see plenty of
+        # __repr__ calls, so why waste space.
+        self.__memoized_extras = [
+            a for a, v in inspect.getmembers(self._cls)
+            if type(v) == types.FunctionType and a.startswith('__')
+            and a not in ['__init__', '__repr__']
+        ]
+    return self.__memoized_extras
+
+# The extra_public_methods depends on what class we are looking at.
+numpydoc.docscrape.ClassDoc.extra_public_methods = _extras
+
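The _extras hook above leans on the fact that, under Python 3, inspect.getmembers on a class returns dunders defined in Python source as plain functions, while the slot wrappers inherited from object are not, so the type check keeps only scikit-bio's own dunders. A small self-contained check of that filter (the Demo class is hypothetical):

>>> import inspect
>>> import types
>>> class Demo(object):
...     def __init__(self):
...         pass
...     def __eq__(self, other):
...         return True
...     def __len__(self):
...         return 0
>>> sorted(a for a, v in inspect.getmembers(Demo)
...        if type(v) == types.FunctionType and a.startswith('__')
...        and a not in ['__init__', '__repr__'])
['__eq__', '__len__']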
 
 import skbio
 from skbio.util._decorator import classproperty
@@ -381,7 +452,7 @@ intersphinx_mapping = {
         'http://docs.scipy.org/doc/numpy': None,
         'http://docs.scipy.org/doc/scipy/reference': None,
         'http://matplotlib.org': None,
-        'http://pandas.pydata.org': None,
+        'http://pandas.pydata.org/pandas-docs/stable': None,
         'http://www.biom-format.org':None
 }
 
diff --git a/doc/source/draw.rst b/doc/source/draw.rst
deleted file mode 100644
index 18779ae..0000000
--- a/doc/source/draw.rst
+++ /dev/null
@@ -1 +0,0 @@
-.. automodule:: skbio.draw
diff --git a/doc/source/index.rst b/doc/source/index.rst
index ff64847..57bd1fb 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -16,7 +16,6 @@ API Reference
    alignment
    tree
    workflow
-   draw
    diversity
    stats
    util
diff --git a/setup.py b/setup.py
index 02ae5ab..539697f 100644
--- a/setup.py
+++ b/setup.py
@@ -83,7 +83,9 @@ extensions = [
     Extension("skbio.alignment._ssw_wrapper",
               ["skbio/alignment/_ssw_wrapper" + ext,
                "skbio/alignment/_lib/ssw.c"],
-              extra_compile_args=ssw_extra_compile_args)
+              extra_compile_args=ssw_extra_compile_args),
+    Extension("skbio.diversity._phylogenetic",
+              ["skbio/diversity/_phylogenetic" + ext])
 ]
 
 if USE_CYTHON:
@@ -100,14 +102,14 @@ setup(name='scikit-bio',
       maintainer="scikit-bio development team",
       maintainer_email="gregcaporaso@gmail.com",
       url='http://scikit-bio.org',
-      test_suite='nose.collector',
       packages=find_packages(),
       ext_modules=extensions,
       cmdclass={'build_ext': build_ext},
       setup_requires=['numpy >= 1.9.2'],
       install_requires=[
           'bz2file >= 0.98',
-          'CacheControl[FileCache] >= 0.11.5',
+          'lockfile >= 0.10.2',
+          'CacheControl >= 0.11.5',
           'contextlib2 >= 0.4.0',
           'decorator >= 3.4.2',
           'future >= 0.14.3',
@@ -115,15 +117,15 @@ setup(name='scikit-bio',
           'matplotlib >= 1.4.3',
           'natsort >= 4.0.3',
           'numpy >= 1.9.2',
-          'pandas >= 0.16.2',
+          'pandas >= 0.17.0',
           'scipy >= 0.15.1',
-          'six >= 1.9.0'
+          'six >= 1.9.0',
+          'nose >= 1.3.7'
       ],
-      extras_require={'test': ["HTTPretty", "nose", "pep8", "flake8",
-                               "python-dateutil"],
-                      'doc': ["Sphinx == 1.2.2", "sphinx-bootstrap-theme"]},
       classifiers=classifiers,
       package_data={
+          'skbio.diversity.alpha.tests': ['data/qiime-191-tt/*'],
+          'skbio.diversity.beta.tests': ['data/qiime-191-tt/*'],
           'skbio.io.tests': ['data/*'],
           'skbio.io.format.tests': ['data/*'],
           'skbio.stats.tests': ['data/*'],
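For context, the new _phylogenetic extension reuses the .pyx/.c switching convention already in place for the SSW wrapper: `ext` and `USE_CYTHON` are defined earlier in setup.py, outside this hunk. A hedged sketch of that convention follows; only the names ext, USE_CYTHON, Extension and extensions come from the diff, and the try/except used here to derive USE_CYTHON is an assumption for illustration.

# Sketch only: build from .pyx sources when Cython is available, otherwise
# fall back to the pre-generated .c files shipped in the sdist.
from setuptools.extension import Extension

try:
    from Cython.Build import cythonize
    USE_CYTHON = True    # assumption: the real setup.py may decide differently
except ImportError:
    USE_CYTHON = False

ext = '.pyx' if USE_CYTHON else '.c'

extensions = [
    Extension("skbio.diversity._phylogenetic",
              ["skbio/diversity/_phylogenetic" + ext]),
]

if USE_CYTHON:
    extensions = cythonize(extensions)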
diff --git a/skbio/__init__.py b/skbio/__init__.py
index 35067ce..68856a5 100644
--- a/skbio/__init__.py
+++ b/skbio/__init__.py
@@ -15,18 +15,18 @@ import skbio.io  # noqa
 # imports included for convenience
 from skbio.sequence import Sequence, DNA, RNA, Protein, GeneticCode
 from skbio.stats.distance import DistanceMatrix
-from skbio.alignment import (
-    local_pairwise_align_ssw, SequenceCollection, Alignment)
+from skbio.alignment import local_pairwise_align_ssw, TabularMSA
 from skbio.tree import TreeNode, nj
 from skbio.io import read, write
+from skbio._base import OrdinationResults
 
 
 __all__ = ['Sequence', 'DNA', 'RNA', 'Protein', 'GeneticCode',
-           'DistanceMatrix', 'local_pairwise_align_ssw', 'SequenceCollection',
-           'Alignment', 'TreeNode', 'nj', 'read', 'write']
+           'DistanceMatrix', 'local_pairwise_align_ssw', 'TabularMSA',
+           'TreeNode', 'nj', 'read', 'write', 'OrdinationResults']
 
 __credits__ = "https://github.com/biocore/scikit-bio/graphs/contributors"
-__version__ = "0.4.0"
+__version__ = "0.4.1"
 
 mottos = [
     # 03/15/2014
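With the reshuffled convenience imports, the 0.4.1 data structures are reachable directly from the top-level package (including OrdinationResults, which now lives in skbio._base). A minimal doctest-style check; the repr shown matches the TabularMSA output documented later in this diff:

>>> from skbio import TabularMSA, DNA, OrdinationResults
>>> msa = TabularMSA([DNA("ACC--G-GGTA.."), DNA("TCC--G-GGCA..")])
>>> msa
TabularMSA[DNA]
----------------------
Stats:
    sequence count: 2
    position count: 13
----------------------
ACC--G-GGTA..
TCC--G-GGCA..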
diff --git a/skbio/_base.py b/skbio/_base.py
index c5444fe..aac8289 100644
--- a/skbio/_base.py
+++ b/skbio/_base.py
@@ -8,11 +8,25 @@
 
 from __future__ import absolute_import, division, print_function
 from future.utils import with_metaclass
+from future.builtins import zip
 
-from abc import ABCMeta, abstractmethod
+import abc
+import copy
+import functools
 
+import numpy as np
+import pandas as pd
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D  # noqa
+from IPython.core.pylabtools import print_figure
+from IPython.core.display import Image, SVG
 
-class SkbioObject(with_metaclass(ABCMeta, object)):
+from skbio.stats._misc import _pprint_strs
+from skbio.util._decorator import stable, experimental
+
+
+class SkbioObject(with_metaclass(abc.ABCMeta, object)):
     """Abstract base class defining core API common to all scikit-bio objects.
 
     Public scikit-bio classes should subclass this class to ensure a common,
@@ -20,6 +34,820 @@ class SkbioObject(with_metaclass(ABCMeta, object)):
     be implemented in subclasses, otherwise they will not be instantiable.
 
     """
-    @abstractmethod
+    @abc.abstractmethod
     def __str__(self):
         pass
+
+
+class MetadataMixin(with_metaclass(abc.ABCMeta, object)):
+    @property
+    @stable(as_of="0.4.0")
+    def metadata(self):
+        """``dict`` containing metadata which applies to the entire object.
+
+        Notes
+        -----
+        This property can be set and deleted. When setting new metadata a
+        shallow copy of the dictionary is made.
+
+        Examples
+        --------
+        .. note:: scikit-bio objects with metadata share a common interface for
+           accessing and manipulating their metadata. The following examples
+           use scikit-bio's ``Sequence`` class to demonstrate metadata
+           behavior. These examples apply to all other scikit-bio objects
+           storing metadata.
+
+        Create a sequence with metadata:
+
+        >>> from pprint import pprint
+        >>> from skbio import Sequence
+        >>> seq = Sequence('ACGT', metadata={'id': 'seq-id',
+        ...                                  'description': 'seq description'})
+
+        Retrieve metadata:
+
+        >>> pprint(seq.metadata) # using pprint to display dict in sorted order
+        {'description': 'seq description', 'id': 'seq-id'}
+
+        Update metadata:
+
+        >>> seq.metadata['id'] = 'new-id'
+        >>> seq.metadata['pubmed'] = 12345
+        >>> pprint(seq.metadata)
+        {'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
+
+        Set metadata:
+
+        >>> seq.metadata = {'abc': 123}
+        >>> seq.metadata
+        {'abc': 123}
+
+        Delete metadata:
+
+        >>> seq.has_metadata()
+        True
+        >>> del seq.metadata
+        >>> seq.metadata
+        {}
+        >>> seq.has_metadata()
+        False
+
+        """
+        if self._metadata is None:
+            # Not using setter to avoid copy.
+            self._metadata = {}
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        if not isinstance(metadata, dict):
+            raise TypeError("metadata must be a dict")
+        # Shallow copy.
+        self._metadata = metadata.copy()
+
+    @metadata.deleter
+    def metadata(self):
+        self._metadata = None
+
+    @abc.abstractmethod
+    def __init__(self, metadata=None):
+        pass
+
+    def _init_(self, metadata=None):
+        if metadata is None:
+            self._metadata = None
+        else:
+            self.metadata = metadata
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        pass
+
+    def _eq_(self, other):
+        # We're not simply comparing self.metadata to other.metadata in order
+        # to avoid creating "empty" metadata representations on the objects if
+        # they don't have metadata.
+        if self.has_metadata() and other.has_metadata():
+            if self.metadata != other.metadata:
+                return False
+        elif not (self.has_metadata() or other.has_metadata()):
+            # Both don't have metadata.
+            pass
+        else:
+            # One has metadata while the other does not.
+            return False
+
+        return True
+
+    @abc.abstractmethod
+    def __ne__(self, other):
+        pass
+
+    def _ne_(self, other):
+        return not (self == other)
+
+    @abc.abstractmethod
+    def __copy__(self):
+        pass
+
+    def _copy_(self):
+        if self.has_metadata():
+            return self.metadata.copy()
+        else:
+            return None
+
+    @abc.abstractmethod
+    def __deepcopy__(self, memo):
+        pass
+
+    def _deepcopy_(self, memo):
+        if self.has_metadata():
+            return copy.deepcopy(self.metadata, memo)
+        else:
+            return None
+
+    @stable(as_of="0.4.0")
+    def has_metadata(self):
+        """Determine if the object has metadata.
+
+        An object has metadata if its ``metadata`` dictionary is not empty
+        (i.e., has at least one key-value pair).
+
+        Returns
+        -------
+        bool
+            Indicates whether the object has metadata.
+
+        Examples
+        --------
+        .. note:: scikit-bio objects with metadata share a common interface for
+           accessing and manipulating their metadata. The following examples
+           use scikit-bio's ``Sequence`` class to demonstrate metadata
+           behavior. These examples apply to all other scikit-bio objects
+           storing metadata.
+
+        >>> from skbio import Sequence
+        >>> seq = Sequence('ACGT')
+        >>> seq.has_metadata()
+        False
+        >>> seq = Sequence('ACGT', metadata={})
+        >>> seq.has_metadata()
+        False
+        >>> seq = Sequence('ACGT', metadata={'id': 'seq-id'})
+        >>> seq.has_metadata()
+        True
+
+        """
+        return self._metadata is not None and bool(self.metadata)
+
+
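The pattern in MetadataMixin is that subclasses keep the public dunders abstract and delegate to the underscore-wrapped helpers. A minimal sketch of that wiring, not taken from the diff; the Example class and its `data` attribute are hypothetical:

import copy

from skbio._base import MetadataMixin


class Example(MetadataMixin):
    def __init__(self, data, metadata=None):
        self.data = data
        MetadataMixin._init_(self, metadata=metadata)

    def __eq__(self, other):
        return self.data == other.data and MetadataMixin._eq_(self, other)

    def __ne__(self, other):
        return MetadataMixin._ne_(self, other)

    def __copy__(self):
        new = self.__class__(self.data)
        new._metadata = MetadataMixin._copy_(self)
        return new

    def __deepcopy__(self, memo):
        new = self.__class__(copy.deepcopy(self.data, memo))
        new._metadata = MetadataMixin._deepcopy_(self, memo)
        return new


e1 = Example('acgt', metadata={'id': 'x'})
e2 = Example('acgt', metadata={'id': 'x'})
assert e1 == e2 and e1.has_metadata()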
+class PositionalMetadataMixin(with_metaclass(abc.ABCMeta, object)):
+    @abc.abstractmethod
+    def _positional_metadata_axis_len_(self):
+        """Return length of axis that positional metadata applies to.
+
+        Returns
+        -------
+        int
+            Positional metadata axis length.
+
+        """
+        pass
+
+    @property
+    @stable(as_of="0.4.0")
+    def positional_metadata(self):
+        """``pd.DataFrame`` containing metadata along an axis.
+
+        Notes
+        -----
+        This property can be set and deleted. When setting new positional
+        metadata a shallow copy is made.
+
+        Examples
+        --------
+        .. note:: scikit-bio objects with positional metadata share a common
+           interface for accessing and manipulating their positional metadata.
+           The following examples use scikit-bio's ``DNA`` class to demonstrate
+           positional metadata behavior. These examples apply to all other
+           scikit-bio objects storing positional metadata.
+
+        Create a DNA sequence with positional metadata:
+
+        >>> from skbio import DNA
+        >>> seq = DNA(
+        ...     'ACGT',
+        ...     positional_metadata={'quality': [3, 3, 20, 11],
+        ...                          'exons': [True, True, False, True]})
+        >>> seq
+        DNA
+        -----------------------------
+        Positional metadata:
+            'exons': <dtype: bool>
+            'quality': <dtype: int64>
+        Stats:
+            length: 4
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 50.00%
+        -----------------------------
+        0 ACGT
+
+        Retrieve positional metadata:
+
+        >>> seq.positional_metadata
+           exons  quality
+        0   True        3
+        1   True        3
+        2  False       20
+        3   True       11
+
+        Update positional metadata:
+
+        >>> seq.positional_metadata['gaps'] = seq.gaps()
+        >>> seq.positional_metadata
+           exons  quality   gaps
+        0   True        3  False
+        1   True        3  False
+        2  False       20  False
+        3   True       11  False
+
+        Set positional metadata:
+
+        >>> seq.positional_metadata = {'degenerates': seq.degenerates()}
+        >>> seq.positional_metadata
+          degenerates
+        0       False
+        1       False
+        2       False
+        3       False
+
+        Delete positional metadata:
+
+        >>> seq.has_positional_metadata()
+        True
+        >>> del seq.positional_metadata
+        >>> seq.positional_metadata
+        Empty DataFrame
+        Columns: []
+        Index: [0, 1, 2, 3]
+        >>> seq.has_positional_metadata()
+        False
+
+        """
+        if self._positional_metadata is None:
+            # Not using setter to avoid copy.
+            self._positional_metadata = pd.DataFrame(
+                index=np.arange(self._positional_metadata_axis_len_()))
+        return self._positional_metadata
+
+    @positional_metadata.setter
+    def positional_metadata(self, positional_metadata):
+        try:
+            # Pass copy=True to copy underlying data buffer.
+            positional_metadata = pd.DataFrame(positional_metadata, copy=True)
+        except pd.core.common.PandasError as e:
+            raise TypeError(
+                "Invalid positional metadata. Must be consumable by "
+                "`pd.DataFrame` constructor. Original pandas error message: "
+                "\"%s\"" % e)
+
+        num_rows = len(positional_metadata.index)
+        axis_len = self._positional_metadata_axis_len_()
+        if num_rows != axis_len:
+            raise ValueError(
+                "Number of positional metadata values (%d) must match the "
+                "positional metadata axis length (%d)."
+                % (num_rows, axis_len))
+
+        positional_metadata.reset_index(drop=True, inplace=True)
+        self._positional_metadata = positional_metadata
+
+    @positional_metadata.deleter
+    def positional_metadata(self):
+        self._positional_metadata = None
+
+    @abc.abstractmethod
+    def __init__(self, positional_metadata=None):
+        pass
+
+    def _init_(self, positional_metadata=None):
+        if positional_metadata is None:
+            self._positional_metadata = None
+        else:
+            self.positional_metadata = positional_metadata
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        pass
+
+    def _eq_(self, other):
+        # We're not simply comparing self.positional_metadata to
+        # other.positional_metadata in order to avoid creating "empty"
+        # positional metadata representations on the objects if they don't have
+        # positional metadata.
+        if self.has_positional_metadata() and other.has_positional_metadata():
+            if not self.positional_metadata.equals(other.positional_metadata):
+                return False
+        elif not (self.has_positional_metadata() or
+                  other.has_positional_metadata()):
+            # Both don't have positional metadata.
+            pass
+        else:
+            # One has positional metadata while the other does not.
+            return False
+
+        return True
+
+    @abc.abstractmethod
+    def __ne__(self, other):
+        pass
+
+    def _ne_(self, other):
+        return not (self == other)
+
+    @abc.abstractmethod
+    def __copy__(self):
+        pass
+
+    def _copy_(self):
+        if self.has_positional_metadata():
+            # deep=True makes a shallow copy of the underlying data buffer.
+            return self.positional_metadata.copy(deep=True)
+        else:
+            return None
+
+    @abc.abstractmethod
+    def __deepcopy__(self, memo):
+        pass
+
+    def _deepcopy_(self, memo):
+        if self.has_positional_metadata():
+            return copy.deepcopy(self.positional_metadata, memo)
+        else:
+            return None
+
+    @stable(as_of="0.4.0")
+    def has_positional_metadata(self):
+        """Determine if the object has positional metadata.
+
+        An object has positional metadata if its ``positional_metadata``
+        ``pd.DataFrame`` has at least one column.
+
+        Returns
+        -------
+        bool
+            Indicates whether the object has positional metadata.
+
+        Examples
+        --------
+        .. note:: scikit-bio objects with positional metadata share a common
+           interface for accessing and manipulating their positional metadata.
+           The following examples use scikit-bio's ``DNA`` class to demonstrate
+           positional metadata behavior. These examples apply to all other
+           scikit-bio objects storing positional metadata.
+
+        >>> import pandas as pd
+        >>> from skbio import DNA
+        >>> seq = DNA('ACGT')
+        >>> seq.has_positional_metadata()
+        False
+        >>> seq = DNA('ACGT', positional_metadata=pd.DataFrame(index=range(4)))
+        >>> seq.has_positional_metadata()
+        False
+        >>> seq = DNA('ACGT', positional_metadata={'quality': range(4)})
+        >>> seq.has_positional_metadata()
+        True
+
+        """
+        return (self._positional_metadata is not None and
+                len(self.positional_metadata.columns) > 0)
+
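PositionalMetadataMixin follows the same delegation pattern, with one extra abstract hook: _positional_metadata_axis_len_ tells the mixin how many rows the positional_metadata DataFrame must have. A hypothetical sketch under the same assumptions as the previous one; the Track class and its `values` attribute are made up:

import copy

from skbio._base import PositionalMetadataMixin


class Track(PositionalMetadataMixin):
    def __init__(self, values, positional_metadata=None):
        self.values = list(values)
        PositionalMetadataMixin._init_(
            self, positional_metadata=positional_metadata)

    def _positional_metadata_axis_len_(self):
        return len(self.values)

    def __eq__(self, other):
        return (self.values == other.values and
                PositionalMetadataMixin._eq_(self, other))

    def __ne__(self, other):
        return PositionalMetadataMixin._ne_(self, other)

    def __copy__(self):
        new = self.__class__(self.values)
        new._positional_metadata = PositionalMetadataMixin._copy_(self)
        return new

    def __deepcopy__(self, memo):
        new = self.__class__(copy.deepcopy(self.values, memo))
        new._positional_metadata = PositionalMetadataMixin._deepcopy_(
            self, memo)
        return new


t = Track([0.1, 0.5, 0.9],
          positional_metadata={'covered': [True, True, False]})
assert t.has_positional_metadata()
# A mismatched number of rows is rejected by the setter above (ValueError).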
+
+class OrdinationResults(SkbioObject):
+    """Store ordination results, providing serialization and plotting support.
+
+    Stores various components of ordination results. Provides methods for
+    serializing/deserializing results, as well as generation of basic
+    matplotlib 3-D scatterplots. Will automatically display PNG/SVG
+    representations of itself within the IPython Notebook.
+
+    Attributes
+    ----------
+    short_method_name : str
+        Abbreviated ordination method name.
+    long_method_name : str
+        Ordination method name.
+    eigvals : pd.Series
+        The resulting eigenvalues. The index corresponds to the ordination
+        axis labels.
+    samples : pd.DataFrame
+        The position of the samples in the ordination space, row-indexed by
+        the sample id.
+    features : pd.DataFrame
+        The position of the features in the ordination space, row-indexed by
+        the feature id.
+    biplot_scores : pd.DataFrame
+        Correlation coefficients of the samples with respect to the features.
+    sample_constraints : pd.DataFrame
+        Site constraints (linear combinations of constraining variables):
+        coordinates of the sites in the space of the explanatory variables X.
+        These are the fitted site scores.
+    proportion_explained : pd.Series
+        Proportion explained by each of the dimensions in the ordination
+        space. The index corresponds to the ordination axis labels.
+    png
+    svg
+
+    See Also
+    --------
+    ca
+    cca
+    pcoa
+    rda
+    """
+    default_write_format = 'ordination'
+
+    @experimental(as_of="0.4.0")
+    def __init__(self, short_method_name, long_method_name, eigvals,
+                 samples, features=None, biplot_scores=None,
+                 sample_constraints=None, proportion_explained=None):
+
+        self.short_method_name = short_method_name
+        self.long_method_name = long_method_name
+
+        self.eigvals = eigvals
+        self.samples = samples
+        self.features = features
+        self.biplot_scores = biplot_scores
+        self.sample_constraints = sample_constraints
+        self.proportion_explained = proportion_explained
+
+    @experimental(as_of="0.4.0")
+    def __str__(self):
+        """Return a string representation of the ordination results.
+
+        String representation lists ordination results attributes and indicates
+        whether or not they are present. If an attribute is present, its
+        dimensions are listed. Truncated lists of feature and sample IDs are
+        included (if present).
+
+        Returns
+        -------
+        str
+            String representation of the ordination results.
+
+        .. shownumpydoc
+
+        """
+        lines = ['Ordination results:']
+        method = '%s (%s)' % (self.long_method_name, self.short_method_name)
+        lines.append(self._format_attribute(method, 'Method', str))
+
+        attrs = [(self.eigvals, 'Eigvals'),
+                 (self.proportion_explained, 'Proportion explained'),
+                 (self.features, 'Features'),
+                 (self.samples, 'Samples'),
+                 (self.biplot_scores, 'Biplot Scores'),
+                 (self.sample_constraints, 'Sample constraints')]
+        for attr, attr_label in attrs:
+            def formatter(e):
+                return 'x'.join(['%d' % s for s in e.shape])
+
+            lines.append(self._format_attribute(attr, attr_label, formatter))
+
+        lines.append(self._format_attribute(
+            self.features, 'Feature IDs',
+            lambda e: _pprint_strs(e.index.tolist())))
+        lines.append(self._format_attribute(
+            self.samples, 'Sample IDs',
+            lambda e: _pprint_strs(e.index.tolist())))
+
+        return '\n'.join(lines)
+
+    @experimental(as_of="0.4.0")
+    def plot(self, df=None, column=None, axes=(0, 1, 2), axis_labels=None,
+             title='', cmap=None, s=20):
+        """Create a 3-D scatterplot of ordination results colored by metadata.
+
+        Creates a 3-D scatterplot of the ordination results, where each point
+        represents a sample. Optionally, these points can be colored by
+        metadata (see `df` and `column` below).
+
+        Parameters
+        ----------
+        df : pd.DataFrame, optional
+            ``DataFrame`` containing sample metadata. Must be indexed by sample
+            ID, and all sample IDs in the ordination results must exist in the
+            ``DataFrame``. If ``None``, samples (i.e., points) will not be
+            colored by metadata.
+        column : str, optional
+            Column name in `df` to color samples (i.e., points in the plot) by.
+            Cannot have missing data (i.e., ``np.nan``). `column` can be
+            numeric or categorical. If numeric, all values in the column will
+            be cast to ``float`` and mapped to colors using `cmap`. A colorbar
+            will be included to serve as a legend. If categorical (i.e., not
+            all values in `column` could be cast to ``float``), colors will be
+            chosen for each category using evenly-spaced points along `cmap`. A
+            legend will be included. If ``None``, samples (i.e., points) will
+            not be colored by metadata.
+        axes : iterable of int, optional
+            Indices of sample coordinates to plot on the x-, y-, and z-axes.
+            For example, if plotting PCoA results, ``axes=(0, 1, 2)`` will plot
+            PC 1 on the x-axis, PC 2 on the y-axis, and PC 3 on the z-axis.
+            Must contain exactly three elements.
+        axis_labels : iterable of str, optional
+            Labels for the x-, y-, and z-axes. If ``None``, labels will be the
+            values of `axes` cast as strings.
+        title : str, optional
+            Plot title.
+        cmap : str or matplotlib.colors.Colormap, optional
+            Name or instance of matplotlib colormap to use for mapping `column`
+            values to colors. If ``None``, defaults to the colormap specified
+            in the matplotlib rc file. Qualitative colormaps (e.g., ``Set1``)
+            are recommended for categorical data, while sequential colormaps
+            (e.g., ``Greys``) are recommended for numeric data. See [1]_ for
+            these colormap classifications.
+        s : scalar or iterable of scalars, optional
+            Size of points. See matplotlib's ``Axes3D.scatter`` documentation
+            for more details.
+
+        Returns
+        -------
+        matplotlib.figure.Figure
+            Figure containing the scatterplot and legend/colorbar if metadata
+            were provided.
+
+        Raises
+        ------
+        ValueError
+            Raised on invalid input, including the following situations:
+
+            - there are not at least three dimensions to plot
+            - there are not exactly three values in `axes`, they are not
+              unique, or are out of range
+            - there are not exactly three values in `axis_labels`
+            - either `df` or `column` is provided without the other
+            - `column` is not in the ``DataFrame``
+            - sample IDs in the ordination results are not in `df` or have
+              missing data in `column`
+
+        See Also
+        --------
+        mpl_toolkits.mplot3d.Axes3D.scatter
+
+        Notes
+        -----
+        This method creates basic plots of ordination results, and is intended
+        to provide a quick look at the results in the context of metadata
+        (e.g., from within the IPython Notebook). For more customization and to
+        generate publication-quality figures, we recommend EMPeror [2]_.
+
+        References
+        ----------
+        .. [1] http://matplotlib.org/examples/color/colormaps_reference.html
+        .. [2] EMPeror: a tool for visualizing high-throughput microbial
+           community data. Vazquez-Baeza Y, Pirrung M, Gonzalez A, Knight R.
+           Gigascience. 2013 Nov 26;2(1):16. http://biocore.github.io/emperor/
+
+        Examples
+        --------
+        .. plot::
+
+           Define a distance matrix with four samples labelled A-D:
+
+           >>> from skbio import DistanceMatrix
+           >>> dm = DistanceMatrix([[0., 0.21712454, 0.5007512, 0.91769271],
+           ...                      [0.21712454, 0., 0.45995501, 0.80332382],
+           ...                      [0.5007512, 0.45995501, 0., 0.65463348],
+           ...                      [0.91769271, 0.80332382, 0.65463348, 0.]],
+           ...                     ['A', 'B', 'C', 'D'])
+
+           Define metadata for each sample in a ``pandas.DataFrame``:
+
+           >>> import pandas as pd
+           >>> metadata = {
+           ...     'A': {'body_site': 'skin'},
+           ...     'B': {'body_site': 'gut'},
+           ...     'C': {'body_site': 'gut'},
+           ...     'D': {'body_site': 'skin'}}
+           >>> df = pd.DataFrame.from_dict(metadata, orient='index')
+
+           Run principal coordinate analysis (PCoA) on the distance matrix:
+
+           >>> from skbio.stats.ordination import pcoa
+           >>> pcoa_results = pcoa(dm)
+
+           Plot the ordination results, where each sample is colored by body
+           site (a categorical variable):
+
+           >>> fig = pcoa_results.plot(df=df, column='body_site',
+           ...                         title='Samples colored by body site',
+           ...                         cmap='Set1', s=50)
+
+        """
+        # Note: New features should not be added to this method and should
+        # instead be added to EMPeror (http://biocore.github.io/emperor/).
+        # Only bug fixes and minor updates should be made to this method.
+
+        coord_matrix = self.samples.values.T
+        self._validate_plot_axes(coord_matrix, axes)
+
+        # derived from
+        # http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
+        fig = plt.figure()
+        # create the axes, leaving room for a legend as described here:
+        # http://stackoverflow.com/a/9651897/3424666
+        ax = fig.add_axes([0.1, 0.1, 0.6, 0.75], projection='3d')
+
+        xs = coord_matrix[axes[0]]
+        ys = coord_matrix[axes[1]]
+        zs = coord_matrix[axes[2]]
+
+        point_colors, category_to_color = self._get_plot_point_colors(
+            df, column, self.samples.index, cmap)
+
+        scatter_fn = functools.partial(ax.scatter, xs, ys, zs, s=s)
+        if point_colors is None:
+            plot = scatter_fn()
+        else:
+            plot = scatter_fn(c=point_colors, cmap=cmap)
+
+        if axis_labels is None:
+            axis_labels = ['%d' % axis for axis in axes]
+        elif len(axis_labels) != 3:
+            raise ValueError("axis_labels must contain exactly three elements "
+                             "(found %d elements)." % len(axis_labels))
+
+        ax.set_xlabel(axis_labels[0])
+        ax.set_ylabel(axis_labels[1])
+        ax.set_zlabel(axis_labels[2])
+        ax.set_xticklabels([])
+        ax.set_yticklabels([])
+        ax.set_zticklabels([])
+        ax.set_title(title)
+
+        # create legend/colorbar
+        if point_colors is not None:
+            if category_to_color is None:
+                fig.colorbar(plot)
+            else:
+                self._plot_categorical_legend(ax, category_to_color)
+
+        return fig
+
+    def _validate_plot_axes(self, coord_matrix, axes):
+        """Validate `axes` against coordinates matrix."""
+        num_dims = coord_matrix.shape[0]
+        if num_dims < 3:
+            raise ValueError("At least three dimensions are required to plot "
+                             "ordination results. There are only %d "
+                             "dimension(s)." % num_dims)
+        if len(axes) != 3:
+            raise ValueError("`axes` must contain exactly three elements "
+                             "(found %d elements)." % len(axes))
+        if len(set(axes)) != 3:
+            raise ValueError("The values provided for `axes` must be unique.")
+
+        for idx, axis in enumerate(axes):
+            if axis < 0 or axis >= num_dims:
+                raise ValueError("`axes[%d]` must be >= 0 and < %d." %
+                                 (idx, num_dims))
+
+    def _get_plot_point_colors(self, df, column, ids, cmap):
+        """Return a list of colors for each plot point given a metadata column.
+
+        If `column` is categorical, additionally returns a dictionary mapping
+        each category (str) to color (used for legend creation).
+
+        """
+        if ((df is None and column is not None) or (df is not None and
+                                                    column is None)):
+            raise ValueError("Both df and column must be provided, or both "
+                             "must be None.")
+        elif df is None and column is None:
+            point_colors, category_to_color = None, None
+        else:
+            if column not in df:
+                raise ValueError("Column '%s' not in data frame." % column)
+
+            col_vals = df.loc[ids, column]
+
+            if col_vals.isnull().any():
+                raise ValueError("One or more IDs in the ordination results "
+                                 "are not in the data frame, or there is "
+                                 "missing data in the data frame's '%s' "
+                                 "column." % column)
+
+            category_to_color = None
+            try:
+                point_colors = col_vals.astype(float)
+            except ValueError:
+                # we have categorical data, so choose a color for each
+                # category, where colors are evenly spaced across the
+                # colormap.
+                # derived from http://stackoverflow.com/a/14887119
+                categories = col_vals.unique()
+                cmap = plt.get_cmap(cmap)
+                category_colors = cmap(np.linspace(0, 1, len(categories)))
+
+                category_to_color = dict(zip(categories, category_colors))
+                point_colors = col_vals.apply(lambda x: category_to_color[x])
+
+            point_colors = point_colors.tolist()
+
+        return point_colors, category_to_color
+
+    def _plot_categorical_legend(self, ax, color_dict):
+        """Add legend to plot using specified mapping of category to color."""
+        # derived from http://stackoverflow.com/a/20505720
+        proxies = []
+        labels = []
+        for category in color_dict:
+            proxy = mpl.lines.Line2D([0], [0], linestyle='none',
+                                     c=color_dict[category], marker='o')
+            proxies.append(proxy)
+            labels.append(category)
+
+        # place legend outside of the axes (centered)
+        # derived from http://matplotlib.org/users/legend_guide.html
+        ax.legend(proxies, labels, numpoints=1, loc=6,
+                  bbox_to_anchor=(1.05, 0.5), borderaxespad=0.)
+
+    # Here we define the special repr methods that provide the IPython display
+    # protocol. Code derived from:
+    #     https://github.com/ipython/ipython/blob/2.x/examples/Notebook/
+    #         Custom%20Display%20Logic.ipynb
+    # See licenses/ipython.txt for more details.
+
+    def _repr_png_(self):
+        return self._figure_data('png')
+
+    def _repr_svg_(self):
+        return self._figure_data('svg')
+
+    # We expose the above reprs as properties, so that the user can see them
+    # directly (since otherwise the client dictates which one it shows by
+    # default)
+    @property
+    @experimental(as_of="0.4.0")
+    def png(self):
+        """Display basic 3-D scatterplot in IPython Notebook as PNG."""
+        return Image(self._repr_png_(), embed=True)
+
+    @property
+    @experimental(as_of="0.4.0")
+    def svg(self):
+        """Display basic 3-D scatterplot in IPython Notebook as SVG."""
+        return SVG(self._repr_svg_())
+
+    def _figure_data(self, format):
+        fig = self.plot()
+        data = print_figure(fig, format)
+        # We MUST close the figure, otherwise IPython's display machinery
+        # will pick it up and send it as output, resulting in a double display
+        plt.close(fig)
+        return data
+
+    def _format_attribute(self, attr, attr_label, formatter):
+        if attr is None:
+            formatted_attr = 'N/A'
+        else:
+            formatted_attr = formatter(attr)
+        return '\t%s: %s' % (attr_label, formatted_attr)
+
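The categorical branch of _get_plot_point_colors above boils down to sampling evenly spaced colors from the colormap, one per category, then mapping each sample's category to its color. A standalone illustration with made-up categories:

>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> categories = ['gut', 'skin', 'oral']
>>> cmap = plt.get_cmap('Set1')
>>> category_colors = cmap(np.linspace(0, 1, len(categories)))
>>> category_to_color = dict(zip(categories, category_colors))
>>> point_colors = [category_to_color[c] for c in ['gut', 'gut', 'skin']]
>>> len(point_colors)
3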
+
+class ElasticLines(object):
+    """Store blocks of content separated by dashed lines.
+
+    Each dashed line (separator) is as long as the longest content
+    (non-separator) line.
+
+    """
+
+    def __init__(self):
+        self._lines = []
+        self._separator_idxs = []
+        self._max_line_len = -1
+
+    def add_line(self, line):
+        line_len = len(line)
+        if line_len > self._max_line_len:
+            self._max_line_len = line_len
+        self._lines.append(line)
+
+    def add_lines(self, lines):
+        for line in lines:
+            self.add_line(line)
+
+    def add_separator(self):
+        self._lines.append(None)
+        self._separator_idxs.append(len(self._lines) - 1)
+
+    def to_str(self):
+        separator = '-' * self._max_line_len
+        for idx in self._separator_idxs:
+            self._lines[idx] = separator
+        return '\n'.join(self._lines)
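ElasticLines is the helper behind the dash-delimited, fixed-width reprs elsewhere in this release (e.g. the TabularMSA repr shown later in this diff): separators are recorded as placeholders and only sized once to_str() knows the longest content line. A quick sketch of the intended usage; importing the private helper directly is for illustration only:

>>> from skbio._base import ElasticLines
>>> el = ElasticLines()
>>> el.add_line('TabularMSA[DNA]')
>>> el.add_separator()
>>> el.add_lines(['Stats:', '    sequence count: 2'])
>>> el.add_separator()
>>> print(el.to_str())
TabularMSA[DNA]
---------------------
Stats:
    sequence count: 2
---------------------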
diff --git a/skbio/alignment/__init__.py b/skbio/alignment/__init__.py
index 48c2819..d5eb52c 100644
--- a/skbio/alignment/__init__.py
+++ b/skbio/alignment/__init__.py
@@ -1,15 +1,12 @@
 r"""
-Alignments and Sequence collections (:mod:`skbio.alignment`)
-============================================================
+Alignments (:mod:`skbio.alignment`)
+===================================
 
 .. currentmodule:: skbio.alignment
 
-This module provides functionality for working with biological sequence
-collections and alignments. These can be composed of generic sequences,
-nucelotide sequences, DNA sequences, and RNA sequences. By default, input is
-not validated, except that sequence ids must be unique, but all
-contructor methods take a validate option which checks different features of
-the input based on ``SequenceCollection`` type.
+This module provides functionality for computing and manipulating sequence
+alignments. DNA, RNA, and protein sequences can be aligned, as well as
+sequences with custom alphabets.
 
 Data Structures
 ---------------
@@ -17,8 +14,7 @@ Data Structures
 .. autosummary::
    :toctree: generated/
 
-   SequenceCollection
-   Alignment
+   TabularMSA
 
 Optimized (i.e., production-ready) Alignment Algorithms
 -------------------------------------------------------
@@ -51,37 +47,26 @@ General functionality
 
     make_identity_substitution_matrix
 
-Exceptions
-----------
-
-.. autosummary::
-   :toctree: generated/
-
-   SequenceCollectionError
-   AlignmentError
-
 Data Structure Examples
 -----------------------
->>> from skbio import SequenceCollection, Alignment, DNA
->>> seqs = [DNA("ACC--G-GGTA..", metadata={'id':"seq1"}),
-...         DNA("TCC--G-GGCA..", metadata={'id':"seqs2"})]
->>> a1 = Alignment(seqs)
->>> a1
-<Alignment: n=2; mean +/- std length=13.00 +/- 0.00>
-
->>> seqs = [DNA("ACCGGG", metadata={'id':"seq1"}),
-...         DNA("TCCGGGCA", metadata={'id':"seq2"})]
->>> s1 = SequenceCollection(seqs)
->>> s1
-<SequenceCollection: n=2; mean +/- std length=7.00 +/- 1.00>
-
->>> fasta_lines = [u'>seq1\n',
-...                u'CGATGTCGATCGATCGATCGATCAG\n',
-...                u'>seq2\n',
-...                u'CATCGATCGATCGATGCATGCATGCATG\n']
->>> s1 = SequenceCollection.read(fasta_lines, constructor=DNA)
->>> s1
-<SequenceCollection: n=2; mean +/- std length=26.50 +/- 1.50>
+Load two DNA sequences that have been previously aligned into a ``TabularMSA``
+object, using sequence IDs as the MSA's index:
+
+>>> from skbio import TabularMSA, DNA
+>>> seqs = [DNA("ACC--G-GGTA..", metadata={'id': "seq1"}),
+...         DNA("TCC--G-GGCA..", metadata={'id': "seq2"})]
+>>> msa = TabularMSA(seqs, minter='id')
+>>> msa
+TabularMSA[DNA]
+----------------------
+Stats:
+    sequence count: 2
+    position count: 13
+----------------------
+ACC--G-GGTA..
+TCC--G-GGCA..
+>>> msa.index
+Index(['seq1', 'seq2'], dtype='object')
 
 Alignment Algorithm Examples
 ----------------------------
@@ -91,16 +76,23 @@ Optimized Alignment Algorithm Examples
 Using the convenient ``local_pairwise_align_ssw`` function:
 
 >>> from skbio.alignment import local_pairwise_align_ssw
->>> alignment = local_pairwise_align_ssw(
-...                 "ACTAAGGCTCTCTACCCCTCTCAGAGA",
-...                 "ACTAAGGCTCCTAACCCCCTTTTCTCAGA"
-...             )
->>> print(alignment)
->query
+>>> alignment, score, start_end_positions = local_pairwise_align_ssw(
+...     DNA("ACTAAGGCTCTCTACCCCTCTCAGAGA"),
+...     DNA("ACTAAGGCTCCTAACCCCCTTTTCTCAGA")
+... )
+>>> alignment
+TabularMSA[DNA]
+------------------------------
+Stats:
+    sequence count: 2
+    position count: 30
+------------------------------
 ACTAAGGCTCTC-TACCC----CTCTCAGA
->target
 ACTAAGGCTC-CTAACCCCCTTTTCTCAGA
-<BLANKLINE>
+>>> score
+27
+>>> start_end_positions
+[(0, 24), (0, 28)]
 
 Using the ``StripedSmithWaterman`` object:
 
@@ -156,44 +148,47 @@ Here we locally align a pair of protein sequences using gap open penalty
 of 11 and a gap extend penalty of 1 (in other words, it is much more
 costly to open a new gap than extend an existing one).
 
+>>> from skbio import Protein
 >>> from skbio.alignment import local_pairwise_align_protein
->>> s1 = "HEAGAWGHEE"
->>> s2 = "PAWHEAE"
->>> r = local_pairwise_align_protein(s1, s2, 11, 1)
+>>> s1 = Protein("HEAGAWGHEE")
+>>> s2 = Protein("PAWHEAE")
+>>> alignment, score, start_end_positions = local_pairwise_align_protein(
+...     s1, s2, 11, 1)
 
-This returns an ``skbio.Alignment`` object. We can look at the aligned
-sequences:
+This returns an ``skbio.TabularMSA`` object, the alignment score, and start/end
+positions of each aligned sequence:
 
->>> print(str(r[0]))
+>>> alignment
+TabularMSA[Protein]
+---------------------
+Stats:
+    sequence count: 2
+    position count: 5
+---------------------
 AWGHE
->>> print(str(r[1]))
 AW-HE
-
-We can identify the start and end positions of each aligned sequence
-as follows:
-
->>> r.start_end_positions()
-[(4, 8), (1, 4)]
-
-And we can view the score of the alignment using the ``score`` method:
-
->>> r.score()
+>>> score
 25.0
+>>> start_end_positions
+[(4, 8), (1, 4)]
 
-Similarly, we can perform global alignment of nucleotide sequences, and print
-the resulting alignment in FASTA format:
+Similarly, we can perform global alignment of nucleotide sequences:
 
+>>> from skbio import DNA
 >>> from skbio.alignment import global_pairwise_align_nucleotide
->>> s1 = "GCGTGCCTAAGGTATGCAAG"
->>> s2 = "ACGTGCCTAGGTACGCAAG"
->>> r = global_pairwise_align_nucleotide(s1, s2)
->>> print(r)
->0
+>>> s1 = DNA("GCGTGCCTAAGGTATGCAAG")
+>>> s2 = DNA("ACGTGCCTAGGTACGCAAG")
+>>> alignment, score, start_end_positions = global_pairwise_align_nucleotide(
+...     s1, s2)
+>>> alignment
+TabularMSA[DNA]
+----------------------
+Stats:
+    sequence count: 2
+    position count: 20
+----------------------
 GCGTGCCTAAGGTATGCAAG
->1
 ACGTGCCTA-GGTACGCAAG
-<BLANKLINE>
-
 
 """
 
@@ -209,7 +204,7 @@ from __future__ import absolute_import, division, print_function
 
 from skbio.util import TestRunner
 
-from ._alignment import Alignment, SequenceCollection
+from ._tabular_msa import TabularMSA
 from ._pairwise import (
     local_pairwise_align_nucleotide, local_pairwise_align_protein,
     local_pairwise_align, global_pairwise_align_nucleotide,
@@ -218,12 +213,9 @@ from ._pairwise import (
 )
 from skbio.alignment._ssw_wrapper import (
     StripedSmithWaterman, AlignmentStructure)
-from ._exception import (SequenceCollectionError, AlignmentError)
 
-__all__ = ['Alignment', 'SequenceCollection',
-           'StripedSmithWaterman', 'AlignmentStructure',
-           'local_pairwise_align_ssw', 'SequenceCollectionError',
-           'AlignmentError', 'global_pairwise_align',
+__all__ = ['TabularMSA', 'StripedSmithWaterman', 'AlignmentStructure',
+           'local_pairwise_align_ssw', 'global_pairwise_align',
            'global_pairwise_align_nucleotide', 'global_pairwise_align_protein',
            'local_pairwise_align', 'local_pairwise_align_nucleotide',
            'local_pairwise_align_protein', 'make_identity_substitution_matrix']
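make_identity_substitution_matrix is listed under "General functionality" above but has no example in view in this hunk. A hedged sketch of how it is typically paired with local_pairwise_align: the positional argument order mirrors local_pairwise_align_protein(s1, s2, gap_open, gap_extend) shown earlier, with an explicit substitution matrix added, and the exact signatures and default alphabet are assumptions that should be checked against the installed release.

>>> from skbio import DNA
>>> from skbio.alignment import (local_pairwise_align,
...                              make_identity_substitution_matrix)
>>> submat = make_identity_substitution_matrix(1, -2)  # +1 match, -2 mismatch
>>> msa, score, start_end = local_pairwise_align(
...     DNA("GCGTGCCTAAGGTATGCAAG"), DNA("ACGTGCCTAGGTACGCAAG"),
...     5, 2, submat)  # gap open 5, gap extend 2; order assumed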
diff --git a/skbio/alignment/_alignment.py b/skbio/alignment/_alignment.py
deleted file mode 100644
index 2a7440e..0000000
--- a/skbio/alignment/_alignment.py
+++ /dev/null
@@ -1,1374 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-from future.builtins import zip, range
-from future.utils import viewkeys, viewitems
-
-from collections import Counter, defaultdict
-
-import numpy as np
-from scipy.stats import entropy
-import six
-
-from skbio._base import SkbioObject
-from skbio.sequence import Sequence
-from skbio.stats.distance import DistanceMatrix
-from ._exception import (SequenceCollectionError, AlignmentError)
-from skbio.util._decorator import experimental
-
-
-class SequenceCollection(SkbioObject):
-    """Class for storing collections of biological sequences.
-
-    Parameters
-    ----------
-    seqs : list of `skbio.Sequence` objects
-        The `skbio.Sequence` objects to load into a new `SequenceCollection`
-        object.
-    validate : bool, optional
-        If True, runs the `is_valid` method after construction and raises
-        `SequenceCollectionError` if ``is_valid == False``.
-
-    Raises
-    ------
-    skbio.SequenceCollectionError
-        If ``validate == True`` and ``is_valid == False``.
-
-    See Also
-    --------
-    skbio
-    skbio.DNA
-    skbio.RNA
-    skbio.Protein
-    Alignment
-
-    Examples
-    --------
-    >>> from skbio import SequenceCollection
-    >>> from skbio import DNA
-    >>> sequences = [DNA('ACCGT', metadata={'id': "seq1"}),
-    ...              DNA('AACCGGT', metadata={'id': "seq2"})]
-    >>> s1 = SequenceCollection(sequences)
-    >>> s1
-    <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
-
-    """
-    default_write_format = 'fasta'
-
-    @experimental(as_of="0.4.0")
-    def __init__(self, seqs):
-        # TODO: find a good way to support generic Sequence objects in
-        # SequenceCollection and Alignment. The issue is that some methods
-        # assume that a sequence has knowledge of gap characters and a
-        # standard alphabet, which aren't present on Sequence. For now, if
-        # these methods are called by a user they'll get an error (likely
-        # an AttributeError).
-        self._data = seqs
-        self._id_to_index = {}
-        for i, seq in enumerate(self._data):
-            if 'id' not in seq.metadata:
-                raise SequenceCollectionError(
-                    "'id' must be included in the sequence metadata")
-            id_ = seq.metadata['id']
-
-            if id_ in self:
-                raise SequenceCollectionError(
-                    "All sequence ids must be unique, but "
-                    "id '%s' is present multiple times." % id_)
-            else:
-                self._id_to_index[id_] = i
-
-    @experimental(as_of="0.4.0")
-    def __contains__(self, id):
-        r"""The in operator.
-
-        Parameters
-        ----------
-        id : str
-            The `skbio.Sequence.id` to look up in the `SequenceCollection`.
-
-        Returns
-        -------
-        bool
-            Returns `True` if `id` is the `skbio.Sequence.id` of a sequence in
-            the `SequenceCollection`.
-
-        """
-        return id in self._id_to_index
-
-    @experimental(as_of="0.4.0")
-    def __eq__(self, other):
-        r"""The equality operator.
-
-        Parameters
-        ----------
-        other : `SequenceCollection`
-            The `SequenceCollection` to test for equality against.
-
-        Returns
-        -------
-        bool
-            Indicates whether `self` and `other` are equal.
-
-        Notes
-        -----
-        `SequenceCollection` objects are equal if they are the same type,
-        contain the same number of sequences, and if each of the
-        `skbio.Sequence` objects, in order, are equal.
-
-        """
-        if self.__class__ != other.__class__:
-            return False
-        elif len(self) != len(other):
-            return False
-        else:
-            for self_seq, other_seq in zip(self, other):
-                if self_seq != other_seq:
-                    return False
-        return True
-
-    @experimental(as_of="0.4.0")
-    def __getitem__(self, index):
-        r"""The indexing operator.
-
-        Parameters
-        ----------
-        index : int, str
-            The position or sequence id of the `skbio.Sequence` to return from
-            the `SequenceCollection`.
-
-        Returns
-        -------
-        skbio.Sequence
-            The `skbio.Sequence` at the specified index in the
-            `SequenceCollection`.
-
-        Examples
-        --------
-        >>> from skbio import DNA, SequenceCollection
-        >>> sequences = [DNA('ACCGT', metadata={'id': "seq1"}),
-        ...              DNA('AACCGGT', metadata={'id': "seq2"})]
-        >>> sc = SequenceCollection(sequences)
-        >>> sc[0]
-        DNA
-        -----------------------------
-        Metadata:
-            'id': 'seq1'
-        Stats:
-            length: 5
-            has gaps: False
-            has degenerates: False
-            has non-degenerates: True
-            GC-content: 60.00%
-        -----------------------------
-        0 ACCGT
-        >>> sc["seq1"]
-        DNA
-        -----------------------------
-        Metadata:
-            'id': 'seq1'
-        Stats:
-            length: 5
-            has gaps: False
-            has degenerates: False
-            has non-degenerates: True
-            GC-content: 60.00%
-        -----------------------------
-        0 ACCGT
-
-        """
-        if isinstance(index, six.string_types):
-            return self.get_seq(index)
-        else:
-            return self._data[index]
-
-    @experimental(as_of="0.4.0")
-    def __iter__(self):
-        r"""The iter operator.
-
-        Returns
-        -------
-        iterator
-            `skbio.Sequence` iterator for the `SequenceCollection`.
-
-        """
-        return iter(self._data)
-
-    @experimental(as_of="0.4.0")
-    def __len__(self):
-        r"""The len operator.
-
-        Returns
-        -------
-        int
-            The number of sequences in the `SequenceCollection`.
-
-        """
-        return self.sequence_count()
-
-    @experimental(as_of="0.4.0")
-    def __ne__(self, other):
-        r"""The inequality operator.
-
-        Parameters
-        ----------
-        other : `SequenceCollection`
-
-        Returns
-        -------
-        bool
-            Indicates whether self and other are not equal.
-
-        Notes
-        -----
-        See `SequenceCollection.__eq__` for a description of what it means for
-        a pair of `SequenceCollection` objects to be equal.
-
-        """
-        return not self.__eq__(other)
-
-    @experimental(as_of="0.4.0")
-    def __repr__(self):
-        r"""The repr method.
-
-        Returns
-        -------
-        str
-            Returns a string representation of the object.
-
-        Notes
-        -----
-        String representation contains the class name, the number of sequences
-        in the `SequenceCollection` (n), and the mean and standard deviation
-        sequence length.
-
-        Examples
-        --------
-        >>> from skbio import SequenceCollection
-        >>> from skbio import DNA
-        >>> sequences = [DNA('ACCGT', metadata={'id': "seq1"}),
-        ...              DNA('AACCGGT', metadata={'id': "seq2"})]
-        >>> s1 = SequenceCollection(sequences)
-        >>> print(repr(s1))
-        <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
-
-        """
-        cn = self.__class__.__name__
-        count, center, spread = self.distribution_stats()
-        return "<%s: n=%d; mean +/- std length=%.2f +/- %.2f>" \
-            % (cn, count, center, spread)
-
-    @experimental(as_of="0.4.0")
-    def __reversed__(self):
-        """The reversed method.
-
-        Returns
-        -------
-        iterator
-            `skbio.Sequence` iterator for the `SequenceCollection` in reverse
-            order.
-
-        """
-        return reversed(self._data)
-
-    @experimental(as_of="0.4.0")
-    def __str__(self):
-        r"""The str method.
-
-        Returns
-        -------
-        str
-            Fasta-formatted string of all sequences in the object.
-
-        """
-        return str(''.join(self.write([], format='fasta')))
-
-    @experimental(as_of="0.4.0")
-    def distances(self, distance_fn):
-        """Compute distances between all pairs of sequences
-
-        Parameters
-        ----------
-        distance_fn : function
-            Function for computing the distance between a pair of sequences.
-            This must take two sequences as input (as `skbio.Sequence` objects)
-            and return a single integer or float value.
-
-        Returns
-        -------
-        skbio.DistanceMatrix
-            Matrix containing the distances between all pairs of sequences.
-
-        """
-        sequence_count = self.sequence_count()
-        dm = np.zeros((sequence_count, sequence_count))
-        ids = []
-        for i in range(sequence_count):
-            self_i = self[i]
-            ids.append(self_i.metadata['id'])
-            for j in range(i):
-                dm[i, j] = dm[j, i] = self_i.distance(self[j], distance_fn)
-        return DistanceMatrix(dm, ids)
-
-    @experimental(as_of="0.4.0")
-    def distribution_stats(self, center_f=np.mean, spread_f=np.std):
-        r"""Return sequence count, and center and spread of sequence lengths
-
-        Parameters
-        ----------
-        center_f : function
-            Should take an array_like object and return a single value
-            representing the center of the distribution.
-        spread_f : function
-            Should take an array_like object and return a single value
-            representing the spread of the distribution.
-
-        Returns
-        -------
-        tuple of (int, float, float)
-            The sequence count, center of length distribution, spread of length
-            distribution.
-
-        Notes
-        -----
-        Alternatives for `center_f` and `spread_f` could be median and median
-        absolute deviation.
-
-        Examples
-        --------
-        >>> from skbio import SequenceCollection
-        >>> from skbio import DNA
-        >>> sequences = [DNA('ACCGT', metadata={'id': "seq1"}),
-        ...              DNA('AACCGGT', metadata={'id': "seq2"})]
-        >>> s1 = SequenceCollection(sequences)
-        >>> s1.distribution_stats()
-        (2, 6.0, 1.0)
-
-        """
-        if self.is_empty():
-            return (0, 0.0, 0.0)
-        else:
-            sequence_count = self.sequence_count()
-            sequence_lengths = self.sequence_lengths()
-            return (sequence_count, center_f(sequence_lengths),
-                    spread_f(sequence_lengths))
-
-    @experimental(as_of="0.4.0")
-    def degap(self):
-        r"""Return a new `SequenceCollection` with all gap characters removed.
-
-        Returns
-        -------
-        SequenceCollection
-            A new `SequenceCollection` where `skbio.Sequence.degap` has been
-            called on each sequence.
-
-        Examples
-        --------
-        >>> from skbio import SequenceCollection
-        >>> from skbio import DNA
-        >>> sequences = [DNA('A--CCGT.', metadata={'id': "seq1"}),
-        ...              DNA('.AACCG-GT.', metadata={'id': "seq2"})]
-        >>> s1 = SequenceCollection(sequences)
-        >>> s2 = s1.degap()
-        >>> s2
-        <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
-
-        """
-        return SequenceCollection([seq.degap() for seq in self])
-
-    @experimental(as_of="0.4.0")
-    def get_seq(self, id):
-        r"""Return a sequence from the `SequenceCollection` by its id.
-
-        Parameters
-        ----------
-        id : str
-            The id of the sequence to return.
-
-        Returns
-        -------
-        skbio.Sequence
-            The `skbio.Sequence` with `id`.
-
-        Raises
-        ------
-        KeyError
-            If `id` is not in the `SequenceCollection` object.
-
-        Examples
-        --------
-        >>> from skbio import SequenceCollection
-        >>> from skbio import DNA
-        >>> sequences = [DNA('A--CCGT.', metadata={'id': "seq1"}),
-        ...              DNA('.AACCG-GT.', metadata={'id': "seq2"})]
-        >>> s1 = SequenceCollection(sequences)
-        >>> print(s1['seq1'])
-        A--CCGT.
-
-        """
-        return self[self._id_to_index[id]]
-
-    @experimental(as_of="0.4.0")
-    def ids(self):
-        """Returns the `Sequence` ids
-
-        Returns
-        -------
-        list
-            The ordered list of ids for the `skbio.Sequence` objects in the
-            `SequenceCollection`.
-
-        Examples
-        --------
-        >>> from skbio import SequenceCollection
-        >>> from skbio import DNA
-        >>> sequences = [DNA('A--CCGT.', metadata={'id': "seq1"}),
-        ...              DNA('.AACCG-GT.', metadata={'id': "seq2"})]
-        >>> s1 = SequenceCollection(sequences)
-        >>> print(s1.ids())
-        ['seq1', 'seq2']
-
-        """
-        return [seq.metadata['id'] for seq in self]
-
-    @experimental(as_of="0.4.0")
-    def update_ids(self, ids=None, func=None, prefix=""):
-        """Update sequence IDs on the sequence collection.
-
-        IDs can be updated by providing a sequence of new IDs (`ids`) or a
-        function that maps current IDs to new IDs (`func`).
-
-        Default behavior (if `ids` and `func` are not provided) is to create
-        new IDs that are unique positive integers (starting at 1) cast as
-        strings, optionally preceded by `prefix`. For example, ``('1', '2',
-        '3', ...)``.
-
-        Parameters
-        ----------
-        ids : sequence of str, optional
-            New IDs to update on the sequence collection.
-        func : function, optional
-            Function accepting a sequence of current IDs and returning a
-            sequence of new IDs to update on the sequence collection.
-        prefix : str, optional
-            If `ids` and `func` are both ``None``, `prefix` is prepended to
-            each new integer-based ID (see description of default behavior
-            above).
-
-        Returns
-        -------
-        SequenceCollection
-            New ``SequenceCollection`` (or subclass) containing sequences with
-            updated IDs.
-        dict
-            Mapping of new IDs to old IDs.
-
-        Raises
-        ------
-        SequenceCollectionError
-            If both `ids` and `func` are provided, `prefix` is provided with
-            either `ids` or `func`, or the number of new IDs does not match the
-            number of sequences in the sequence collection.
-
-        Notes
-        -----
-        The default behavior can be useful when writing sequences out for use
-        with programs that are picky about their sequence IDs
-        (e.g., RAxML [1]_).
-
-        References
-        ----------
-        .. [1] Stamatakis A. "RAxML Version 8: A Tool for Phylogenetic
-           Analysis and Post-Analysis of Large Phylogenies". Bioinformatics,
-           2014.
-
-        Examples
-        --------
-        Define a sequence collection containing two sequences with IDs "abc"
-        and "def":
-
-        >>> from skbio import DNA, SequenceCollection
-        >>> sequences = [DNA('A--CCGT.', metadata={'id': "abc"}),
-        ...              DNA('.AACCG-GT.', metadata={'id': "def"})]
-        >>> s1 = SequenceCollection(sequences)
-        >>> s1.ids()
-        ['abc', 'def']
-
-        Update the IDs in the sequence collection, obtaining a new sequence
-        collection with IDs that are integer-based:
-
-        >>> s2, new_to_old_ids = s1.update_ids()
-        >>> s2.ids()
-        ['1', '2']
-
-        Alternatively, we can specify a function to map the current IDs to new
-        IDs. Let's define a function that appends ``'-new'`` to each ID:
-
-        >>> def id_mapper(ids):
-        ...     return [id_ + '-new' for id_ in ids]
-        >>> s3, new_to_old_ids = s1.update_ids(func=id_mapper)
-        >>> s3.ids()
-        ['abc-new', 'def-new']
-
-        We can also directly update the IDs with a new sequence of IDs:
-
-        >>> s4, new_to_old_ids = s1.update_ids(ids=['ghi', 'jkl'])
-        >>> s4.ids()
-        ['ghi', 'jkl']
-
-        """
-        if ids is not None and func is not None:
-            raise SequenceCollectionError("ids and func cannot both be "
-                                          "provided.")
-        if (ids is not None and prefix) or (func is not None and prefix):
-            raise SequenceCollectionError("prefix cannot be provided if ids "
-                                          "or func is provided.")
-
-        if ids is not None:
-            def func(_):
-                return ids
-
-        elif func is None:
-            def func(_):
-                new_ids = []
-                for i in range(1, len(self) + 1):
-                    new_ids.append("%s%d" % (prefix, i))
-                return new_ids
-
-        old_ids = self.ids()
-        new_ids = func(old_ids)
-
-        if len(new_ids) != len(old_ids):
-            raise SequenceCollectionError(
-                "Number of new IDs must be equal to the number of existing "
-                "IDs (%d != %d)." % (len(new_ids), len(old_ids)))
-
-        new_to_old_ids = dict(zip(new_ids, old_ids))
-
-        new_seqs = []
-        for new_id, seq in zip(new_ids, self):
-            new_seq = seq.copy()
-            new_seq.metadata['id'] = new_id
-            new_seqs.append(new_seq)
-
-        return self.__class__(new_seqs), new_to_old_ids
-
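The examples above cover the default, `func`, and `ids` cases; the remaining `prefix` option simply prepends a string to the default integer-based IDs. A short sketch:

    from skbio import DNA, SequenceCollection

    s1 = SequenceCollection([DNA('A--CCGT.', metadata={'id': 'abc'}),
                             DNA('.AACCG-GT.', metadata={'id': 'def'})])
    s2, new_to_old = s1.update_ids(prefix='seq-')
    # s2.ids()   -> ['seq-1', 'seq-2']
    # new_to_old -> {'seq-1': 'abc', 'seq-2': 'def'}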
-    @experimental(as_of="0.4.0")
-    def is_empty(self):
-        """Return True if the SequenceCollection is empty
-
-        Returns
-        -------
-        bool
-            ``True`` if `self` contains zero sequences, and ``False``
-            otherwise.
-
-        """
-        return self.sequence_count() == 0
-
-    @experimental(as_of="0.4.0")
-    def iteritems(self):
-        """Generator of id, sequence tuples
-
-        Returns
-        -------
-        generator of tuples
-            Each tuple contains an (ID, `skbio.Sequence`) pair, where the ID
-            is taken from the sequence's ``metadata['id']``.
-
-        """
-        for seq in self:
-            yield seq.metadata['id'], seq
-
-    @experimental(as_of="0.4.0")
-    def sequence_count(self):
-        """Return the count of sequences in the `SequenceCollection`
-
-        Returns
-        -------
-        int
-            The number of sequences in the `SequenceCollection`.
-
-        See Also
-        --------
-        sequence_lengths
-        Alignment.sequence_length
-
-        Examples
-        --------
-        >>> from skbio import SequenceCollection
-        >>> from skbio import DNA
-        >>> sequences = [DNA('A--CCGT.', metadata={'id': "seq1"}),
-        ...              DNA('.AACCG-GT.', metadata={'id': "seq2"})]
-        >>> s1 = SequenceCollection(sequences)
-        >>> print(s1.sequence_count())
-        2
-
-        """
-        return len(self._data)
-
-    @experimental(as_of="0.4.0")
-    def kmer_frequencies(self, k, overlap=True, relative=False):
-        """Return k-word frequencies for sequences in ``SequenceCollection``.
-
-        Parameters
-        ----------
-        k : int
-            The word length.
-        overlap : bool, optional
-            If ``True`` (the default), count overlapping kmers. This is only
-            relevant when `k` > 1.
-        relative : bool, optional
-            If ``True``, return relative frequencies instead of counts.
-
-        Returns
-        -------
-        list
-            List of ``collections.Counter`` objects (or dicts of relative
-            frequencies, if `relative` is ``True``), one for each sequence
-            in the ``SequenceCollection``, representing the frequency of each
-            kmer in that sequence.
-
-        See Also
-        --------
-        Alignment.position_frequencies
-
-        Examples
-        --------
-        >>> from skbio import SequenceCollection, DNA
-        >>> sequences = [DNA('A', metadata={'id': "seq1"}),
-        ...              DNA('AT', metadata={'id': "seq2"}),
-        ...              DNA('TTTT', metadata={'id': "seq3"})]
-        >>> s1 = SequenceCollection(sequences)
-        >>> for freqs in s1.kmer_frequencies(1):
-        ...     print(freqs)
-        Counter({'A': 1})
-        Counter({'A': 1, 'T': 1})
-        Counter({'T': 4})
-        >>> for freqs in s1.kmer_frequencies(2):
-        ...     print(freqs)
-        Counter()
-        Counter({'AT': 1})
-        Counter({'TT': 3})
-
-        """
-        return [s.kmer_frequencies(k, overlap=overlap, relative=relative)
-                for s in self]
-
-    @experimental(as_of="0.4.0")
-    def sequence_lengths(self):
-        """Return lengths of the sequences in the `SequenceCollection`
-
-        Returns
-        -------
-        list
-            The ordered list of sequence lengths.
-
-        See Also
-        --------
-        sequence_count
-
-        Examples
-        --------
-        >>> from skbio import SequenceCollection
-        >>> from skbio import DNA
-        >>> sequences = [DNA('ACCGT', metadata={'id': "seq1"}),
-        ...              DNA('AACCGGT', metadata={'id': "seq2"})]
-        >>> s1 = SequenceCollection(sequences)
-        >>> print(s1.sequence_lengths())
-        [5, 7]
-
-        """
-        return [len(seq) for seq in self]
-
-
-class Alignment(SequenceCollection):
-    """Class for storing alignments of biological sequences.
-
-    The ``Alignment`` class adds methods to the ``SequenceCollection`` class
-    that are useful for working with aligned biological sequences.
-
-    Parameters
-    ----------
-    seqs : list of `skbio.Sequence` objects
-        The `skbio.Sequence` objects to load into a new `Alignment` object.
-    score : float, optional
-        The score of the alignment, if applicable (usually only if the
-        alignment was just constructed).
-    start_end_positions : iterable of two-item tuples, optional
-        The start and end positions of each input sequence in the alignment,
-        if applicable (usually only if the alignment was just constructed using
-        a local alignment algorithm). Note that these should be indexes into
-        the unaligned sequences, though the `Alignment` object itself doesn't
-        know about these unless it is degapped.
-
-    Raises
-    ------
-    skbio.AlignmentError
-        If not all the sequences have the same length.
-
-    Notes
-    -----
-    By definition, all of the sequences in an alignment must be of the same
-    length. For this reason, an alignment can be thought of as a matrix of
-    sequences (rows) by positions (columns).
-
-    See Also
-    --------
-    skbio
-    skbio.DNA
-    skbio.RNA
-    skbio.Protein
-    SequenceCollection
-
-    Examples
-    --------
-    >>> from skbio import Alignment
-    >>> from skbio import DNA
-    >>> sequences = [DNA('A--CCGT', metadata={'id': "seq1"}),
-    ...              DNA('AACCGGT', metadata={'id': "seq2"})]
-    >>> a1 = Alignment(sequences)
-    >>> a1
-    <Alignment: n=2; mean +/- std length=7.00 +/- 0.00>
-
-    """
-
-    @experimental(as_of="0.4.0")
-    def __init__(self, seqs, score=None, start_end_positions=None):
-        super(Alignment, self).__init__(seqs)
-
-        if not self._validate_lengths():
-            raise AlignmentError("All sequences need to be of equal length.")
-
-        if score is not None:
-            score = float(score)
-        self._score = score
-        self._start_end_positions = start_end_positions
-
-    @experimental(as_of="0.4.0")
-    def distances(self, distance_fn=None):
-        """Compute distances between all pairs of sequences
-
-        Parameters
-        ----------
-        distance_fn : function, optional
-            Function for computing the distance between a pair of sequences.
-            This must take two sequences as input (as `skbio.Sequence` objects)
-            and return a single integer or float value. Defaults to the default
-            distance function used by `skbio.Sequence.distance`.
-
-        Returns
-        -------
-        skbio.DistanceMatrix
-            Matrix containing the distances between all pairs of sequences.
-
-        See Also
-        --------
-        skbio.Sequence.distance
-
-        Examples
-        --------
-        >>> from skbio import Alignment
-        >>> from skbio import DNA
-        >>> seqs = [DNA("A-CCGGG", metadata={'id': "s1"}),
-        ...         DNA("ATCC--G", metadata={'id': "s2"}),
-        ...         DNA("ATCCGGA", metadata={'id': "s3"})]
-        >>> a1 = Alignment(seqs)
-        >>> print(a1.distances())
-        3x3 distance matrix
-        IDs:
-        's1', 's2', 's3'
-        Data:
-        [[ 0.          0.42857143  0.28571429]
-         [ 0.42857143  0.          0.42857143]
-         [ 0.28571429  0.42857143  0.        ]]
-
-        """
-        return super(Alignment, self).distances(distance_fn)
-
-    @experimental(as_of="0.4.0")
-    def score(self):
-        """Returns the score of the alignment.
-
-        Returns
-        -------
-        float, None
-            The score of the alignment, or ``None`` if this was not provided on
-            object construction.
-
-        Notes
-        -----
-        This value will often be ``None``, as it is generally only going to be
-        provided on construction if the alignment itself was built within
-        scikit-bio.
-
-        """
-        return self._score
-
-    @experimental(as_of="0.4.0")
-    def start_end_positions(self):
-        """Returns the (start, end) positions for each aligned sequence.
-
-        Returns
-        -------
-        list, None
-            The list of sequence start/end positions, or ``None`` if this was
-            not provided on object construction.
-
-        Notes
-        -----
-        The start/end positions indicate the range of the unaligned sequences
-        in the alignment. For example, if local alignment were performed on the
-        sequences ACA and TACAT, depending on the specific algorithm that was
-        used to perform the alignment, the start/end positions would likely be:
-        ``[(0,2), (1,3)]``. This indicates that the first and last positions of
-        the second sequence were not included in the alignment, and the
-        aligned sequences were therefore:
-        ACA
-        ACA
-
-        This value will often be ``None``, as it is generally only going to be
-        provided on construction if the alignment itself was built within
-        scikit-bio.
-
-        """
-        return self._start_end_positions
-
-    @experimental(as_of="0.4.0")
-    def subalignment(self, seqs_to_keep=None, positions_to_keep=None,
-                     invert_seqs_to_keep=False,
-                     invert_positions_to_keep=False):
-        """Returns new `Alignment` that is a subset of the current `Alignment`
-
-        Parameters
-        ----------
-        seqs_to_keep : list, optional
-            A list of sequence ids to be retained in the resulting
-            `Alignment`. If this is not passed, the default will be to retain
-            all sequences.
-        positions_to_keep : list, optional
-            A list of position indices to be retained in the resulting
-            `Alignment`. If this is not passed, the default will be to retain
-            all positions.
-        invert_seqs_to_keep : bool, optional
-            If `True`, the sequences identified in `seqs_to_keep` will be
-            discarded, rather than retained.
-        invert_positions_to_keep : bool, optional
-            If `True`, the positions identified in `positions_to_keep` will be
-            discarded, rather than retained.
-
-        Returns
-        -------
-        Alignment
-            The specified subalignment.
-
-        Examples
-        --------
-        >>> from skbio import Alignment
-        >>> from skbio import DNA
-        >>> seqs = [DNA("A-CCGGG", metadata={'id': "s1"}),
-        ...         DNA("ATCC--G", metadata={'id': "s2"}),
-        ...         DNA("ATCCGGA", metadata={'id': "s3"})]
-        >>> a1 = Alignment(seqs)
-        >>> a1
-        <Alignment: n=3; mean +/- std length=7.00 +/- 0.00>
-        >>> a1.subalignment(seqs_to_keep=["s1", "s2"])
-        <Alignment: n=2; mean +/- std length=7.00 +/- 0.00>
-        >>> a1.subalignment(seqs_to_keep=["s1", "s2"],
-        ...         invert_seqs_to_keep=True)
-        <Alignment: n=1; mean +/- std length=7.00 +/- 0.00>
-        >>> a1.subalignment(positions_to_keep=[0, 2, 3, 5])
-        <Alignment: n=3; mean +/- std length=4.00 +/- 0.00>
-        >>> a1.subalignment(positions_to_keep=[0, 2, 3, 5],
-        ...         invert_positions_to_keep=True)
-        <Alignment: n=3; mean +/- std length=3.00 +/- 0.00>
-        >>> a1.subalignment(seqs_to_keep=["s1", "s2"],
-        ...         positions_to_keep=[0, 2, 3, 5])
-        <Alignment: n=2; mean +/- std length=4.00 +/- 0.00>
-
-        """
-        # if seqs_to_keep was not passed
-        if seqs_to_keep is None:
-            # and invert_seqs_to_keep is True
-            if invert_seqs_to_keep:
-                # return an empty alignment (because we're inverting the
-                # default of keeping all sequences)
-                return self.__class__([])
-            # else if invert_seqs_to_keep is False
-            else:
-                # default to returning all sequences
-                def keep_seq(i, id):
-                    return True
-        # else, if seqs_to_keep was passed
-        else:
-            seqs_to_keep = set(seqs_to_keep)
-            # and invert_seqs_to_keep is True
-            if invert_seqs_to_keep:
-                # keep only sequences that were not listed in seqs_to_keep
-                def keep_seq(i, id):
-                    return not (id in seqs_to_keep or
-                                i in seqs_to_keep)
-            # else if invert_seqs_to_keep is False
-            else:
-                # keep only sequences that were listed in seqs_to_keep
-                def keep_seq(i, id):
-                    return (id in seqs_to_keep or
-                            i in seqs_to_keep)
-
-        # if positions_to_keep was not passed
-        if positions_to_keep is None:
-            # and invert_positions_to_keep is True
-            if invert_positions_to_keep:
-                # return an empty alignment (because we're inverting the
-                # default of keeping all positions)
-                return self.__class__([])
-            # else if invert_positions_to_keep is False
-            else:
-                # default to returning all positions
-                def keep_position(pos):
-                    return True
-        # else, if positions_to_keep was passed
-        else:
-            positions_to_keep = set(positions_to_keep)
-            # and invert_positions_to_keep is True
-            if invert_positions_to_keep:
-                # keep only positions that were not listed in
-                # positions_to_keep
-                def keep_position(pos):
-                    return pos not in positions_to_keep
-            # else if invert_positions_to_keep is False
-            else:
-                # keep only positions that were listed in positions_to_keep
-                def keep_position(pos):
-                    return pos in positions_to_keep
-
-        # prep the result object
-        result = []
-        # indices to keep
-        indices = [
-            i for i in range(self.sequence_length()) if keep_position(i)]
-        # iterate over sequences
-        for sequence_index, seq in enumerate(self):
-            # determine if we're keeping the current sequence
-            if keep_seq(sequence_index, seq.metadata['id']):
-                # slice the current sequence with the indices
-                result.append(seq[indices])
-            # if we're not keeping the current sequence, move on to the next
-            else:
-                continue
-        # pack the result up in the same type of object as the current object
-        # and return it
-        return self.__class__(result)
-
-    @experimental(as_of="0.4.0")
-    def iter_positions(self, constructor=None):
-        """Generator of Alignment positions (i.e., columns)
-
-        Parameters
-        ----------
-        constructor : type, optional
-            Constructor function for creating the positional values. By
-            default, these will be the same type as corresponding
-            `skbio.Sequence` in the `Alignment` object, but
-            you can pass a `skbio.Sequence` class here to ensure that they are
-            all of consistent type, or ``str`` to have them returned as
-            strings.
-
-        Returns
-        -------
-        GeneratorType
-            Generator of lists of positional values in the `Alignment`
-            (effectively the transpose of the alignment).
-
-        See Also
-        --------
-        iter
-
-        Examples
-        --------
-        >>> from skbio import DNA, Alignment
-        >>> sequences = [DNA('ACCGT--', metadata={'id': "seq1"}),
-        ...              DNA('AACCGGT', metadata={'id': "seq2"})]
-        >>> aln = Alignment(sequences)
-        >>> for position in aln.iter_positions():
-        ...     for seq in position:
-        ...         print(seq.metadata['id'], seq)
-        ...     print('')
-        seq1 A
-        seq2 A
-        <BLANKLINE>
-        seq1 C
-        seq2 A
-        <BLANKLINE>
-        seq1 C
-        seq2 C
-        <BLANKLINE>
-        seq1 G
-        seq2 C
-        <BLANKLINE>
-        seq1 T
-        seq2 G
-        <BLANKLINE>
-        seq1 -
-        seq2 G
-        <BLANKLINE>
-        seq1 -
-        seq2 T
-        <BLANKLINE>
-
-        >>> for position in aln.iter_positions(constructor=str):
-        ...     position
-        ['A', 'A']
-        ['C', 'A']
-        ['C', 'C']
-        ['G', 'C']
-        ['T', 'G']
-        ['-', 'G']
-        ['-', 'T']
-
-        """
-        if constructor is None:
-            def constructor(s):
-                return s
-        for i in range(self.sequence_length()):
-            position = [constructor(seq[i]) for seq in self]
-            yield position
-
-    @experimental(as_of="0.4.0")
-    def majority_consensus(self):
-        """Return the majority consensus sequence for the alignment.
-
-        Returns
-        -------
-        skbio.Sequence
-            The consensus sequence of the `Alignment`. In other words, at each
-            position the most common character is chosen, and those characters
-            are combined to create a new sequence. The returned sequence will
-            not have any metadata set; only the sequence characters themselves
-            are determined by the alignment.
-            The type of biological sequence that is returned will be the same
-            type as the first sequence in the alignment, or ``Sequence`` if the
-            alignment is empty.
-
-        Notes
-        -----
-        If there are two characters that are equally abundant in the sequence
-        at a given position, the choice of which of those characters will be
-        present at that position in the result is arbitrary.
-
-        Examples
-        --------
-        >>> from skbio import Alignment
-        >>> from skbio import DNA
-        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
-        ...              DNA('AT-C', metadata={'id': "seq2"}),
-        ...              DNA('TT-C', metadata={'id': "seq3"})]
-        >>> a1 = Alignment(sequences)
-        >>> a1.majority_consensus()
-        DNA
-        -----------------------------
-        Stats:
-            length: 4
-            has gaps: True
-            has degenerates: False
-            has non-degenerates: True
-            GC-content: 33.33%
-        -----------------------------
-        0 AT-C
-
-        """
-        if self.is_empty():
-            seq_constructor = Sequence
-        else:
-            seq_constructor = self[0].__class__
-
-        # Counter.most_common returns an ordered list of the n most common
-        # (sequence, count) items in Counter. Here we set n=1, and take only
-        # the character, not the count.
-        return seq_constructor(''.join(c.most_common(1)[0][0]
-                               for c in self.position_counters()))
-
-    @experimental(as_of="0.4.0")
-    def omit_gap_positions(self, maximum_gap_frequency):
-        """Returns Alignment with positions filtered based on gap frequency
-
-        Parameters
-        ----------
-        maximum_gap_frequency : float
-            The maximum fraction of the sequences that can contain a gap at a
-            given position for that position to be retained in the resulting
-            `Alignment`.
-
-        Returns
-        -------
-        Alignment
-            The subalignment containing only the positions with gaps in fewer
-            than (or equal to) `maximum_gap_frequency` fraction of the
-            sequences.
-
-        Examples
-        --------
-        >>> from skbio import Alignment
-        >>> from skbio import DNA
-        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
-        ...              DNA('AT-C', metadata={'id': "seq2"}),
-        ...              DNA('TT-C', metadata={'id': "seq3"})]
-        >>> a1 = Alignment(sequences)
-        >>> a2 = a1.omit_gap_positions(0.50)
-        >>> a2
-        <Alignment: n=3; mean +/- std length=3.00 +/- 0.00>
-        >>> print(a2[0])
-        AC-
-        >>> print(a2[1])
-        ATC
-        >>> print(a2[2])
-        TTC
-
-        """
-        # handle empty Alignment case
-        if self.is_empty():
-            return self.__class__([])
-
-        position_frequencies = self.position_frequencies()
-        gap_chars = self[0].gap_chars
-
-        positions_to_keep = []
-        for i, f in enumerate(position_frequencies):
-            gap_frequency = sum([f[c] for c in gap_chars])
-            if gap_frequency <= maximum_gap_frequency:
-                positions_to_keep.append(i)
-        return self.subalignment(positions_to_keep=positions_to_keep)
-
-    @experimental(as_of="0.4.0")
-    def omit_gap_sequences(self, maximum_gap_frequency):
-        """Returns Alignment with sequences filtered based on gap frequency
-
-        Parameters
-        ----------
-        maximum_gap_frequency : float
-            The maximum fraction of the positions that can contain a gap in a
-            given sequence for that sequence to be retained in the resulting
-            `Alignment`.
-
-        Returns
-        -------
-        Alignment
-            The subalignment containing only the sequences with gaps in fewer
-            than (or equal to) `maximum_gap_frequency` fraction of the
-            positions.
-
-        Examples
-        --------
-        >>> from skbio import Alignment
-        >>> from skbio import DNA
-        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
-        ...              DNA('AT-C', metadata={'id': "seq2"}),
-        ...              DNA('TT-C', metadata={'id': "seq3"})]
-        >>> a1 = Alignment(sequences)
-        >>> a2 = a1.omit_gap_sequences(0.49)
-        >>> a2
-        <Alignment: n=2; mean +/- std length=4.00 +/- 0.00>
-        >>> print(a2[0])
-        AT-C
-        >>> print(a2[1])
-        TT-C
-
-        """
-        # handle empty Alignment case
-        if self.is_empty():
-            return self.__class__([])
-
-        base_frequencies = self.kmer_frequencies(k=1, relative=True)
-        gap_chars = self[0].gap_chars
-        seqs_to_keep = []
-        for seq, f in zip(self, base_frequencies):
-            gap_frequency = sum([f[c] for c in gap_chars])
-            if gap_frequency <= maximum_gap_frequency:
-                seqs_to_keep.append(seq.metadata['id'])
-        return self.subalignment(seqs_to_keep=seqs_to_keep)
-
-    @experimental(as_of="0.4.0")
-    def position_counters(self):
-        """Return counts of characters at each position in the alignment
-
-        Returns
-        -------
-        list
-            List of ``collections.Counter`` objects, one for each position in
-            the `Alignment`.
-
-        See Also
-        --------
-        position_frequencies
-        position_entropies
-
-        Examples
-        --------
-        >>> from skbio import Alignment
-        >>> from skbio import DNA
-        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
-        ...              DNA('AT-C', metadata={'id': "seq2"}),
-        ...              DNA('TT-C', metadata={'id': "seq3"})]
-        >>> a1 = Alignment(sequences)
-        >>> for counter in a1.position_counters():
-        ...     print(counter)
-        Counter({'A': 2, 'T': 1})
-        Counter({'T': 2, 'C': 1})
-        Counter({'-': 3})
-        Counter({'C': 2, '-': 1})
-
-        """
-        return [Counter(p) for p in self.iter_positions(constructor=str)]
-
-    @experimental(as_of="0.4.0")
-    def position_frequencies(self):
-        """Return frequencies of characters for positions in Alignment
-
-        Returns
-        -------
-        list
-            List of ``collections.defaultdict`` objects, one for each position
-            in the `Alignment`, representing the frequency of each character in
-            the `Alignment` at that position.
-
-        See Also
-        --------
-        position_counters
-        position_entropies
-        kmer_frequencies
-
-        Examples
-        --------
-        >>> from skbio import Alignment
-        >>> from skbio import DNA
-        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
-        ...              DNA('AT-C', metadata={'id': "seq2"}),
-        ...              DNA('TT-C', metadata={'id': "seq3"})]
-        >>> a1 = Alignment(sequences)
-        >>> position_freqs = a1.position_frequencies()
-        >>> round(position_freqs[0]['A'], 3)
-        0.667
-        >>> round(position_freqs[1]['A'], 3)
-        0.0
-
-        """
-        seq_count = self.sequence_count()
-        result = []
-        for pos_counter in self.position_counters():
-            freqs = defaultdict(float)
-            for char, count in viewitems(pos_counter):
-                freqs[char] = count / seq_count
-            result.append(freqs)
-        return result
-
-    @experimental(as_of="0.4.0")
-    def position_entropies(self, base=None,
-                           nan_on_non_standard_chars=True):
-        """Return Shannon entropy of positions in Alignment
-
-        Parameters
-        ----------
-        base : float, optional
-            Log base for entropy calculation. If not passed, default will be e
-            (i.e., natural log will be computed).
-        nan_on_non_standard_chars : bool, optional
-            If True, the entropy at positions containing characters outside of
-            the first sequence's `nondegenerate_chars` will be `np.nan`.
-            This is the default behavior because it is not clear how a gap or
-            degenerate character should contribute to a positional entropy.
-            This issue was described in [1]_.
-
-        Returns
-        -------
-        list
-            List of floats of Shannon entropy at `Alignment` positions. Shannon
-            entropy is defined in [2]_.
-
-        See Also
-        --------
-        position_counters
-        position_frequencies
-
-        References
-        ----------
-        .. [1] Identifying DNA and protein patterns with statistically
-           significant alignments of multiple sequences.
-           Hertz GZ, Stormo GD.
-           Bioinformatics. 1999 Jul-Aug;15(7-8):563-77.
-        .. [2] A Mathematical Theory of Communication
-           CE Shannon
-           The Bell System Technical Journal (1948).
-
-        Examples
-        --------
-        >>> from skbio import Alignment
-        >>> from skbio import DNA
-        >>> sequences = [DNA('AA--', metadata={'id': "seq1"}),
-        ...              DNA('AC-C', metadata={'id': "seq2"}),
-        ...              DNA('AT-C', metadata={'id': "seq3"}),
-        ...              DNA('TG-C', metadata={'id': "seq4"})]
-        >>> a1 = Alignment(sequences)
-        >>> print(a1.position_entropies())
-        [0.56233514461880829, 1.3862943611198906, nan, nan]
-
-        """
-        result = []
-        # handle empty Alignment case
-        if self.is_empty():
-            return result
-
-        iupac_standard_characters = self[0].nondegenerate_chars
-        for f in self.position_frequencies():
-            if (nan_on_non_standard_chars and
-                    len(viewkeys(f) - iupac_standard_characters) > 0):
-                result.append(np.nan)
-            else:
-                result.append(entropy(list(f.values()), base=base))
-        return result
-
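For the first position in the example above (characters 'A', 'A', 'A', 'T'), the character frequencies are 0.75 and 0.25, and the reported value is the natural-log Shannon entropy of that distribution. A quick arithmetic check:

    import math

    freqs = [0.75, 0.25]                      # position 0: three 'A', one 'T'
    h = -sum(p * math.log(p) for p in freqs)  # natural log, since base=None
    # h == 0.5623351446188083, matching the first entry returned by
    # a1.position_entropies(); the last two positions are nan because
    # they contain gap characters.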
-    @experimental(as_of="0.4.0")
-    def sequence_length(self):
-        """Return the number of positions in Alignment
-
-        Returns
-        -------
-        int
-            The number of positions in `Alignment`.
-
-        See Also
-        --------
-        sequence_lengths
-        sequence_count
-
-        Examples
-        --------
-        >>> from skbio import Alignment
-        >>> from skbio import DNA
-        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
-        ...              DNA('AT-C', metadata={'id': "seq2"}),
-        ...              DNA('TT-C', metadata={'id': "seq3"})]
-        >>> a1 = Alignment(sequences)
-        >>> a1.sequence_length()
-        4
-
-        """
-        # handle the empty Alignment case
-        if self.is_empty():
-            return 0
-        else:
-            return len(self._data[0])
-
-    def _validate_lengths(self):
-        """Return ``True`` if all sequences same length, ``False`` otherwise
-        """
-        seq1_length = self.sequence_length()
-        for seq in self:
-            if seq1_length != len(seq):
-                return False
-        return True
diff --git a/skbio/alignment/_exception.py b/skbio/alignment/_exception.py
deleted file mode 100644
index b4c447e..0000000
--- a/skbio/alignment/_exception.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-
-
-class SequenceCollectionError(Exception):
-    """General error for sequence collection validation failures."""
-    pass
-
-
-class AlignmentError(SequenceCollectionError):
-    """General error for alignment validation failures."""
-    pass
diff --git a/skbio/alignment/_indexing.py b/skbio/alignment/_indexing.py
new file mode 100644
index 0000000..d8bbe4c
--- /dev/null
+++ b/skbio/alignment/_indexing.py
@@ -0,0 +1,219 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.utils import with_metaclass
+from abc import ABCMeta, abstractmethod
+
+import numpy as np
+import pandas as pd
+
+
+class _Indexing(with_metaclass(ABCMeta, object)):
+    def __init__(self, instance, axis=None):
+        self._obj = instance
+        self._axis = axis
+
+    def __call__(self, axis=None):
+        """Set the axis to index on."""
+        # verify axis param, discard value
+        self._obj._is_sequence_axis(axis)
+        return self.__class__(self._obj, axis=axis)
+
+    def __getitem__(self, indexable):
+        if self._axis is not None:
+            if self._obj._is_sequence_axis(self._axis):
+                return self._slice_on_first_axis(self._obj, indexable)
+            else:
+                return self._slice_on_second_axis(self._obj, indexable)
+
+        if type(indexable) is tuple:
+            if len(indexable) > 2:
+                raise ValueError("Can only slice on two axes. Tuple is length:"
+                                 " %r" % len(indexable))
+            elif len(indexable) > 1:
+                return self._handle_both_axes(*indexable)
+            else:
+                indexable, = indexable
+
+        return self._slice_on_first_axis(self._obj, indexable)
+
+    def _handle_both_axes(self, seq_index, pos_index):
+        seq_index = self._convert_ellipsis(seq_index)
+        pos_index = self._convert_ellipsis(pos_index)
+
+        if not hasattr(seq_index, '__iter__') and seq_index == slice(None):
+            # Only slice second axis
+            return self._slice_on_second_axis(self._obj, pos_index)
+        else:
+            r = self._slice_on_first_axis(self._obj, seq_index)
+            if type(r) is self._obj.dtype:
+                # [1, 1] [1, *]
+                return r[pos_index]
+            else:
+                # [*, 1] [*, *]
+                return self._slice_on_second_axis(r, pos_index)
+
+    def _slice_on_second_axis(self, obj, indexable):
+        indexable = self._convert_ellipsis(indexable)
+        if self.is_scalar(indexable, axis=1):
+            # [..., 1]
+            return self._get_position(obj, indexable)
+        else:
+            # [..., *]
+            return self._slice_positions(obj, indexable)
+
+    def _slice_on_first_axis(self, obj, indexable):
+        indexable = self._convert_ellipsis(indexable)
+        if self.is_scalar(indexable, axis=0):
+            # [1]
+            return self._get_sequence(obj, indexable)
+        else:
+            # [*]
+            return self._slice_sequences(obj, indexable)
+
+    def _convert_ellipsis(self, indexable):
+        if indexable is Ellipsis:
+            return slice(None)
+        return indexable
+
+    @abstractmethod
+    def is_scalar(self, indexable, axis):
+        pass
+
+    @abstractmethod
+    def _get_sequence(self, obj, indexable):
+        pass
+
+    @abstractmethod
+    def _slice_sequences(self, obj, indexable):
+        pass
+
+    def _get_position(self, obj, indexable):
+        return obj._get_position_(indexable)
+
+    def _slice_positions(self, obj, indexable):
+        indexable = self._assert_bool_vector_right_size(indexable, axis=1)
+        indexable = self._convert_iterable_of_slices(indexable)
+        return obj._slice_positions_(indexable)
+
+    def _convert_iterable_of_slices(self, indexable):
+        # _assert_bool_vector_right_size will have converted the iterable to
+        # an ndarray if it wasn't yet.
+        if isinstance(indexable, np.ndarray) and indexable.dtype == object:
+            indexable = np.r_[tuple(indexable)]
+
+        return indexable
+
+    def _assert_bool_vector_right_size(self, indexable, axis):
+        if isinstance(indexable, np.ndarray):
+            pass
+        elif hasattr(indexable, '__iter__'):
+            indexable = np.asarray(list(indexable))
+        else:
+            return indexable
+
+        if indexable.dtype == bool and len(indexable) != self._obj.shape[axis]:
+            raise IndexError("Boolean index's length (%r) does not match the"
+                             " axis length (%r)" % (len(indexable),
+                                                    self._obj.shape[axis]))
+
+        return indexable
+
+
+class TabularMSAILoc(_Indexing):
+    def is_scalar(self, indexable, axis):
+        return np.isscalar(indexable)
+
+    def _get_sequence(self, obj, indexable):
+        return obj._get_sequence_iloc_(indexable)
+
+    def _slice_sequences(self, obj, indexable):
+        indexable = self._assert_bool_vector_right_size(indexable, axis=0)
+        indexable = self._convert_iterable_of_slices(indexable)
+        return obj._slice_sequences_iloc_(indexable)
+
+
+class TabularMSALoc(_Indexing):
+    def is_scalar(self, indexable, axis):
+        """
+        Sometimes (MultiIndex!) something that looks like a scalar, isn't
+        and vice-versa.
+
+        Consider:
+
+        A 0
+          1
+          2
+        B 0
+          1
+          2
+
+        'A' looks like a scalar, but isn't.
+        ('A', 0) doesn't look like a scalar, but it is.
+        """
+        index = self._obj.index
+        complete_key = False
+        partial_key = False
+        duplicated_key = False
+        if axis == 0 and self._has_fancy_index():
+            try:
+                if type(indexable) is tuple:
+                    complete_key = (len(indexable) == len(index.levshape) and
+                                    indexable in index)
+                partial_key = not complete_key and indexable in index
+            except TypeError:  # Unhashable type, no biggie
+                pass
+        if index.has_duplicates:
+            duplicated_key = indexable in index.get_duplicates()
+        return (not duplicated_key and
+                ((np.isscalar(indexable) and not partial_key) or complete_key))
+
+    def _get_sequence(self, obj, indexable):
+        self._assert_tuple_rules(indexable)
+        return obj._get_sequence_loc_(indexable)
+
+    def _slice_sequences(self, obj, indexable):
+        self._assert_tuple_rules(indexable)
+        if (self._has_fancy_index() and
+                type(indexable) is not tuple and
+                pd.core.common.is_list_like(indexable) and
+                len(indexable) > 0):
+            if not self.is_scalar(indexable[0], axis=0):
+                raise TypeError("A list is used with complete labels, try"
+                                " using a tuple to indicate independent"
+                                " selections of a `pd.MultiIndex`.")
+            # prevents
+            # pd.Series.loc[['x', 'b', 'b', 'a']] from being interpreted as
+            # pd.Series.loc[[('a', 0), ('b', 1)]]; it is unclear why pandas
+            # does this.
+            elif indexable[0] not in self._obj.index:
+                raise KeyError(repr(indexable[0]))
+            # pandas acts normal if the first element is actually a scalar
+
+        self._assert_bool_vector_right_size(indexable, axis=0)
+        return obj._slice_sequences_loc_(indexable)
+
+    def _assert_tuple_rules(self, indexable):
+        # pandas is scary in what it will accept sometimes...
+        if type(indexable) is tuple:
+            if not self._has_fancy_index():
+                # prevents unfriendly errors
+                raise TypeError("Cannot provide a tuple to the first axis of"
+                                " `loc` unless the MSA's `index` is a"
+                                " `pd.MultiIndex`.")
+            elif self.is_scalar(indexable[0], axis=0):
+                # prevents unreasonable results
+                # pd.Series.loc[('a', 0), ('b', 1)] would be interpreted as
+                # pd.Series.loc[('a', 1)] which is horrifying.
+                raise TypeError("A tuple provided to the first axis of `loc`"
+                                " represents a selection for each index of a"
+                                " `pd.MultiIndex`; it should not contain a"
+                                " complete label.")
+
+    def _has_fancy_index(self):
+        return hasattr(self._obj.index, 'levshape')
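A rough sketch of the indexing behavior this module backs. The accessor names (`iloc` for positional access, `loc` for label-based access) are implied by the class names above and mirror pandas; treat the exact spellings as an assumption rather than a promise of the API:

    from skbio import DNA
    from skbio.alignment import TabularMSA

    msa = TabularMSA([DNA('ACG-T'),
                      DNA('AC--T'),
                      DNA('ACGTT')])

    first = msa.iloc[0]          # scalar on the sequence axis -> one DNA
    subset = msa.iloc[[0, 2]]    # list on the sequence axis -> smaller MSA
    window = msa.iloc[:, 1:3]    # tuple handling: all sequences, positions 1-2

With a `pd.MultiIndex` on the sequence axis, `loc` additionally distinguishes partial labels (e.g. 'A') from complete ones (e.g. ('A', 0)), as described in `TabularMSALoc.is_scalar`.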
diff --git a/skbio/alignment/_pairwise.py b/skbio/alignment/_pairwise.py
index bfe0131..8bd6c8a 100644
--- a/skbio/alignment/_pairwise.py
+++ b/skbio/alignment/_pairwise.py
@@ -12,11 +12,10 @@ from itertools import product
 
 import numpy as np
 from future.builtins import range, zip
-from six import string_types
 
-from skbio.alignment import Alignment
+from skbio.alignment import TabularMSA
 from skbio.alignment._ssw_wrapper import StripedSmithWaterman
-from skbio.sequence import Sequence, Protein
+from skbio.sequence import DNA, RNA, Protein
 from skbio.sequence._iupac_sequence import IUPACSequence
 from skbio.util import EfficiencyWarning
 from skbio.util._decorator import experimental, deprecated
@@ -133,9 +132,9 @@ def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
 
     Parameters
     ----------
-    seq1 : str or Sequence
+    seq1 : DNA or RNA
         The first unaligned sequence.
-    seq2 : str or Sequence
+    seq2 : DNA or RNA
         The second unaligned sequence.
     gap_open_penalty : int or float, optional
         Penalty for opening a gap (this is subtracted from previous best
@@ -157,9 +156,11 @@ def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
 
     Returns
     -------
-    skbio.Alignment
-        ``Alignment`` object containing the aligned sequences as well as
-        details about the alignment.
+    tuple
+        ``TabularMSA`` object containing the aligned sequences, alignment score
+        (float), and start/end positions of each input sequence (iterable
+        of two-item tuples). Note that start/end positions are indexes into the
+        unaligned sequences.
 
     See Also
     --------
@@ -181,13 +182,17 @@ def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
     .. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
 
     """
+    for seq in seq1, seq2:
+        if not isinstance(seq, (DNA, RNA)):
+            raise TypeError(
+                "`seq1` and `seq2` must be DNA or RNA, not type %r"
+                % type(seq).__name__)
+
     # use the substitution matrix provided by the user, or compute from
     # match_score and mismatch_score if a substitution matrix was not provided
     if substitution_matrix is None:
         substitution_matrix = \
             make_identity_substitution_matrix(match_score, mismatch_score)
-    else:
-        pass
 
     return local_pairwise_align(seq1, seq2, gap_open_penalty,
                                 gap_extend_penalty, substitution_matrix)
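A short sketch of the caller-facing change in the hunk above: plain strings are no longer coerced, and the result is a plain tuple instead of an ``Alignment`` (the sequences here are arbitrary illustrations):

    from skbio import DNA
    from skbio.alignment import local_pairwise_align_nucleotide

    query = DNA('GCGTGCCTAAGGTATGCAAG')
    target = DNA('ACGTGCCTAGGTACGCAAG')

    # passing str now raises TypeError; inputs must be DNA or RNA objects
    msa, score, start_end = local_pairwise_align_nucleotide(query, target)
    # msa is a TabularMSA of the two aligned sequences, score is a float,
    # and start_end holds (start, end) indexes into the unaligned inputs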
@@ -201,9 +206,9 @@ def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
 
     Parameters
     ----------
-    seq1 : str or Sequence
+    seq1 : Protein
         The first unaligned sequence.
-    seq2 : str or Sequence
+    seq2 : Protein
         The second unaligned sequence.
     gap_open_penalty : int or float, optional
         Penalty for opening a gap (this is subtracted from previous best
@@ -217,9 +222,11 @@ def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
 
     Returns
     -------
-    skbio.Alignment
-        ``Alignment`` object containing the aligned sequences as well as
-        details about the alignment.
+    tuple
+        ``TabularMSA`` object containing the aligned sequences, alignment score
+        (float), and start/end positions of each input sequence (iterable
+        of two-item tuples). Note that start/end positions are indexes into the
+        unaligned sequences.
 
     See Also
     --------
@@ -246,6 +253,12 @@ def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
        Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.
 
     """
+    for seq in seq1, seq2:
+        if not isinstance(seq, Protein):
+            raise TypeError(
+                "`seq1` and `seq2` must be Protein, not type %r"
+                % type(seq).__name__)
+
     if substitution_matrix is None:
         substitution_matrix = blosum50
 
@@ -260,9 +273,9 @@ def local_pairwise_align(seq1, seq2, gap_open_penalty,
 
     Parameters
     ----------
-    seq1 : str or Sequence
+    seq1 : IUPACSequence
         The first unaligned sequence.
-    seq2 : str or Sequence
+    seq2 : IUPACSequence
         The second unaligned sequence.
     gap_open_penalty : int or float
         Penalty for opening a gap (this is subtracted from previous best
@@ -276,9 +289,11 @@ def local_pairwise_align(seq1, seq2, gap_open_penalty,
 
     Returns
     -------
-    skbio.Alignment
-       ``Alignment`` object containing the aligned sequences as well as
-        details about the alignment.
+    tuple
+        ``TabularMSA`` object containing the aligned sequences, alignment score
+        (float), and start/end positions of each input sequence (iterable
+        of two-item tuples). Note that start/end positions are indexes into the
+        unaligned sequences.
 
     See Also
     --------
@@ -307,8 +322,19 @@ def local_pairwise_align(seq1, seq2, gap_open_penalty,
          "than skbio.alignment.local_pairwise_align_ssw.",
          EfficiencyWarning)
 
-    seq1 = _coerce_alignment_input_type(seq1, disallow_alignment=True)
-    seq2 = _coerce_alignment_input_type(seq2, disallow_alignment=True)
+    for seq in seq1, seq2:
+        if not isinstance(seq, IUPACSequence):
+            raise TypeError(
+                "`seq1` and `seq2` must be IUPACSequence subclasses, not type "
+                "%r" % type(seq).__name__)
+
+    if type(seq1) is not type(seq2):
+        raise TypeError(
+            "`seq1` and `seq2` must be the same type: %r != %r"
+            % (type(seq1).__name__, type(seq2).__name__))
+
+    seq1 = _coerce_alignment_input_type(seq1)
+    seq2 = _coerce_alignment_input_type(seq2)
 
     score_matrix, traceback_matrix = _compute_score_and_traceback_matrices(
         seq1, seq2, gap_open_penalty, gap_extend_penalty,
@@ -324,8 +350,9 @@ def local_pairwise_align(seq1, seq2, gap_open_penalty,
     start_end_positions = [(seq1_start_position, end_col_position-1),
                            (seq2_start_position, end_row_position-1)]
 
-    return Alignment(aligned1 + aligned2, score=score,
-                     start_end_positions=start_end_positions)
+    msa = TabularMSA(aligned1 + aligned2)
+
+    return msa, score, start_end_positions
 
 
 @experimental(as_of="0.4.0")
@@ -334,13 +361,13 @@ def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
                                      match_score=1, mismatch_score=-2,
                                      substitution_matrix=None,
                                      penalize_terminal_gaps=False):
-    """Globally align pair of nuc. seqs or alignments with Needleman-Wunsch
+    """Globally align nucleotide seqs or alignments with Needleman-Wunsch
 
     Parameters
     ----------
-    seq1 : str, Sequence, or Alignment
+    seq1 : DNA, RNA, or TabularMSA[DNA|RNA]
         The first unaligned sequence(s).
-    seq2 : str, Sequence, or Alignment
+    seq2 : DNA, RNA, or TabularMSA[DNA|RNA]
         The second unaligned sequence(s).
     gap_open_penalty : int or float, optional
         Penalty for opening a gap (this is subtracted from previous best
@@ -369,9 +396,11 @@ def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
 
     Returns
     -------
-    skbio.Alignment
-        ``Alignment`` object containing the aligned sequences as well as
-        details about the alignment.
+    tuple
+        ``TabularMSA`` object containing the aligned sequences, alignment score
+        (float), and start/end positions of each input sequence (iterable
+        of two-item tuples). Note that start/end positions are indexes into the
+        unaligned sequences.
 
     See Also
     --------
@@ -396,13 +425,22 @@ def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
     .. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
 
     """
+    for seq in seq1, seq2:
+        if not isinstance(seq, (DNA, RNA, TabularMSA)):
+            raise TypeError(
+                "`seq1` and `seq2` must be DNA, RNA, or TabularMSA, not type "
+                "%r" % type(seq).__name__)
+        if isinstance(seq, TabularMSA) and not issubclass(seq.dtype,
+                                                          (DNA, RNA)):
+            raise TypeError(
+                "`seq1` and `seq2` must be TabularMSA with DNA or RNA dtype, "
+                "not dtype %r" % seq.dtype.__name__)
+
     # use the substitution matrix provided by the user, or compute from
     # match_score and mismatch_score if a substitution matrix was not provided
     if substitution_matrix is None:
         substitution_matrix = \
             make_identity_substitution_matrix(match_score, mismatch_score)
-    else:
-        pass
 
     return global_pairwise_align(seq1, seq2, gap_open_penalty,
                                  gap_extend_penalty, substitution_matrix,
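Since the global aligners now also accept a ``TabularMSA`` on either side (with a matching dtype), a previously computed alignment can itself be aligned to a new sequence. A hedged sketch of that usage, with made-up sequences:

    from skbio import DNA
    from skbio.alignment import global_pairwise_align_nucleotide

    # align two sequences, then align the resulting MSA against a third
    msa1, _, _ = global_pairwise_align_nucleotide(DNA('GACCTTGACCAGGTACC'),
                                                  DNA('GACCATGTCGACCAT'))
    msa2, score, start_end = global_pairwise_align_nucleotide(
        msa1, DNA('GACCATGGACCATGTT'))
    # msa2 should contain three aligned DNA sequences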
@@ -418,9 +456,9 @@ def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
 
     Parameters
     ----------
-    seq1 : str, Sequence, or Alignment
+    seq1 : Protein or TabularMSA[Protein]
         The first unaligned sequence(s).
-    seq2 : str, Sequence, or Alignment
+    seq2 : Protein or TabularMSA[Protein]
         The second unaligned sequence(s).
     gap_open_penalty : int or float, optional
         Penalty for opening a gap (this is subtracted from previous best
@@ -441,9 +479,11 @@ def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
 
     Returns
     -------
-    skbio.Alignment
-        ``Alignment`` object containing the aligned sequences as well as
-        details about the alignment.
+    tuple
+        ``TabularMSA`` object containing the aligned sequences, alignment score
+        (float), and start/end positions of each input sequence (iterable
+        of two-item tuples). Note that start/end positions are indexes into the
+        unaligned sequences.
 
     See Also
     --------
@@ -473,6 +513,16 @@ def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
        Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.
 
     """
+    for seq in seq1, seq2:
+        if not isinstance(seq, (Protein, TabularMSA)):
+            raise TypeError(
+                "`seq1` and `seq2` must be Protein or TabularMSA, not type %r"
+                % type(seq).__name__)
+        if isinstance(seq, TabularMSA) and not issubclass(seq.dtype, Protein):
+            raise TypeError(
+                "`seq1` and `seq2` must be TabularMSA with Protein dtype, "
+                "not dtype %r" % seq.dtype.__name__)
+
     if substitution_matrix is None:
         substitution_matrix = blosum50
 
@@ -488,9 +538,9 @@ def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
 
     Parameters
     ----------
-    seq1 : str, Sequence, or Alignment
+    seq1 : IUPACSequence or TabularMSA
         The first unaligned sequence(s).
-    seq2 : str, Sequence, or Alignment
+    seq2 : IUPACSequence or TabularMSA
         The second unaligned sequence(s).
     gap_open_penalty : int or float
         Penalty for opening a gap (this is subtracted from previous best
@@ -511,9 +561,11 @@ def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
 
     Returns
     -------
-    skbio.Alignment
-        ``Alignment`` object containing the aligned sequences as well as
-        details about the alignment.
+    tuple
+        ``TabularMSA`` object containing the aligned sequences, alignment score
+        (float), and start/end positions of each input sequence (iterable
+        of two-item tuples). Note that start/end positions are indexes into the
+        unaligned sequences.
 
     See Also
     --------
@@ -548,8 +600,22 @@ def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
          "version soon (see https://github.com/biocore/scikit-bio/issues/254 "
          "to track progress on this).", EfficiencyWarning)
 
-    seq1 = _coerce_alignment_input_type(seq1, disallow_alignment=False)
-    seq2 = _coerce_alignment_input_type(seq2, disallow_alignment=False)
+    for seq in seq1, seq2:
+        # We don't need to check the case where `seq` is a `TabularMSA` with a
+        # dtype that isn't a subclass of `IUPACSequence`, this is guaranteed by
+        # `TabularMSA`.
+        if not isinstance(seq, (IUPACSequence, TabularMSA)):
+            raise TypeError(
+                "`seq1` and `seq2` must be IUPACSequence subclasses or "
+                "TabularMSA, not type %r" % type(seq).__name__)
+
+    seq1 = _coerce_alignment_input_type(seq1)
+    seq2 = _coerce_alignment_input_type(seq2)
+
+    if seq1.dtype is not seq2.dtype:
+        raise TypeError(
+            "`seq1` and `seq2` must have the same dtype: %r != %r"
+            % (seq1.dtype.__name__, seq2.dtype.__name__))
 
     if penalize_terminal_gaps:
         init_matrices_f = _init_matrices_nw
@@ -572,28 +638,29 @@ def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
     start_end_positions = [(seq1_start_position, end_col_position-1),
                            (seq2_start_position, end_row_position-1)]
 
-    return Alignment(aligned1 + aligned2, score=score,
-                     start_end_positions=start_end_positions)
+    msa = TabularMSA(aligned1 + aligned2)
+
+    return msa, score, start_end_positions
 
 
 @experimental(as_of="0.4.0")
-def local_pairwise_align_ssw(sequence1, sequence2, constructor=Sequence,
-                             **kwargs):
+def local_pairwise_align_ssw(sequence1, sequence2, **kwargs):
     """Align query and target sequences with Striped Smith-Waterman.
 
     Parameters
     ----------
-    sequence1 : str or Sequence
+    sequence1 : DNA, RNA, or Protein
         The first unaligned sequence
-    sequence2 : str or Sequence
+    sequence2 : DNA, RNA, or Protein
         The second unaligned sequence
-    constructor : Sequence subclass
-        A constructor to use if `protein` is not True.
 
     Returns
     -------
-    ``skbio.alignment.Alignment``
-        The resulting alignment as an Alignment object
+    tuple
+        ``TabularMSA`` object containing the aligned sequences, alignment score
+        (float), and start/end positions of each input sequence (iterable
+        of two-item tuples). Note that start/end positions are indexes into the
+        unaligned sequences.
 
     Notes
     -----
@@ -602,8 +669,8 @@ def local_pairwise_align_ssw(sequence1, sequence2, constructor=Sequence,
     For a complete list of optional keyword-arguments that can be provided,
     see ``skbio.alignment.StripedSmithWaterman``.
 
-    The following kwargs will not have any effect: `suppress_sequences` and
-    `zero_index`
+    The following kwargs will not have any effect: `suppress_sequences`,
+    `zero_index`, and `protein`
 
     If an alignment does not meet a provided filter, `None` will be returned.
 
@@ -619,11 +686,23 @@ def local_pairwise_align_ssw(sequence1, sequence2, constructor=Sequence,
     skbio.alignment.StripedSmithWaterman
 
     """
-    # We need the sequences for `Alignment` to make sense, so don't let the
+    for seq in sequence1, sequence2:
+        if not isinstance(seq, (DNA, RNA, Protein)):
+            raise TypeError(
+                "`sequence1` and `sequence2` must be DNA, RNA, or Protein, "
+                "not type %r" % type(seq).__name__)
+
+    if type(sequence1) is not type(sequence2):
+        raise TypeError(
+            "`sequence1` and `sequence2` must be the same type: %r != %r"
+            % (type(sequence1).__name__, type(sequence2).__name__))
+
+    # We need the sequences for `TabularMSA` to make sense, so don't let the
     # user suppress them.
     kwargs['suppress_sequences'] = False
     kwargs['zero_index'] = True
 
+    kwargs['protein'] = False
     if isinstance(sequence1, Protein):
         kwargs['protein'] = True
 
@@ -640,26 +719,17 @@ def local_pairwise_align_ssw(sequence1, sequence2, constructor=Sequence,
             (alignment.query_begin, alignment.query_end),
             (alignment.target_begin, alignment.target_end_optimal)
         ]
-    if kwargs.get('protein', False):
-        seqs = [
-            Protein(alignment.aligned_query_sequence,
-                    metadata={'id': 'query'}),
-            Protein(alignment.aligned_target_sequence,
-                    metadata={'id': 'target'})
-        ]
-    else:
-        seqs = [
-            constructor(alignment.aligned_query_sequence,
-                        metadata={'id': 'query'}),
-            constructor(alignment.aligned_target_sequence,
-                        metadata={'id': 'target'})
-        ]
 
-    return Alignment(seqs, score=alignment.optimal_alignment_score,
-                     start_end_positions=start_end)
+    constructor = type(sequence1)
+    msa = TabularMSA([
+        constructor(alignment.aligned_query_sequence),
+        constructor(alignment.aligned_target_sequence)
+    ])
+
+    return msa, alignment.optimal_alignment_score, start_end
 
 
-@deprecated(as_of="0.4.0", until="0.4.1",
+@deprecated(as_of="0.4.0", until="0.5.0",
             reason="Will be replaced by a SubstitutionMatrix class. To track "
                    "progress, see [#161]"
                    "(https://github.com/biocore/scikit-bio/issues/161).")
@@ -703,46 +773,19 @@ def make_identity_substitution_matrix(match_score, mismatch_score,
 # less clunky.
 
 
-def _coerce_alignment_input_type(seq, disallow_alignment):
-    """ Converts variety of types into an skbio.Alignment object
-    """
-    if isinstance(seq, string_types):
-        return Alignment([Sequence(seq, metadata={'id': ''})])
-    elif isinstance(seq, Sequence):
-        if 'id' in seq.metadata:
-            return Alignment([seq])
-        else:
-            seq = seq.copy()
-            seq.metadata['id'] = ''
-            return Alignment([seq])
-    elif isinstance(seq, Alignment):
-        if disallow_alignment:
-            # This will disallow aligning either a pair of alignments, or an
-            # alignment and a sequence. We don't currently support this for
-            # local alignment as there is not a clear usecase, and it's also
-            # not exactly clear how this would work.
-            raise TypeError("Aligning alignments is not currently supported "
-                            "with the aligner function that you're calling.")
-        else:
-            return seq
+def _coerce_alignment_input_type(seq):
+    if isinstance(seq, IUPACSequence):
+        return TabularMSA([seq])
     else:
-        raise TypeError(
-            "Unsupported type provided to aligner: %r." % type(seq))
+        return seq
 
 
 _traceback_encoding = {'match': 1, 'vertical-gap': 2, 'horizontal-gap': 3,
                        'uninitialized': -1, 'alignment-end': 0}
 
 
-def _get_seq_id(seq, default_id):
-    result = seq.metadata['id'] if 'id' in seq.metadata else default_id
-    if result is None or result.strip() == "":
-        result = default_id
-    return result
-
-
 def _init_matrices_sw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
-    shape = (aln2.sequence_length()+1, aln1.sequence_length()+1)
+    shape = (aln2.shape.position+1, aln1.shape.position+1)
     score_matrix = np.zeros(shape)
     traceback_matrix = np.zeros(shape, dtype=np.int)
     traceback_matrix += _traceback_encoding['uninitialized']
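
The sequence_length()/sequence_count() calls replaced throughout these helpers map onto the shape attribute of TabularMSA, whose named fields the new code reads. A quick illustrative sketch (the import path is assumed from the alignment subpackage updated in this commit):

    from skbio import DNA
    from skbio.alignment import TabularMSA

    msa = TabularMSA([DNA("ACG-T"), DNA("AC-TT")])
    msa.shape.sequence   # 2 -- number of sequences (old sequence_count())
    msa.shape.position   # 5 -- alignment length (old sequence_length())
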
@@ -752,7 +795,7 @@ def _init_matrices_sw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
 
 
 def _init_matrices_nw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
-    shape = (aln2.sequence_length()+1, aln1.sequence_length()+1)
+    shape = (aln2.shape.position+1, aln1.shape.position+1)
     score_matrix = np.zeros(shape)
     traceback_matrix = np.zeros(shape, dtype=np.int)
     traceback_matrix += _traceback_encoding['uninitialized']
@@ -775,7 +818,7 @@ def _init_matrices_nw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
 
 def _init_matrices_nw_no_terminal_gap_penalty(
         aln1, aln2, gap_open_penalty, gap_extend_penalty):
-    shape = (aln2.sequence_length()+1, aln1.sequence_length()+1)
+    shape = (aln2.shape.position+1, aln1.shape.position+1)
     score_matrix = np.zeros(shape)
     traceback_matrix = np.zeros(shape, dtype=np.int)
     traceback_matrix += _traceback_encoding['uninitialized']
@@ -795,9 +838,8 @@ def _init_matrices_nw_no_terminal_gap_penalty(
 
 
 def _compute_substitution_score(aln1_chars, aln2_chars, substitution_matrix,
-                                gap_substitution_score):
+                                gap_substitution_score, gap_chars):
     substitution_score = 0
-    gap_chars = IUPACSequence.gap_chars
     for aln1_char, aln2_char in product(aln1_chars, aln2_chars):
         if aln1_char in gap_chars or aln2_char in gap_chars:
                 substitution_score += gap_substitution_score
@@ -841,8 +883,8 @@ def _compute_score_and_traceback_matrices(
     that users are most likely to be looking for.
 
     """
-    aln1_length = aln1.sequence_length()
-    aln2_length = aln2.sequence_length()
+    aln1_length = aln1.shape.position
+    aln2_length = aln2.shape.position
     # cache some values for quicker/simpler access
     aend = _traceback_encoding['alignment-end']
     match = _traceback_encoding['match']
@@ -858,14 +900,18 @@ def _compute_score_and_traceback_matrices(
 
     # Iterate over the characters in aln2 (which corresponds to the vertical
     # sequence in the matrix)
-    for aln2_pos, aln2_chars in enumerate(aln2.iter_positions(str), 1):
+    for aln2_pos, aln2_chars in enumerate(aln2.iter_positions(), 1):
+        aln2_chars = str(aln2_chars)
+
         # Iterate over the characters in aln1 (which corresponds to the
         # horizontal sequence in the matrix)
-        for aln1_pos, aln1_chars in enumerate(aln1.iter_positions(str), 1):
+        for aln1_pos, aln1_chars in enumerate(aln1.iter_positions(), 1):
+            aln1_chars = str(aln1_chars)
+
             # compute the score for a match/mismatch
             substitution_score = _compute_substitution_score(
                 aln1_chars, aln2_chars, substitution_matrix,
-                gap_substitution_score)
+                gap_substitution_score, aln1.dtype.gap_chars)
 
             diag_score = \
                 (score_matrix[aln2_pos-1, aln1_pos-1] + substitution_score,
@@ -916,18 +962,19 @@ def _compute_score_and_traceback_matrices(
 
 
 def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
-               start_col, gap_character='-'):
-    # cache some values for simpler
+               start_col):
+    # cache some values for simpler reference
     aend = _traceback_encoding['alignment-end']
     match = _traceback_encoding['match']
     vgap = _traceback_encoding['vertical-gap']
     hgap = _traceback_encoding['horizontal-gap']
+    gap_character = aln1.dtype.default_gap_char
 
     # initialize the result alignments
-    aln1_sequence_count = aln1.sequence_count()
+    aln1_sequence_count = aln1.shape.sequence
     aligned_seqs1 = [[] for e in range(aln1_sequence_count)]
 
-    aln2_sequence_count = aln2.sequence_count()
+    aln2_sequence_count = aln2.shape.sequence
     aligned_seqs2 = [[] for e in range(aln2_sequence_count)]
 
     current_row = start_row
@@ -948,7 +995,7 @@ def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
             current_col -= 1
         elif current_value == vgap:
             for aligned_seq in aligned_seqs1:
-                aligned_seq.append('-')
+                aligned_seq.append(gap_character)
             for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
                 aligned_seq.append(str(input_seq[current_row-1]))
             current_row -= 1
@@ -956,7 +1003,7 @@ def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
             for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
                 aligned_seq.append(str(input_seq[current_col-1]))
             for aligned_seq in aligned_seqs2:
-                aligned_seq.append('-')
+                aligned_seq.append(gap_character)
             current_col -= 1
         elif current_value == aend:
             continue
@@ -966,18 +1013,15 @@ def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
 
     for i in range(aln1_sequence_count):
         aligned_seq = ''.join(aligned_seqs1[i][::-1])
-        seq_id = _get_seq_id(aln1[i], str(i))
-        constructor = aln1[i].__class__
-        aligned_seqs1[i] = constructor(aligned_seq, metadata={'id': seq_id})
+        constructor = aln1.dtype
+        aligned_seqs1[i] = constructor(aligned_seq)
 
     for i in range(aln2_sequence_count):
         aligned_seq = ''.join(aligned_seqs2[i][::-1])
-        seq_id = _get_seq_id(aln2[i], str(i + aln1_sequence_count))
-        constructor = aln2[i].__class__
-        aligned_seqs2[i] = constructor(aligned_seq, metadata={'id': seq_id})
+        constructor = aln2.dtype
+        aligned_seqs2[i] = constructor(aligned_seq)
 
-    return (aligned_seqs1, aligned_seqs2, best_score,
-            current_col, current_row)
+    return aligned_seqs1, aligned_seqs2, best_score, current_col, current_row
 
 
 def _first_largest(scores):
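
The substitution-score helper above now receives the gap characters from the alignment's dtype rather than hard-coding IUPACSequence.gap_chars. A simplified standalone mirror of that branch is sketched below; the dict-of-dicts lookup for non-gap pairs is an assumption about the substitution matrix shape, and the real helper may additionally normalize over the number of character pairs.

    from itertools import product

    def gap_aware_substitution_score(chars1, chars2, substitution_matrix,
                                     gap_substitution_score, gap_chars):
        # Sum pairwise scores, charging gap_substitution_score whenever
        # either column character is a gap symbol for this sequence type.
        score = 0
        for c1, c2 in product(chars1, chars2):
            if c1 in gap_chars or c2 in gap_chars:
                score += gap_substitution_score
            else:
                score += substitution_matrix[c1][c2]
        return score

    gap_aware_substitution_score("A", "-", {"A": {"A": 1}}, -2, {"-", "."})
    # -2: the gap branch fires because '-' is in the supplied gap_chars
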
diff --git a/skbio/alignment/_repr.py b/skbio/alignment/_repr.py
new file mode 100644
index 0000000..701aca6
--- /dev/null
+++ b/skbio/alignment/_repr.py
@@ -0,0 +1,68 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from skbio.util._metadata_repr import _MetadataReprBuilder
+
+
+class _TabularMSAReprBuilder(_MetadataReprBuilder):
+    def __init__(self, msa, width, indent):
+        super(_TabularMSAReprBuilder, self).__init__(msa, width, indent)
+        self._ellipse_insert = ' ... '
+
+    def _process_header(self):
+        cls_name = self._obj.__class__.__name__
+        if self._obj.dtype is not None:
+            dtype_class = '[' + self._obj.dtype.__name__ + ']'
+        else:
+            dtype_class = ''
+        self._lines.add_line(cls_name + dtype_class)
+        self._lines.add_separator()
+
+    def _process_data(self):
+        num_sequences = self._obj.shape.sequence
+        num_positions = self._obj.shape.position
+
+        # catch case of all empty sequences
+        if num_positions > 0:
+            # display all sequences if we can, else display the first two and
+            # last two sequences separated by ellipsis
+            if num_sequences <= 5:
+                self._lines.add_lines(
+                    self._format_sequences(range(num_sequences)))
+            else:
+                self._lines.add_lines(self._format_sequences(range(2)))
+                self._lines.add_line('...')
+                self._lines.add_lines(self._format_sequences(
+                    range(num_sequences - 2, num_sequences)))
+
+    def _format_sequences(self, sequence_indices):
+        lines = []
+        for line_index in sequence_indices:
+            seq_str = str(self._obj._get_sequence_iloc_(line_index))
+            if len(seq_str) <= self._width:
+                formatted_seq = seq_str
+            else:
+                formatted_seq = (
+                    seq_str[0:self._num_characters_before_ellipse()] +
+                    self._ellipse_insert +
+                    seq_str[-self._num_characters_after_ellipse():]
+                )
+            lines.append(formatted_seq)
+        return lines
+
+    def _num_characters_before_ellipse(self):
+        return int(self._num_characters_to_display() / 2)
+
+    def _num_characters_after_ellipse(self):
+        return (self._num_characters_to_display() -
+                self._num_characters_before_ellipse())
+
+    def _num_characters_to_display(self):
+        return self._width - len(self._ellipse_insert)
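
The width bookkeeping in _TabularMSAReprBuilder splits the available display width around the ' ... ' insert. A small sketch of that arithmetic, using a hypothetical width of 20 characters:

    ellipse_insert = ' ... '
    width = 20                                # hypothetical display width

    to_display = width - len(ellipse_insert)  # 15 characters of sequence
    before = int(to_display / 2)              # 7 kept from the start
    after = to_display - before               # 8 kept from the end

    seq_str = "ACGTACGTACGTACGTACGTACGT"      # 24 chars, wider than `width`
    formatted = seq_str[:before] + ellipse_insert + seq_str[-after:]
    # 'ACGTACG ... ACGTACGT' -- exactly 20 characters, as in _format_sequences()
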
diff --git a/skbio/alignment/_ssw_wrapper.c b/skbio/alignment/_ssw_wrapper.c
index 0602656..225e2ba 100644
--- a/skbio/alignment/_ssw_wrapper.c
+++ b/skbio/alignment/_ssw_wrapper.c
@@ -1,25 +1,26 @@
-/* Generated by Cython 0.22.1 */
+/* Generated by Cython 0.23.4 */
+
+/* BEGIN: Cython Metadata
+{
+    "distutils": {
+        "depends": [
+            "skbio/alignment/_lib/ssw.h"
+        ],
+        "extra_compile_args": [
+            "-Wno-error=declaration-after-statement"
+        ]
+    }
+}
+END: Cython Metadata */
 
 #define PY_SSIZE_T_CLEAN
-#ifndef CYTHON_USE_PYLONG_INTERNALS
-#ifdef PYLONG_BITS_IN_DIGIT
-#define CYTHON_USE_PYLONG_INTERNALS 0
-#else
-#include "pyconfig.h"
-#ifdef PYLONG_BITS_IN_DIGIT
-#define CYTHON_USE_PYLONG_INTERNALS 1
-#else
-#define CYTHON_USE_PYLONG_INTERNALS 0
-#endif
-#endif
-#endif
 #include "Python.h"
 #ifndef Py_PYTHON_H
     #error Python headers needed to compile C extensions, please install development version of Python.
 #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
     #error Cython requires Python 2.6+ or Python 3.2+.
 #else
-#define CYTHON_ABI "0_22_1"
+#define CYTHON_ABI "0_23_4"
 #include <stddef.h>
 #ifndef offsetof
 #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
@@ -54,6 +55,9 @@
 #define CYTHON_COMPILING_IN_PYPY 0
 #define CYTHON_COMPILING_IN_CPYTHON 1
 #endif
+#if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000
+#define CYTHON_USE_PYLONG_INTERNALS 1
+#endif
 #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
 #define Py_OptimizeFlag 0
 #endif
@@ -61,12 +65,12 @@
 #define CYTHON_FORMAT_SSIZE_T "z"
 #if PY_MAJOR_VERSION < 3
   #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
-  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
           PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
   #define __Pyx_DefaultClassType PyClass_Type
 #else
   #define __Pyx_BUILTIN_MODULE_NAME "builtins"
-  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
           PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
   #define __Pyx_DefaultClassType PyType_Type
 #endif
@@ -84,7 +88,7 @@
 #endif
 #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
   #define CYTHON_PEP393_ENABLED 1
-  #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ? \
+  #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ?\
                                               0 : _PyUnicode_Ready((PyObject *)(op)))
   #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_LENGTH(u)
   #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
@@ -103,12 +107,10 @@
 #if CYTHON_COMPILING_IN_PYPY
   #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)
   #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)
-  #define __Pyx_PyFrozenSet_Size(s)         PyObject_Size(s)
 #else
   #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)
-  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
+  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
       PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
-  #define __Pyx_PyFrozenSet_Size(s)         PySet_Size(s)
 #endif
 #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
   #define PyUnicode_Contains(u, s)  PySequence_Contains(u, s)
@@ -176,16 +178,18 @@
 #else
   #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
 #endif
-#ifndef CYTHON_INLINE
-  #if defined(__GNUC__)
-    #define CYTHON_INLINE __inline__
-  #elif defined(_MSC_VER)
-    #define CYTHON_INLINE __inline
-  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
-    #define CYTHON_INLINE inline
-  #else
-    #define CYTHON_INLINE
-  #endif
+#if PY_VERSION_HEX >= 0x030500B1
+#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+#elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+typedef struct {
+    unaryfunc am_await;
+    unaryfunc am_aiter;
+    unaryfunc am_anext;
+} __Pyx_PyAsyncMethodsStruct;
+#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+#else
+#define __Pyx_PyType_AsAsync(obj) NULL
 #endif
 #ifndef CYTHON_RESTRICT
   #if defined(__GNUC__)
@@ -198,35 +202,33 @@
     #define CYTHON_RESTRICT
   #endif
 #endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+
+#ifndef CYTHON_INLINE
+  #if defined(__GNUC__)
+    #define CYTHON_INLINE __inline__
+  #elif defined(_MSC_VER)
+    #define CYTHON_INLINE __inline
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_INLINE inline
+  #else
+    #define CYTHON_INLINE
+  #endif
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+  #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
 #ifdef NAN
 #define __PYX_NAN() ((float) NAN)
 #else
 static CYTHON_INLINE float __PYX_NAN() {
-  /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
-   a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
-   a quiet NaN. */
   float value;
   memset(&value, 0xFF, sizeof(value));
   return value;
 }
 #endif
-#define __Pyx_void_to_None(void_result) (void_result, Py_INCREF(Py_None), Py_None)
-#ifdef __cplusplus
-template<typename T>
-void __Pyx_call_destructor(T* x) {
-    x->~T();
-}
-template<typename T>
-class __Pyx_FakeReference {
-  public:
-    __Pyx_FakeReference() : ptr(NULL) { }
-    __Pyx_FakeReference(T& ref) : ptr(&ref) { }
-    T *operator->() { return ptr; }
-    operator T&() { return *ptr; }
-  private:
-    T *ptr;
-};
-#endif
 
 
 #if PY_MAJOR_VERSION >= 3
@@ -245,10 +247,6 @@ class __Pyx_FakeReference {
   #endif
 #endif
 
-#if defined(WIN32) || defined(MS_WINDOWS)
-#define _USE_MATH_DEFINES
-#endif
-#include <math.h>
 #define __PYX_HAVE__skbio__alignment___ssw_wrapper
 #define __PYX_HAVE_API__skbio__alignment___ssw_wrapper
 #include "string.h"
@@ -294,16 +292,34 @@ typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
 #define __PYX_DEFAULT_STRING_ENCODING ""
 #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
 #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
-#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (    \
-    (sizeof(type) < sizeof(Py_ssize_t))  ||             \
-    (sizeof(type) > sizeof(Py_ssize_t) &&               \
-          likely(v < (type)PY_SSIZE_T_MAX ||            \
-                 v == (type)PY_SSIZE_T_MAX)  &&         \
-          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||       \
-                                v == (type)PY_SSIZE_T_MIN)))  ||  \
-    (sizeof(type) == sizeof(Py_ssize_t) &&              \
-          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||        \
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (\
+    (sizeof(type) < sizeof(Py_ssize_t))  ||\
+    (sizeof(type) > sizeof(Py_ssize_t) &&\
+          likely(v < (type)PY_SSIZE_T_MAX ||\
+                 v == (type)PY_SSIZE_T_MAX)  &&\
+          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+                                v == (type)PY_SSIZE_T_MIN)))  ||\
+    (sizeof(type) == sizeof(Py_ssize_t) &&\
+          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
                                v == (type)PY_SSIZE_T_MAX)))  )
+#if defined (__cplusplus) && __cplusplus >= 201103L
+    #include <cstdlib>
+    #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+    #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+    #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER) && defined (_M_X64)
+    #define __Pyx_sst_abs(value) _abs64(value)
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+    #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+    #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
 static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
 static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
 #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
@@ -338,8 +354,9 @@ static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
 #define __Pyx_PyUnicode_FromUnicode(u)       PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
 #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
 #define __Pyx_PyUnicode_AsUnicode            PyUnicode_AsUnicode
-#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
-#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
+#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
+#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
 static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
 static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
 static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
@@ -510,7 +527,7 @@ typedef struct {
 } __Pyx_BufFmt_Context;
 
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":725
  * # in Cython to enable them only on the right systems.
  * 
  * ctypedef npy_int8       int8_t             # <<<<<<<<<<<<<<
@@ -519,7 +536,7 @@ typedef struct {
  */
 typedef npy_int8 __pyx_t_5numpy_int8_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":726
  * 
  * ctypedef npy_int8       int8_t
  * ctypedef npy_int16      int16_t             # <<<<<<<<<<<<<<
@@ -528,7 +545,7 @@ typedef npy_int8 __pyx_t_5numpy_int8_t;
  */
 typedef npy_int16 __pyx_t_5numpy_int16_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":727
  * ctypedef npy_int8       int8_t
  * ctypedef npy_int16      int16_t
  * ctypedef npy_int32      int32_t             # <<<<<<<<<<<<<<
@@ -537,7 +554,7 @@ typedef npy_int16 __pyx_t_5numpy_int16_t;
  */
 typedef npy_int32 __pyx_t_5numpy_int32_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":729
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":728
  * ctypedef npy_int16      int16_t
  * ctypedef npy_int32      int32_t
  * ctypedef npy_int64      int64_t             # <<<<<<<<<<<<<<
@@ -546,7 +563,7 @@ typedef npy_int32 __pyx_t_5numpy_int32_t;
  */
 typedef npy_int64 __pyx_t_5numpy_int64_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":732
  * #ctypedef npy_int128     int128_t
  * 
  * ctypedef npy_uint8      uint8_t             # <<<<<<<<<<<<<<
@@ -555,7 +572,7 @@ typedef npy_int64 __pyx_t_5numpy_int64_t;
  */
 typedef npy_uint8 __pyx_t_5numpy_uint8_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":733
  * 
  * ctypedef npy_uint8      uint8_t
  * ctypedef npy_uint16     uint16_t             # <<<<<<<<<<<<<<
@@ -564,7 +581,7 @@ typedef npy_uint8 __pyx_t_5numpy_uint8_t;
  */
 typedef npy_uint16 __pyx_t_5numpy_uint16_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":734
  * ctypedef npy_uint8      uint8_t
  * ctypedef npy_uint16     uint16_t
  * ctypedef npy_uint32     uint32_t             # <<<<<<<<<<<<<<
@@ -573,7 +590,7 @@ typedef npy_uint16 __pyx_t_5numpy_uint16_t;
  */
 typedef npy_uint32 __pyx_t_5numpy_uint32_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":736
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":735
  * ctypedef npy_uint16     uint16_t
  * ctypedef npy_uint32     uint32_t
  * ctypedef npy_uint64     uint64_t             # <<<<<<<<<<<<<<
@@ -582,7 +599,7 @@ typedef npy_uint32 __pyx_t_5numpy_uint32_t;
  */
 typedef npy_uint64 __pyx_t_5numpy_uint64_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":739
  * #ctypedef npy_uint128    uint128_t
  * 
  * ctypedef npy_float32    float32_t             # <<<<<<<<<<<<<<
@@ -591,7 +608,7 @@ typedef npy_uint64 __pyx_t_5numpy_uint64_t;
  */
 typedef npy_float32 __pyx_t_5numpy_float32_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":741
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":740
  * 
  * ctypedef npy_float32    float32_t
  * ctypedef npy_float64    float64_t             # <<<<<<<<<<<<<<
@@ -600,7 +617,7 @@ typedef npy_float32 __pyx_t_5numpy_float32_t;
  */
 typedef npy_float64 __pyx_t_5numpy_float64_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":749
  * # The int types are mapped a bit surprising --
  * # numpy.int corresponds to 'l' and numpy.long to 'q'
  * ctypedef npy_long       int_t             # <<<<<<<<<<<<<<
@@ -609,7 +626,7 @@ typedef npy_float64 __pyx_t_5numpy_float64_t;
  */
 typedef npy_long __pyx_t_5numpy_int_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":750
  * # numpy.int corresponds to 'l' and numpy.long to 'q'
  * ctypedef npy_long       int_t
  * ctypedef npy_longlong   long_t             # <<<<<<<<<<<<<<
@@ -618,7 +635,7 @@ typedef npy_long __pyx_t_5numpy_int_t;
  */
 typedef npy_longlong __pyx_t_5numpy_long_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":751
  * ctypedef npy_long       int_t
  * ctypedef npy_longlong   long_t
  * ctypedef npy_longlong   longlong_t             # <<<<<<<<<<<<<<
@@ -627,7 +644,7 @@ typedef npy_longlong __pyx_t_5numpy_long_t;
  */
 typedef npy_longlong __pyx_t_5numpy_longlong_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":753
  * ctypedef npy_longlong   longlong_t
  * 
  * ctypedef npy_ulong      uint_t             # <<<<<<<<<<<<<<
@@ -636,7 +653,7 @@ typedef npy_longlong __pyx_t_5numpy_longlong_t;
  */
 typedef npy_ulong __pyx_t_5numpy_uint_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":754
  * 
  * ctypedef npy_ulong      uint_t
  * ctypedef npy_ulonglong  ulong_t             # <<<<<<<<<<<<<<
@@ -645,7 +662,7 @@ typedef npy_ulong __pyx_t_5numpy_uint_t;
  */
 typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":755
  * ctypedef npy_ulong      uint_t
  * ctypedef npy_ulonglong  ulong_t
  * ctypedef npy_ulonglong  ulonglong_t             # <<<<<<<<<<<<<<
@@ -654,7 +671,7 @@ typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
  */
 typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":757
  * ctypedef npy_ulonglong  ulonglong_t
  * 
  * ctypedef npy_intp       intp_t             # <<<<<<<<<<<<<<
@@ -663,7 +680,7 @@ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
  */
 typedef npy_intp __pyx_t_5numpy_intp_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":758
  * 
  * ctypedef npy_intp       intp_t
  * ctypedef npy_uintp      uintp_t             # <<<<<<<<<<<<<<
@@ -672,7 +689,7 @@ typedef npy_intp __pyx_t_5numpy_intp_t;
  */
 typedef npy_uintp __pyx_t_5numpy_uintp_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":760
  * ctypedef npy_uintp      uintp_t
  * 
  * ctypedef npy_double     float_t             # <<<<<<<<<<<<<<
@@ -681,7 +698,7 @@ typedef npy_uintp __pyx_t_5numpy_uintp_t;
  */
 typedef npy_double __pyx_t_5numpy_float_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":761
  * 
  * ctypedef npy_double     float_t
  * ctypedef npy_double     double_t             # <<<<<<<<<<<<<<
@@ -690,7 +707,7 @@ typedef npy_double __pyx_t_5numpy_float_t;
  */
 typedef npy_double __pyx_t_5numpy_double_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":762
  * ctypedef npy_double     float_t
  * ctypedef npy_double     double_t
  * ctypedef npy_longdouble longdouble_t             # <<<<<<<<<<<<<<
@@ -723,7 +740,7 @@ typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
 struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
 struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":764
  * ctypedef npy_longdouble longdouble_t
  * 
  * ctypedef npy_cfloat      cfloat_t             # <<<<<<<<<<<<<<
@@ -732,7 +749,7 @@ struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
  */
 typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":765
  * 
  * ctypedef npy_cfloat      cfloat_t
  * ctypedef npy_cdouble     cdouble_t             # <<<<<<<<<<<<<<
@@ -741,7 +758,7 @@ typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
  */
 typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":767
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":766
  * ctypedef npy_cfloat      cfloat_t
  * ctypedef npy_cdouble     cdouble_t
  * ctypedef npy_clongdouble clongdouble_t             # <<<<<<<<<<<<<<
@@ -750,7 +767,7 @@ typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
  */
 typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":768
  * ctypedef npy_clongdouble clongdouble_t
  * 
  * ctypedef npy_cdouble     complex_t             # <<<<<<<<<<<<<<
@@ -759,7 +776,7 @@ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
  */
 typedef npy_cdouble __pyx_t_5numpy_complex_t;
 
-/* "skbio/alignment/_ssw_wrapper.pyx":74
+/* "skbio/alignment/_ssw_wrapper.pyx":73
  * 
  * 
  * cdef class AlignmentStructure:             # <<<<<<<<<<<<<<
@@ -777,7 +794,7 @@ struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure {
 };
 
 
-/* "skbio/alignment/_ssw_wrapper.pyx":401
+/* "skbio/alignment/_ssw_wrapper.pyx":400
  *         return tuples
  * 
  * cdef class StripedSmithWaterman:             # <<<<<<<<<<<<<<
@@ -804,7 +821,7 @@ struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman {
 
 
 
-/* "skbio/alignment/_ssw_wrapper.pyx":74
+/* "skbio/alignment/_ssw_wrapper.pyx":73
  * 
  * 
  * cdef class AlignmentStructure:             # <<<<<<<<<<<<<<
@@ -818,7 +835,7 @@ struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure {
 static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
 
 
-/* "skbio/alignment/_ssw_wrapper.pyx":401
+/* "skbio/alignment/_ssw_wrapper.pyx":400
  *         return tuples
  * 
  * cdef class StripedSmithWaterman:             # <<<<<<<<<<<<<<
@@ -850,19 +867,19 @@ static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWate
   static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
   #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
 #ifdef WITH_THREAD
-  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
-          if (acquire_gil) { \
-              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
-              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
-              PyGILState_Release(__pyx_gilstate_save); \
-          } else { \
-              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
+  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+          if (acquire_gil) {\
+              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+              PyGILState_Release(__pyx_gilstate_save);\
+          } else {\
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
           }
 #else
-  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
           __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
 #endif
-  #define __Pyx_RefNannyFinishContext() \
+  #define __Pyx_RefNannyFinishContext()\
           __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
   #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
   #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
@@ -885,13 +902,13 @@ static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWate
   #define __Pyx_XGOTREF(r)
   #define __Pyx_XGIVEREF(r)
 #endif
-#define __Pyx_XDECREF_SET(r, v) do {                            \
-        PyObject *tmp = (PyObject *) r;                         \
-        r = v; __Pyx_XDECREF(tmp);                              \
+#define __Pyx_XDECREF_SET(r, v) do {\
+        PyObject *tmp = (PyObject *) r;\
+        r = v; __Pyx_XDECREF(tmp);\
     } while (0)
-#define __Pyx_DECREF_SET(r, v) do {                             \
-        PyObject *tmp = (PyObject *) r;                         \
-        r = v; __Pyx_DECREF(tmp);                               \
+#define __Pyx_DECREF_SET(r, v) do {\
+        PyObject *tmp = (PyObject *) r;\
+        r = v; __Pyx_DECREF(tmp);\
     } while (0)
 #define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
 #define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
@@ -918,8 +935,8 @@ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
 
 static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
 
-static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
-    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
+    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
     const char* function_name);
 
 static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
@@ -986,20 +1003,20 @@ static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
 
 static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
 
-#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
-    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
-    __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \
-    (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \
+#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+    __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
+    (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
                __Pyx_GetItemInt_Generic(o, to_py_func(i))))
-#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
-    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
-    __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
+#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+    __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
     (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
 static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                               int wraparound, int boundscheck);
-#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
-    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
-    __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
+#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+    __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
     (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
 static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                               int wraparound, int boundscheck);
@@ -1019,6 +1036,13 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
 #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
 #endif
 
+#if CYTHON_COMPILING_IN_CPYTHON
+static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace);
+#else
+#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\
+    (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
+#endif
+
 static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
 
 static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
@@ -1052,7 +1076,17 @@ static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
 
 static void __Pyx_RaiseBufferFallbackError(void);
 
-static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* proto */
+static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
+
+static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*);
+
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyObject_Ord(c)\
+    (likely(PyUnicode_Check(c)) ? (long)__Pyx_PyUnicode_AsPy_UCS4(c) : __Pyx__PyObject_Ord(c))
+#else
+#define __Pyx_PyObject_Ord(c) __Pyx__PyObject_Ord(c)
+#endif
+static long __Pyx__PyObject_Ord(PyObject* c);
 
 #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
 static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
@@ -1081,6 +1115,8 @@ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
 
 static int __Pyx_SetVtable(PyObject *dict, void *vtable);
 
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
+
 static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
 
 static PyObject *__Pyx_GetNameInClass(PyObject *nmspace, PyObject *name);
@@ -1127,14 +1163,10 @@ typedef struct {
 static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
 static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
 
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
-
 static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_uint16(npy_uint16 value);
 
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
-
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int32(npy_int32 value);
 
 static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *);
@@ -1147,6 +1179,8 @@ static CYTHON_INLINE npy_uint16 __Pyx_PyInt_As_npy_uint16(PyObject *);
 
 static CYTHON_INLINE npy_int8 __Pyx_PyInt_As_npy_int8(PyObject *);
 
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
+
 #if CYTHON_CCOMPLEX
   #ifdef __cplusplus
     #define __Pyx_CREAL(z) ((z).real())
@@ -1245,6 +1279,8 @@ static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(do
     #endif
 #endif
 
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
+
 static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
 
 static int __Pyx_check_binary_version(void);
@@ -1270,6 +1306,17 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
 
 /* Module declarations from 'cpython.version' */
 
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.type' */
+static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from 'cpython.object' */
+
 /* Module declarations from 'cpython.ref' */
 
 /* Module declarations from 'cpython.exc' */
@@ -1282,23 +1329,12 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
 
 /* Module declarations from 'cpython.list' */
 
-/* Module declarations from 'libc.string' */
-
-/* Module declarations from 'libc.stdio' */
-
-/* Module declarations from 'cpython.object' */
-
 /* Module declarations from 'cpython.sequence' */
 
 /* Module declarations from 'cpython.mapping' */
 
 /* Module declarations from 'cpython.iterator' */
 
-/* Module declarations from '__builtin__' */
-
-/* Module declarations from 'cpython.type' */
-static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
-
 /* Module declarations from 'cpython.number' */
 
 /* Module declarations from 'cpython.int' */
@@ -1376,37 +1412,7 @@ static PyObject *__pyx_builtin_range;
 static PyObject *__pyx_builtin_ValueError;
 static PyObject *__pyx_builtin_Exception;
 static PyObject *__pyx_builtin_enumerate;
-static PyObject *__pyx_builtin_ord;
 static PyObject *__pyx_builtin_RuntimeError;
-static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cinit__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_read_sequence, PyObject *__pyx_v_reference_sequence, PyObject *__pyx_v_index_starts_at); /* proto */
-static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__dealloc__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_4__getitem__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_6__repr__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_8__str__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_10optimal_alignment_score(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_12suboptimal_alignment_score(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_14target_begin(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_16target_end_optimal(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_18target_end_suboptimal(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_20query_begin(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_22query_end(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_24cigar(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_26query_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_28target_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_30aligned_query_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_32aligned_target_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_34set_zero_based(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_is_zero_based); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_36is_zero_based(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_38_get_aligned_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_sequence, PyObject *__pyx_v_tuple_cigar, PyObject *__pyx_v_begin, PyObject *__pyx_v_end, PyObject *__pyx_v_gap_type); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_40_tuples_from_cigar(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
-static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___cinit__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_query_sequence, PyObject *__pyx_v_gap_open_penalty, PyObject *__pyx_v_gap_extend_penalty, PyObject *__pyx_v_score_size, PyObject *__pyx_v_mask_length, PyObject *__pyx_v_mask_auto, PyObject *__pyx_v_score_only, PyObject *__pyx_v_score_filter, PyObject *__pyx_v_distance_filter, PyObject *__pyx_v_overrid [...]
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_target_sequence); /* proto */
-static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__dealloc__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_6_get_bit_flag(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_override_skip_babp, PyObject *__pyx_v_score_only); /* proto */
-static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
-static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
-static PyObject *__pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
 static char __pyx_k_[] = ",\n";
 static char __pyx_k_B[] = "B";
 static char __pyx_k_D[] = "D";
@@ -1435,7 +1441,6 @@ static char __pyx_k__6[] = "";
 static char __pyx_k__8[] = "-";
 static char __pyx_k_np[] = "np";
 static char __pyx_k_end[] = "end";
-static char __pyx_k_ord[] = "ord";
 static char __pyx_k_r_r[] = "    {!r}: {!r}";
 static char __pyx_k_int8[] = "int8";
 static char __pyx_k_join[] = "join";
@@ -1460,7 +1465,6 @@ static char __pyx_k_Sequence[] = "Sequence";
 static char __pyx_k_gap_type[] = "gap_type";
 static char __pyx_k_property[] = "property";
 static char __pyx_k_sequence[] = "sequence";
-static char __pyx_k_Alignment[] = "Alignment";
 static char __pyx_k_Exception[] = "Exception";
 static char __pyx_k_enumerate[] = "enumerate";
 static char __pyx_k_mask_auto[] = "mask_auto";
@@ -1489,7 +1493,6 @@ static char __pyx_k_set_zero_based[] = "set_zero_based";
 static char __pyx_k_skbio_sequence[] = "skbio.sequence";
 static char __pyx_k_distance_filter[] = "distance_filter";
 static char __pyx_k_index_starts_at[] = "index_starts_at";
-static char __pyx_k_skbio_alignment[] = "skbio.alignment";
 static char __pyx_k_target_sequence[] = "target_sequence";
 static char __pyx_k_gap_open_penalty[] = "gap_open_penalty";
 static char __pyx_k_tuples_from_cigar[] = "_tuples_from_cigar";
@@ -1518,7 +1521,6 @@ static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string alloca
 static PyObject *__pyx_kp_s_;
 static PyObject *__pyx_n_s_ACGTN;
 static PyObject *__pyx_kp_s_ARNDCQEGHILKMFPSTWYVBZX;
-static PyObject *__pyx_n_s_Alignment;
 static PyObject *__pyx_n_s_D;
 static PyObject *__pyx_n_s_Exception;
 static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
@@ -1575,7 +1577,6 @@ static PyObject *__pyx_n_s_np_aa_table;
 static PyObject *__pyx_n_s_np_nt_table;
 static PyObject *__pyx_n_s_numpy;
 static PyObject *__pyx_n_s_optimal_alignment_score;
-static PyObject *__pyx_n_s_ord;
 static PyObject *__pyx_n_s_override_skip_babp;
 static PyObject *__pyx_n_s_property;
 static PyObject *__pyx_n_s_protein;
@@ -1593,7 +1594,6 @@ static PyObject *__pyx_n_s_score_only;
 static PyObject *__pyx_n_s_score_size;
 static PyObject *__pyx_n_s_sequence;
 static PyObject *__pyx_n_s_set_zero_based;
-static PyObject *__pyx_n_s_skbio_alignment;
 static PyObject *__pyx_n_s_skbio_sequence;
 static PyObject *__pyx_n_s_suboptimal_alignment_score;
 static PyObject *__pyx_n_s_substitution_matrix;
@@ -1607,6 +1607,35 @@ static PyObject *__pyx_n_s_tuple_cigar;
 static PyObject *__pyx_n_s_tuples_from_cigar;
 static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
 static PyObject *__pyx_n_s_zero_index;
+static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cinit__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_read_sequence, PyObject *__pyx_v_reference_sequence, PyObject *__pyx_v_index_starts_at); /* proto */
+static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__dealloc__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_4__getitem__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_6__repr__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_8__str__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_10optimal_alignment_score(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_12suboptimal_alignment_score(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_14target_begin(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_16target_end_optimal(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_18target_end_suboptimal(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_20query_begin(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_22query_end(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_24cigar(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_26query_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_28target_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_30aligned_query_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_32aligned_target_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_34set_zero_based(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_is_zero_based); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_36is_zero_based(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_38_get_aligned_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_sequence, PyObject *__pyx_v_tuple_cigar, PyObject *__pyx_v_begin, PyObject *__pyx_v_end, PyObject *__pyx_v_gap_type); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_40_tuples_from_cigar(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___cinit__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_query_sequence, PyObject *__pyx_v_gap_open_penalty, PyObject *__pyx_v_gap_extend_penalty, PyObject *__pyx_v_score_size, PyObject *__pyx_v_mask_length, PyObject *__pyx_v_mask_auto, PyObject *__pyx_v_score_only, PyObject *__pyx_v_score_filter, PyObject *__pyx_v_distance_filter, PyObject *__pyx_v_overrid [...]
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_target_sequence); /* proto */
+static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__dealloc__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_6_get_bit_flag(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_override_skip_babp, PyObject *__pyx_v_score_only); /* proto */
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
+static PyObject *__pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
 static PyObject *__pyx_int_0;
 static PyObject *__pyx_int_1;
 static PyObject *__pyx_int_2;
@@ -1646,7 +1675,7 @@ static PyObject *__pyx_tuple__15;
 static PyObject *__pyx_tuple__16;
 static PyObject *__pyx_tuple__17;
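
The prototypes above spell out the Python-visible surface generated for this module: a StripedSmithWaterman object is built from a query sequence (plus scoring/filter keywords such as gap_open_penalty and gap_extend_penalty), is called with a target sequence, and returns an AlignmentStructure whose properties (optimal_alignment_score, cigar, query_begin, ...) are implemented in the functions that follow. A minimal usage sketch of that wrapper, assuming the public re-export from skbio.alignment and purely illustrative sequences:

    from skbio.alignment import StripedSmithWaterman

    # Build the query profile once; it can then be aligned against many targets.
    query = StripedSmithWaterman("ACTAAGGCTCTCTACCCCTCTCAGAGA",
                                 gap_open_penalty=5, gap_extend_penalty=2)
    alignment = query("AAAAAACTCTCTAAACTCACTACTCTAGCTACAGA")

    # AlignmentStructure fields implemented by the generated functions below.
    print(alignment.optimal_alignment_score)
    print(alignment.cigar)
    print(alignment.query_begin, alignment.query_end)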
 
-/* "skbio/alignment/_ssw_wrapper.pyx":92
+/* "skbio/alignment/_ssw_wrapper.pyx":91
  *     cdef str _cigar_string
  * 
  *     def __cinit__(self, read_sequence, reference_sequence, index_starts_at):             # <<<<<<<<<<<<<<
@@ -1687,16 +1716,16 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_1__cin
         case  1:
         if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_reference_sequence)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
         case  2:
         if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_index_starts_at)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
       goto __pyx_L5_argtuple_error;
@@ -1711,7 +1740,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_1__cin
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ -1734,14 +1763,14 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cini
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("__cinit__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":96
+  /* "skbio/alignment/_ssw_wrapper.pyx":95
  *         # treated sematically as a private output of ssw.c like the `s_align`
  *         # struct
  *         self.read_sequence = read_sequence             # <<<<<<<<<<<<<<
  *         self.reference_sequence = reference_sequence
  *         self.index_starts_at = index_starts_at
  */
-  if (!(likely(PyString_CheckExact(__pyx_v_read_sequence))||((__pyx_v_read_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_read_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(PyString_CheckExact(__pyx_v_read_sequence))||((__pyx_v_read_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_read_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_1 = __pyx_v_read_sequence;
   __Pyx_INCREF(__pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
@@ -1750,14 +1779,14 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cini
   __pyx_v_self->read_sequence = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":97
+  /* "skbio/alignment/_ssw_wrapper.pyx":96
  *         # struct
  *         self.read_sequence = read_sequence
  *         self.reference_sequence = reference_sequence             # <<<<<<<<<<<<<<
  *         self.index_starts_at = index_starts_at
  * 
  */
-  if (!(likely(PyString_CheckExact(__pyx_v_reference_sequence))||((__pyx_v_reference_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_reference_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(PyString_CheckExact(__pyx_v_reference_sequence))||((__pyx_v_reference_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_reference_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_1 = __pyx_v_reference_sequence;
   __Pyx_INCREF(__pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
@@ -1766,17 +1795,17 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cini
   __pyx_v_self->reference_sequence = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":98
+  /* "skbio/alignment/_ssw_wrapper.pyx":97
  *         self.read_sequence = read_sequence
  *         self.reference_sequence = reference_sequence
  *         self.index_starts_at = index_starts_at             # <<<<<<<<<<<<<<
  * 
  *     cdef __constructor(self, s_align* pointer):
  */
-  __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_v_index_starts_at); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_v_index_starts_at); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_v_self->index_starts_at = __pyx_t_2;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":92
+  /* "skbio/alignment/_ssw_wrapper.pyx":91
  *     cdef str _cigar_string
  * 
  *     def __cinit__(self, read_sequence, reference_sequence, index_starts_at):             # <<<<<<<<<<<<<<
@@ -1796,7 +1825,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cini
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":100
+/* "skbio/alignment/_ssw_wrapper.pyx":99
  *         self.index_starts_at = index_starts_at
  * 
  *     cdef __constructor(self, s_align* pointer):             # <<<<<<<<<<<<<<
@@ -1809,7 +1838,7 @@ static PyObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure__
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("__constructor", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":101
+  /* "skbio/alignment/_ssw_wrapper.pyx":100
  * 
  *     cdef __constructor(self, s_align* pointer):
  *         self.p = pointer             # <<<<<<<<<<<<<<
@@ -1818,7 +1847,7 @@ static PyObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure__
  */
   __pyx_v_self->p = __pyx_v_pointer;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":100
+  /* "skbio/alignment/_ssw_wrapper.pyx":99
  *         self.index_starts_at = index_starts_at
  * 
  *     cdef __constructor(self, s_align* pointer):             # <<<<<<<<<<<<<<
@@ -1833,7 +1862,7 @@ static PyObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure__
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":103
+/* "skbio/alignment/_ssw_wrapper.pyx":102
  *         self.p = pointer
  * 
  *     def __dealloc__(self):             # <<<<<<<<<<<<<<
@@ -1857,7 +1886,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__de
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("__dealloc__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":104
+  /* "skbio/alignment/_ssw_wrapper.pyx":103
  * 
  *     def __dealloc__(self):
  *         if self.p is not NULL:             # <<<<<<<<<<<<<<
@@ -1867,7 +1896,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__de
   __pyx_t_1 = ((__pyx_v_self->p != NULL) != 0);
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":105
+    /* "skbio/alignment/_ssw_wrapper.pyx":104
  *     def __dealloc__(self):
  *         if self.p is not NULL:
  *             align_destroy(self.p)             # <<<<<<<<<<<<<<
@@ -1875,11 +1904,17 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__de
  *     def __getitem__(self, key):
  */
     align_destroy(__pyx_v_self->p);
-    goto __pyx_L3;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":103
+ * 
+ *     def __dealloc__(self):
+ *         if self.p is not NULL:             # <<<<<<<<<<<<<<
+ *             align_destroy(self.p)
+ * 
+ */
   }
-  __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":103
+  /* "skbio/alignment/_ssw_wrapper.pyx":102
  *         self.p = pointer
  * 
  *     def __dealloc__(self):             # <<<<<<<<<<<<<<
@@ -1891,7 +1926,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__de
   __Pyx_RefNannyFinishContext();
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":107
+/* "skbio/alignment/_ssw_wrapper.pyx":106
  *             align_destroy(self.p)
  * 
  *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
@@ -1921,7 +1956,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("__getitem__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":108
+  /* "skbio/alignment/_ssw_wrapper.pyx":107
  * 
  *     def __getitem__(self, key):
  *         return getattr(self, key)             # <<<<<<<<<<<<<<
@@ -1929,13 +1964,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     def __repr__(self):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_GetAttr(((PyObject *)__pyx_v_self), __pyx_v_key); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetAttr(((PyObject *)__pyx_v_self), __pyx_v_key); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":107
+  /* "skbio/alignment/_ssw_wrapper.pyx":106
  *             align_destroy(self.p)
  * 
  *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
@@ -1954,7 +1989,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":110
+/* "skbio/alignment/_ssw_wrapper.pyx":109
  *         return getattr(self, key)
  * 
  *     def __repr__(self):             # <<<<<<<<<<<<<<
@@ -1994,14 +2029,14 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("__repr__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":111
+  /* "skbio/alignment/_ssw_wrapper.pyx":110
  * 
  *     def __repr__(self):
  *         data = ['optimal_alignment_score', 'suboptimal_alignment_score',             # <<<<<<<<<<<<<<
  *                 'query_begin', 'query_end', 'target_begin',
  *                 'target_end_optimal', 'target_end_suboptimal', 'cigar',
  */
-  __pyx_t_1 = PyList_New(10); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyList_New(10); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_n_s_optimal_alignment_score);
   __Pyx_GIVEREF(__pyx_n_s_optimal_alignment_score);
@@ -2036,7 +2071,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_v_data = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":115
+  /* "skbio/alignment/_ssw_wrapper.pyx":114
  *                 'target_end_optimal', 'target_end_suboptimal', 'cigar',
  *                 'query_sequence', 'target_sequence']
  *         return "{\n%s\n}" % ',\n'.join([             # <<<<<<<<<<<<<<
@@ -2044,10 +2079,10 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  * 
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":116
+  /* "skbio/alignment/_ssw_wrapper.pyx":115
  *                 'query_sequence', 'target_sequence']
  *         return "{\n%s\n}" % ',\n'.join([
  *             "    {!r}: {!r}".format(k, self[k]) for k in data])             # <<<<<<<<<<<<<<
@@ -2058,16 +2093,16 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   for (;;) {
     if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
     #if CYTHON_COMPILING_IN_CPYTHON
-    __pyx_t_4 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     #else
-    __pyx_t_4 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
     #endif
     __Pyx_XDECREF_SET(__pyx_v_k, __pyx_t_4);
     __pyx_t_4 = 0;
-    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_r_r, __pyx_n_s_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_r_r, __pyx_n_s_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_5);
-    __pyx_t_6 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_k); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __pyx_t_6 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_k); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
     __Pyx_GOTREF(__pyx_t_6);
     __pyx_t_7 = NULL;
     __pyx_t_8 = 0;
@@ -2081,7 +2116,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
         __pyx_t_8 = 1;
       }
     }
-    __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_9);
     if (__pyx_t_7) {
       __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
@@ -2092,33 +2127,33 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     __Pyx_GIVEREF(__pyx_t_6);
     PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_t_6);
     __pyx_t_6 = 0;
-    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
     __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
     __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   }
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":115
+  /* "skbio/alignment/_ssw_wrapper.pyx":114
  *                 'target_end_optimal', 'target_end_suboptimal', 'cigar',
  *                 'query_sequence', 'target_sequence']
  *         return "{\n%s\n}" % ',\n'.join([             # <<<<<<<<<<<<<<
  *             "    {!r}: {!r}".format(k, self[k]) for k in data])
  * 
  */
-  __pyx_t_2 = __Pyx_PyString_Join(__pyx_kp_s_, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyString_Join(__pyx_kp_s_, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_s, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_s, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":110
+  /* "skbio/alignment/_ssw_wrapper.pyx":109
  *         return getattr(self, key)
  * 
  *     def __repr__(self):             # <<<<<<<<<<<<<<
@@ -2145,7 +2180,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
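
The generated __getitem__/__repr__ pair above comes straight from the quoted Cython source: item access simply delegates to getattr, and __repr__ renders a fixed list of attribute names as a brace-wrapped block. A standalone sketch of the same rendering, with render_alignment as a hypothetical helper name and aln standing for any AlignmentStructure-like object:

    # Render an alignment the way the generated __repr__ above does;
    # self[k] is equivalent to getattr(self, k) per the __getitem__ body.
    def render_alignment(aln):
        data = ['optimal_alignment_score', 'suboptimal_alignment_score',
                'query_begin', 'query_end', 'target_begin',
                'target_end_optimal', 'target_end_suboptimal', 'cigar',
                'query_sequence', 'target_sequence']
        return "{\n%s\n}" % ',\n'.join(
            "    {!r}: {!r}".format(k, getattr(aln, k)) for k in data)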
 
-/* "skbio/alignment/_ssw_wrapper.pyx":118
+/* "skbio/alignment/_ssw_wrapper.pyx":117
  *             "    {!r}: {!r}".format(k, self[k]) for k in data])
  * 
  *     def __str__(self):             # <<<<<<<<<<<<<<
@@ -2184,140 +2219,146 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("__str__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":119
+  /* "skbio/alignment/_ssw_wrapper.pyx":118
  * 
  *     def __str__(self):
  *         score = "Score: %d" % self.optimal_alignment_score             # <<<<<<<<<<<<<<
  *         if self.query_sequence and self.cigar:
  *             target = self.aligned_target_sequence
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Score_d, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Score_d, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_score = ((PyObject*)__pyx_t_2);
   __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":120
+  /* "skbio/alignment/_ssw_wrapper.pyx":119
  *     def __str__(self):
  *         score = "Score: %d" % self.optimal_alignment_score
  *         if self.query_sequence and self.cigar:             # <<<<<<<<<<<<<<
  *             target = self.aligned_target_sequence
  *             query = self.aligned_query_sequence
  */
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   if (__pyx_t_4) {
   } else {
     __pyx_t_3 = __pyx_t_4;
     goto __pyx_L4_bool_binop_done;
   }
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_cigar); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_cigar); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __pyx_t_3 = __pyx_t_4;
   __pyx_L4_bool_binop_done:;
   if (__pyx_t_3) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":121
+    /* "skbio/alignment/_ssw_wrapper.pyx":120
  *         score = "Score: %d" % self.optimal_alignment_score
  *         if self.query_sequence and self.cigar:
  *             target = self.aligned_target_sequence             # <<<<<<<<<<<<<<
  *             query = self.aligned_query_sequence
  *             align_len = len(query)
  */
-    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_v_target = __pyx_t_2;
     __pyx_t_2 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":122
+    /* "skbio/alignment/_ssw_wrapper.pyx":121
  *         if self.query_sequence and self.cigar:
  *             target = self.aligned_target_sequence
  *             query = self.aligned_query_sequence             # <<<<<<<<<<<<<<
  *             align_len = len(query)
  *             if align_len > 13:
  */
-    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_v_query = __pyx_t_2;
     __pyx_t_2 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":123
+    /* "skbio/alignment/_ssw_wrapper.pyx":122
  *             target = self.aligned_target_sequence
  *             query = self.aligned_query_sequence
  *             align_len = len(query)             # <<<<<<<<<<<<<<
  *             if align_len > 13:
  *                 target = target[:10] + "..."
  */
-    __pyx_t_5 = PyObject_Length(__pyx_v_query); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_t_2 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = PyObject_Length(__pyx_v_query); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_v_align_len = __pyx_t_2;
     __pyx_t_2 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":124
+    /* "skbio/alignment/_ssw_wrapper.pyx":123
  *             query = self.aligned_query_sequence
  *             align_len = len(query)
  *             if align_len > 13:             # <<<<<<<<<<<<<<
  *                 target = target[:10] + "..."
  *                 query = query[:10] + "..."
  */
-    __pyx_t_2 = PyObject_RichCompare(__pyx_v_align_len, __pyx_int_13, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = PyObject_RichCompare(__pyx_v_align_len, __pyx_int_13, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
     if (__pyx_t_3) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":125
+      /* "skbio/alignment/_ssw_wrapper.pyx":124
  *             align_len = len(query)
  *             if align_len > 13:
  *                 target = target[:10] + "..."             # <<<<<<<<<<<<<<
  *                 query = query[:10] + "..."
  * 
  */
-      __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_target, 0, 10, NULL, NULL, &__pyx_slice__2, 0, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_target, 0, 10, NULL, NULL, &__pyx_slice__2, 0, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
-      __pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_kp_s__3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_kp_s__3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
       __Pyx_DECREF_SET(__pyx_v_target, __pyx_t_1);
       __pyx_t_1 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":126
+      /* "skbio/alignment/_ssw_wrapper.pyx":125
  *             if align_len > 13:
  *                 target = target[:10] + "..."
  *                 query = query[:10] + "..."             # <<<<<<<<<<<<<<
  * 
  *             length = "Length: %d" % align_len
  */
-      __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_query, 0, 10, NULL, NULL, &__pyx_slice__4, 0, 1, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_query, 0, 10, NULL, NULL, &__pyx_slice__4, 0, 1, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
-      __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_kp_s__3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_kp_s__3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
       __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
       __Pyx_DECREF_SET(__pyx_v_query, __pyx_t_2);
       __pyx_t_2 = 0;
-      goto __pyx_L6;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":123
+ *             query = self.aligned_query_sequence
+ *             align_len = len(query)
+ *             if align_len > 13:             # <<<<<<<<<<<<<<
+ *                 target = target[:10] + "..."
+ *                 query = query[:10] + "..."
+ */
     }
-    __pyx_L6:;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":128
+    /* "skbio/alignment/_ssw_wrapper.pyx":127
  *                 query = query[:10] + "..."
  * 
  *             length = "Length: %d" % align_len             # <<<<<<<<<<<<<<
  *             return "\n".join([query, target, score, length])
  *         return score
  */
-    __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Length_d, __pyx_v_align_len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Length_d, __pyx_v_align_len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_v_length = ((PyObject*)__pyx_t_2);
     __pyx_t_2 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":129
+    /* "skbio/alignment/_ssw_wrapper.pyx":128
  * 
  *             length = "Length: %d" % align_len
  *             return "\n".join([query, target, score, length])             # <<<<<<<<<<<<<<
@@ -2325,7 +2366,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  * 
  */
     __Pyx_XDECREF(__pyx_r);
-    __pyx_t_2 = PyList_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = PyList_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __Pyx_INCREF(__pyx_v_query);
     __Pyx_GIVEREF(__pyx_v_query);
@@ -2339,15 +2380,23 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     __Pyx_INCREF(__pyx_v_length);
     __Pyx_GIVEREF(__pyx_v_length);
     PyList_SET_ITEM(__pyx_t_2, 3, __pyx_v_length);
-    __pyx_t_1 = __Pyx_PyString_Join(__pyx_kp_s__5, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyString_Join(__pyx_kp_s__5, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
     __pyx_r = __pyx_t_1;
     __pyx_t_1 = 0;
     goto __pyx_L0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":119
+ *     def __str__(self):
+ *         score = "Score: %d" % self.optimal_alignment_score
+ *         if self.query_sequence and self.cigar:             # <<<<<<<<<<<<<<
+ *             target = self.aligned_target_sequence
+ *             query = self.aligned_query_sequence
+ */
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":130
+  /* "skbio/alignment/_ssw_wrapper.pyx":129
  *             length = "Length: %d" % align_len
  *             return "\n".join([query, target, score, length])
  *         return score             # <<<<<<<<<<<<<<
@@ -2359,7 +2408,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = __pyx_v_score;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":118
+  /* "skbio/alignment/_ssw_wrapper.pyx":117
  *             "    {!r}: {!r}".format(k, self[k]) for k in data])
  * 
  *     def __str__(self):             # <<<<<<<<<<<<<<
@@ -2384,7 +2433,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
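
Per the quoted source, __str__ falls back to the bare score line unless both query_sequence and cigar are truthy; when it does print an alignment, sequences longer than 13 characters are truncated to their first 10 plus "..." before the query, target, score, and length lines are joined. A standalone sketch of that formatting, with summarize_alignment as a hypothetical helper name:

    # Mirror the quoted __str__ truncation: alignments longer than 13
    # characters are shown as their first 10 characters plus "...".
    def summarize_alignment(aligned_query, aligned_target, score):
        query, target = aligned_query, aligned_target
        align_len = len(query)
        if align_len > 13:
            target = target[:10] + "..."
            query = query[:10] + "..."
        length = "Length: %d" % align_len
        return "\n".join([query, target, "Score: %d" % score, length])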
 
-/* "skbio/alignment/_ssw_wrapper.pyx":133
+/* "skbio/alignment/_ssw_wrapper.pyx":132
  * 
  *     @property
  *     def optimal_alignment_score(self):             # <<<<<<<<<<<<<<
@@ -2415,7 +2464,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("optimal_alignment_score", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":142
+  /* "skbio/alignment/_ssw_wrapper.pyx":141
  * 
  *         """
  *         return self.p.score1             # <<<<<<<<<<<<<<
@@ -2423,13 +2472,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     @property
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_npy_uint16(__pyx_v_self->p->score1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyInt_From_npy_uint16(__pyx_v_self->p->score1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":133
+  /* "skbio/alignment/_ssw_wrapper.pyx":132
  * 
  *     @property
  *     def optimal_alignment_score(self):             # <<<<<<<<<<<<<<
@@ -2448,7 +2497,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":145
+/* "skbio/alignment/_ssw_wrapper.pyx":144
  * 
  *     @property
  *     def suboptimal_alignment_score(self):             # <<<<<<<<<<<<<<
@@ -2479,7 +2528,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("suboptimal_alignment_score", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":154
+  /* "skbio/alignment/_ssw_wrapper.pyx":153
  * 
  *         """
  *         return self.p.score2             # <<<<<<<<<<<<<<
@@ -2487,13 +2536,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     @property
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_npy_uint16(__pyx_v_self->p->score2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyInt_From_npy_uint16(__pyx_v_self->p->score2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":145
+  /* "skbio/alignment/_ssw_wrapper.pyx":144
  * 
  *     @property
  *     def suboptimal_alignment_score(self):             # <<<<<<<<<<<<<<
@@ -2512,7 +2561,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":157
+/* "skbio/alignment/_ssw_wrapper.pyx":156
  * 
  *     @property
  *     def target_begin(self):             # <<<<<<<<<<<<<<
@@ -2544,7 +2593,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("target_begin", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":170
+  /* "skbio/alignment/_ssw_wrapper.pyx":169
  * 
  *         """
  *         return self.p.ref_begin1 + self.index_starts_at if (self.p.ref_begin1             # <<<<<<<<<<<<<<
@@ -2553,7 +2602,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  */
   __Pyx_XDECREF(__pyx_r);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":171
+  /* "skbio/alignment/_ssw_wrapper.pyx":170
  *         """
  *         return self.p.ref_begin1 + self.index_starts_at if (self.p.ref_begin1
  *                                                             >= 0) else -1             # <<<<<<<<<<<<<<
@@ -2562,14 +2611,14 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  */
   if (((__pyx_v_self->p->ref_begin1 >= 0) != 0)) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":170
+    /* "skbio/alignment/_ssw_wrapper.pyx":169
  * 
  *         """
  *         return self.p.ref_begin1 + self.index_starts_at if (self.p.ref_begin1             # <<<<<<<<<<<<<<
  *                                                             >= 0) else -1
  * 
  */
-    __pyx_t_2 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_begin1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyInt_From_npy_int32((__pyx_v_self->p->ref_begin1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_t_1 = __pyx_t_2;
     __pyx_t_2 = 0;
@@ -2581,7 +2630,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":157
+  /* "skbio/alignment/_ssw_wrapper.pyx":156
  * 
  *     @property
  *     def target_begin(self):             # <<<<<<<<<<<<<<
@@ -2601,7 +2650,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
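
The begin/end properties adjust the raw ssw coordinates by index_starts_at (0 or 1, toggled via set_zero_based) and pass a negative begin position through as -1; this hunk also switches the integer conversion to __Pyx_PyInt_From_npy_int32, which suggests the s_align begin/end fields are declared as npy_int32 in this release. A one-line sketch of the adjustment, with target_begin written here as a plain function over the raw values:

    # ref_begin1: raw 0-based position from ssw, or a negative value when no
    # begin position was reported; index_starts_at: 0 or 1.
    def target_begin(ref_begin1, index_starts_at):
        return ref_begin1 + index_starts_at if ref_begin1 >= 0 else -1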
 
-/* "skbio/alignment/_ssw_wrapper.pyx":174
+/* "skbio/alignment/_ssw_wrapper.pyx":173
  * 
  *     @property
  *     def target_end_optimal(self):             # <<<<<<<<<<<<<<
@@ -2632,7 +2681,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("target_end_optimal", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":188
+  /* "skbio/alignment/_ssw_wrapper.pyx":187
  * 
  *         """
  *         return self.p.ref_end1 + self.index_starts_at             # <<<<<<<<<<<<<<
@@ -2640,13 +2689,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     @property
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_end1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyInt_From_npy_int32((__pyx_v_self->p->ref_end1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":174
+  /* "skbio/alignment/_ssw_wrapper.pyx":173
  * 
  *     @property
  *     def target_end_optimal(self):             # <<<<<<<<<<<<<<
@@ -2665,7 +2714,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":191
+/* "skbio/alignment/_ssw_wrapper.pyx":190
  * 
  *     @property
  *     def target_end_suboptimal(self):             # <<<<<<<<<<<<<<
@@ -2696,7 +2745,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("target_end_suboptimal", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":205
+  /* "skbio/alignment/_ssw_wrapper.pyx":204
  * 
  *         """
  *         return self.p.ref_end2 + self.index_starts_at             # <<<<<<<<<<<<<<
@@ -2704,13 +2753,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     @property
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_end2 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyInt_From_npy_int32((__pyx_v_self->p->ref_end2 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":191
+  /* "skbio/alignment/_ssw_wrapper.pyx":190
  * 
  *     @property
  *     def target_end_suboptimal(self):             # <<<<<<<<<<<<<<
@@ -2729,7 +2778,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":208
+/* "skbio/alignment/_ssw_wrapper.pyx":207
  * 
  *     @property
  *     def query_begin(self):             # <<<<<<<<<<<<<<
@@ -2761,7 +2810,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("query_begin", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":221
+  /* "skbio/alignment/_ssw_wrapper.pyx":220
  * 
  *         """
  *         return self.p.read_begin1 + self.index_starts_at if (self.p.read_begin1             # <<<<<<<<<<<<<<
@@ -2770,7 +2819,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  */
   __Pyx_XDECREF(__pyx_r);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":222
+  /* "skbio/alignment/_ssw_wrapper.pyx":221
  *         """
  *         return self.p.read_begin1 + self.index_starts_at if (self.p.read_begin1
  *                                                              >= 0) else -1             # <<<<<<<<<<<<<<
@@ -2779,14 +2828,14 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  */
   if (((__pyx_v_self->p->read_begin1 >= 0) != 0)) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":221
+    /* "skbio/alignment/_ssw_wrapper.pyx":220
  * 
  *         """
  *         return self.p.read_begin1 + self.index_starts_at if (self.p.read_begin1             # <<<<<<<<<<<<<<
  *                                                              >= 0) else -1
  * 
  */
-    __pyx_t_2 = __Pyx_PyInt_From_int((__pyx_v_self->p->read_begin1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyInt_From_npy_int32((__pyx_v_self->p->read_begin1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_t_1 = __pyx_t_2;
     __pyx_t_2 = 0;
@@ -2798,7 +2847,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":208
+  /* "skbio/alignment/_ssw_wrapper.pyx":207
  * 
  *     @property
  *     def query_begin(self):             # <<<<<<<<<<<<<<
@@ -2818,7 +2867,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":225
+/* "skbio/alignment/_ssw_wrapper.pyx":224
  * 
  *     @property
  *     def query_end(self):             # <<<<<<<<<<<<<<
@@ -2849,7 +2898,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("query_end", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":238
+  /* "skbio/alignment/_ssw_wrapper.pyx":237
  * 
  *         """
  *         return self.p.read_end1 + self.index_starts_at             # <<<<<<<<<<<<<<
@@ -2857,13 +2906,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     @property
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->read_end1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyInt_From_npy_int32((__pyx_v_self->p->read_end1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 237; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":225
+  /* "skbio/alignment/_ssw_wrapper.pyx":224
  * 
  *     @property
  *     def query_end(self):             # <<<<<<<<<<<<<<
@@ -2882,7 +2931,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":241
+/* "skbio/alignment/_ssw_wrapper.pyx":240
  * 
  *     @property
  *     def cigar(self):             # <<<<<<<<<<<<<<
@@ -2922,7 +2971,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("cigar", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":263
+  /* "skbio/alignment/_ssw_wrapper.pyx":262
  *         """
  *         # Memoization! (1/2)
  *         if self._cigar_string is not None:             # <<<<<<<<<<<<<<
@@ -2933,7 +2982,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_t_2 = (__pyx_t_1 != 0);
   if (__pyx_t_2) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":264
+    /* "skbio/alignment/_ssw_wrapper.pyx":263
  *         # Memoization! (1/2)
  *         if self._cigar_string is not None:
  *             return self._cigar_string             # <<<<<<<<<<<<<<
@@ -2944,21 +2993,29 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     __Pyx_INCREF(__pyx_v_self->_cigar_string);
     __pyx_r = __pyx_v_self->_cigar_string;
     goto __pyx_L0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":262
+ *         """
+ *         # Memoization! (1/2)
+ *         if self._cigar_string is not None:             # <<<<<<<<<<<<<<
+ *             return self._cigar_string
+ *         cigar_list = []
+ */
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":265
+  /* "skbio/alignment/_ssw_wrapper.pyx":264
  *         if self._cigar_string is not None:
  *             return self._cigar_string
  *         cigar_list = []             # <<<<<<<<<<<<<<
  *         for i in range(self.p.cigarLen):
  *             # stored the same as that in BAM format,
  */
-  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_v_cigar_list = ((PyObject*)__pyx_t_3);
   __pyx_t_3 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":266
+  /* "skbio/alignment/_ssw_wrapper.pyx":265
  *             return self._cigar_string
  *         cigar_list = []
  *         for i in range(self.p.cigarLen):             # <<<<<<<<<<<<<<
@@ -2969,60 +3026,60 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
     __pyx_v_i = __pyx_t_5;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":271
+    /* "skbio/alignment/_ssw_wrapper.pyx":270
  * 
  *             # Length, remove first 4 bits
  *             cigar_list.append(str(self.p.cigar[i] >> 4))             # <<<<<<<<<<<<<<
  *             # M/I/D, lookup first 4 bits in the mid_table
  *             cigar_list.append(mid_table[self.p.cigar[i] & 0xf])
  */
-    __pyx_t_3 = __Pyx_PyInt_From_long(((__pyx_v_self->p->cigar[__pyx_v_i]) >> 4)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyInt_From_long(((__pyx_v_self->p->cigar[__pyx_v_i]) >> 4)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_6);
     __Pyx_GIVEREF(__pyx_t_3);
     PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3);
     __pyx_t_3 = 0;
-    __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)(&PyString_Type)), __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_cigar_list, __pyx_t_3); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_cigar_list, __pyx_t_3); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":273
+    /* "skbio/alignment/_ssw_wrapper.pyx":272
  *             cigar_list.append(str(self.p.cigar[i] >> 4))
  *             # M/I/D, lookup first 4 bits in the mid_table
  *             cigar_list.append(mid_table[self.p.cigar[i] & 0xf])             # <<<<<<<<<<<<<<
  *         # Memoization! (2/2)
  *         self._cigar_string = "".join(cigar_list)
  */
-    __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_mid_table); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_mid_table); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __pyx_t_8 = ((__pyx_v_self->p->cigar[__pyx_v_i]) & 0xf);
-    __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_3, __pyx_t_8, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_3, __pyx_t_8, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
     __Pyx_GOTREF(__pyx_t_6);
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_cigar_list, __pyx_t_6); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_cigar_list, __pyx_t_6); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":275
+  /* "skbio/alignment/_ssw_wrapper.pyx":274
  *             cigar_list.append(mid_table[self.p.cigar[i] & 0xf])
  *         # Memoization! (2/2)
  *         self._cigar_string = "".join(cigar_list)             # <<<<<<<<<<<<<<
  *         return self._cigar_string
  * 
  */
-  __pyx_t_6 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_cigar_list); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_cigar_list); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_6);
-  if (!(likely(PyString_CheckExact(__pyx_t_6))||((__pyx_t_6) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_t_6)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(PyString_CheckExact(__pyx_t_6))||((__pyx_t_6) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_t_6)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GIVEREF(__pyx_t_6);
   __Pyx_GOTREF(__pyx_v_self->_cigar_string);
   __Pyx_DECREF(__pyx_v_self->_cigar_string);
   __pyx_v_self->_cigar_string = ((PyObject*)__pyx_t_6);
   __pyx_t_6 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":276
+  /* "skbio/alignment/_ssw_wrapper.pyx":275
  *         # Memoization! (2/2)
  *         self._cigar_string = "".join(cigar_list)
  *         return self._cigar_string             # <<<<<<<<<<<<<<
@@ -3034,7 +3091,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = __pyx_v_self->_cigar_string;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":241
+  /* "skbio/alignment/_ssw_wrapper.pyx":240
  * 
  *     @property
  *     def cigar(self):             # <<<<<<<<<<<<<<
@@ -3055,7 +3112,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":279
+/* "skbio/alignment/_ssw_wrapper.pyx":278
  * 
  *     @property
  *     def query_sequence(self):             # <<<<<<<<<<<<<<
@@ -3082,7 +3139,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("query_sequence", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":288
+  /* "skbio/alignment/_ssw_wrapper.pyx":287
  * 
  *         """
  *         return self.read_sequence             # <<<<<<<<<<<<<<
@@ -3094,7 +3151,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = __pyx_v_self->read_sequence;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":279
+  /* "skbio/alignment/_ssw_wrapper.pyx":278
  * 
  *     @property
  *     def query_sequence(self):             # <<<<<<<<<<<<<<
@@ -3109,7 +3166,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":291
+/* "skbio/alignment/_ssw_wrapper.pyx":290
  * 
  *     @property
  *     def target_sequence(self):             # <<<<<<<<<<<<<<
@@ -3136,7 +3193,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("target_sequence", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":300
+  /* "skbio/alignment/_ssw_wrapper.pyx":299
  * 
  *         """
  *         return self.reference_sequence             # <<<<<<<<<<<<<<
@@ -3148,7 +3205,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = __pyx_v_self->reference_sequence;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":291
+  /* "skbio/alignment/_ssw_wrapper.pyx":290
  * 
  *     @property
  *     def target_sequence(self):             # <<<<<<<<<<<<<<
@@ -3163,7 +3220,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":303
+/* "skbio/alignment/_ssw_wrapper.pyx":302
  * 
  *     @property
  *     def aligned_query_sequence(self):             # <<<<<<<<<<<<<<
@@ -3203,20 +3260,20 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("aligned_query_sequence", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":317
+  /* "skbio/alignment/_ssw_wrapper.pyx":316
  * 
  *         """
  *         if self.query_sequence:             # <<<<<<<<<<<<<<
  *             return self._get_aligned_sequence(self.query_sequence,
  *                                               self._tuples_from_cigar(),
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   if (__pyx_t_2) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":318
+    /* "skbio/alignment/_ssw_wrapper.pyx":317
  *         """
  *         if self.query_sequence:
  *             return self._get_aligned_sequence(self.query_sequence,             # <<<<<<<<<<<<<<
@@ -3224,19 +3281,19 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *                                               self.query_begin, self.query_end,
  */
     __Pyx_XDECREF(__pyx_r);
-    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_aligned_sequence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_aligned_sequence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":319
+    /* "skbio/alignment/_ssw_wrapper.pyx":318
  *         if self.query_sequence:
  *             return self._get_aligned_sequence(self.query_sequence,
  *                                               self._tuples_from_cigar(),             # <<<<<<<<<<<<<<
  *                                               self.query_begin, self.query_end,
  *                                               "D")
  */
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_tuples_from_cigar); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_tuples_from_cigar); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_6);
     __pyx_t_7 = NULL;
     if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_6))) {
@@ -3249,24 +3306,24 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       }
     }
     if (__pyx_t_7) {
-      __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
     } else {
-      __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     __Pyx_GOTREF(__pyx_t_5);
     __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":320
+    /* "skbio/alignment/_ssw_wrapper.pyx":319
  *             return self._get_aligned_sequence(self.query_sequence,
  *                                               self._tuples_from_cigar(),
  *                                               self.query_begin, self.query_end,             # <<<<<<<<<<<<<<
  *                                               "D")
  *         return None
  */
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_begin); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_begin); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_6);
-    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_end); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_end); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_7);
     __pyx_t_8 = NULL;
     __pyx_t_9 = 0;
@@ -3280,7 +3337,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
         __pyx_t_9 = 1;
       }
     }
-    __pyx_t_10 = PyTuple_New(5+__pyx_t_9); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_10 = PyTuple_New(5+__pyx_t_9); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_10);
     if (__pyx_t_8) {
       __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); __pyx_t_8 = NULL;
@@ -3300,16 +3357,24 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     __pyx_t_5 = 0;
     __pyx_t_6 = 0;
     __pyx_t_7 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     __pyx_r = __pyx_t_1;
     __pyx_t_1 = 0;
     goto __pyx_L0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":316
+ * 
+ *         """
+ *         if self.query_sequence:             # <<<<<<<<<<<<<<
+ *             return self._get_aligned_sequence(self.query_sequence,
+ *                                               self._tuples_from_cigar(),
+ */
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":322
+  /* "skbio/alignment/_ssw_wrapper.pyx":321
  *                                               self.query_begin, self.query_end,
  *                                               "D")
  *         return None             # <<<<<<<<<<<<<<
@@ -3321,7 +3386,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = Py_None;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":303
+  /* "skbio/alignment/_ssw_wrapper.pyx":302
  * 
  *     @property
  *     def aligned_query_sequence(self):             # <<<<<<<<<<<<<<
@@ -3347,7 +3412,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":325
+/* "skbio/alignment/_ssw_wrapper.pyx":324
  * 
  *     @property
  *     def aligned_target_sequence(self):             # <<<<<<<<<<<<<<
@@ -3387,20 +3452,20 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("aligned_target_sequence", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":339
+  /* "skbio/alignment/_ssw_wrapper.pyx":338
  * 
  *         """
  *         if self.target_sequence:             # <<<<<<<<<<<<<<
  *             return self._get_aligned_sequence(self.target_sequence,
  *                                               self._tuples_from_cigar(),
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   if (__pyx_t_2) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":340
+    /* "skbio/alignment/_ssw_wrapper.pyx":339
  *         """
  *         if self.target_sequence:
  *             return self._get_aligned_sequence(self.target_sequence,             # <<<<<<<<<<<<<<
@@ -3408,19 +3473,19 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *                                               self.target_begin,
  */
     __Pyx_XDECREF(__pyx_r);
-    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_aligned_sequence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_aligned_sequence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":341
+    /* "skbio/alignment/_ssw_wrapper.pyx":340
  *         if self.target_sequence:
  *             return self._get_aligned_sequence(self.target_sequence,
  *                                               self._tuples_from_cigar(),             # <<<<<<<<<<<<<<
  *                                               self.target_begin,
  *                                               self.target_end_optimal,
  */
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_tuples_from_cigar); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_tuples_from_cigar); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_6);
     __pyx_t_7 = NULL;
     if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_6))) {
@@ -3433,32 +3498,32 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       }
     }
     if (__pyx_t_7) {
-      __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
     } else {
-      __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     __Pyx_GOTREF(__pyx_t_5);
     __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":342
+    /* "skbio/alignment/_ssw_wrapper.pyx":341
  *             return self._get_aligned_sequence(self.target_sequence,
  *                                               self._tuples_from_cigar(),
  *                                               self.target_begin,             # <<<<<<<<<<<<<<
  *                                               self.target_end_optimal,
  *                                               "I")
  */
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_begin); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_begin); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_6);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":343
+    /* "skbio/alignment/_ssw_wrapper.pyx":342
  *                                               self._tuples_from_cigar(),
  *                                               self.target_begin,
  *                                               self.target_end_optimal,             # <<<<<<<<<<<<<<
  *                                               "I")
  *         return None
  */
-    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_7);
     __pyx_t_8 = NULL;
     __pyx_t_9 = 0;
@@ -3472,7 +3537,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
         __pyx_t_9 = 1;
       }
     }
-    __pyx_t_10 = PyTuple_New(5+__pyx_t_9); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_10 = PyTuple_New(5+__pyx_t_9); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_10);
     if (__pyx_t_8) {
       __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); __pyx_t_8 = NULL;
@@ -3492,16 +3557,24 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     __pyx_t_5 = 0;
     __pyx_t_6 = 0;
     __pyx_t_7 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     __pyx_r = __pyx_t_1;
     __pyx_t_1 = 0;
     goto __pyx_L0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":338
+ * 
+ *         """
+ *         if self.target_sequence:             # <<<<<<<<<<<<<<
+ *             return self._get_aligned_sequence(self.target_sequence,
+ *                                               self._tuples_from_cigar(),
+ */
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":345
+  /* "skbio/alignment/_ssw_wrapper.pyx":344
  *                                               self.target_end_optimal,
  *                                               "I")
  *         return None             # <<<<<<<<<<<<<<
@@ -3513,7 +3586,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = Py_None;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":325
+  /* "skbio/alignment/_ssw_wrapper.pyx":324
  * 
  *     @property
  *     def aligned_target_sequence(self):             # <<<<<<<<<<<<<<
@@ -3539,7 +3612,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":347
+/* "skbio/alignment/_ssw_wrapper.pyx":346
  *         return None
  * 
  *     def set_zero_based(self, is_zero_based):             # <<<<<<<<<<<<<<
@@ -3570,17 +3643,17 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("set_zero_based", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":351
+  /* "skbio/alignment/_ssw_wrapper.pyx":350
  * 
  *         """
  *         if is_zero_based:             # <<<<<<<<<<<<<<
  *             self.index_starts_at = 0
  *         else:
  */
-  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_is_zero_based); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_is_zero_based); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":352
+    /* "skbio/alignment/_ssw_wrapper.pyx":351
  *         """
  *         if is_zero_based:
  *             self.index_starts_at = 0             # <<<<<<<<<<<<<<
@@ -3588,22 +3661,30 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *             self.index_starts_at = 1
  */
     __pyx_v_self->index_starts_at = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":350
+ * 
+ *         """
+ *         if is_zero_based:             # <<<<<<<<<<<<<<
+ *             self.index_starts_at = 0
+ *         else:
+ */
     goto __pyx_L3;
   }
-  /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":354
+  /* "skbio/alignment/_ssw_wrapper.pyx":353
  *             self.index_starts_at = 0
  *         else:
  *             self.index_starts_at = 1             # <<<<<<<<<<<<<<
  * 
  *     def is_zero_based(self):
  */
+  /*else*/ {
     __pyx_v_self->index_starts_at = 1;
   }
   __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":347
+  /* "skbio/alignment/_ssw_wrapper.pyx":346
  *         return None
  * 
  *     def set_zero_based(self, is_zero_based):             # <<<<<<<<<<<<<<
@@ -3623,7 +3704,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":356
+/* "skbio/alignment/_ssw_wrapper.pyx":355
  *             self.index_starts_at = 1
  * 
  *     def is_zero_based(self):             # <<<<<<<<<<<<<<
@@ -3654,7 +3735,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("is_zero_based", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":365
+  /* "skbio/alignment/_ssw_wrapper.pyx":364
  * 
  *         """
  *         return self.index_starts_at == 0             # <<<<<<<<<<<<<<
@@ -3662,13 +3743,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     def _get_aligned_sequence(self, sequence, tuple_cigar, begin, end,
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyBool_FromLong((__pyx_v_self->index_starts_at == 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyBool_FromLong((__pyx_v_self->index_starts_at == 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":356
+  /* "skbio/alignment/_ssw_wrapper.pyx":355
  *             self.index_starts_at = 1
  * 
  *     def is_zero_based(self):             # <<<<<<<<<<<<<<
@@ -3687,7 +3768,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":367
+/* "skbio/alignment/_ssw_wrapper.pyx":366
  *         return self.index_starts_at == 0
  * 
  *     def _get_aligned_sequence(self, sequence, tuple_cigar, begin, end,             # <<<<<<<<<<<<<<
@@ -3732,26 +3813,26 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
         case  1:
         if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_tuple_cigar)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
         case  2:
         if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_begin)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
         case  3:
         if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_end)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
         case  4:
         if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_gap_type)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_get_aligned_sequence") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_get_aligned_sequence") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else if (PyTuple_GET_SIZE(__pyx_args) != 5) {
       goto __pyx_L5_argtuple_error;
@@ -3770,7 +3851,7 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure._get_aligned_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ -3809,14 +3890,14 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_get_aligned_sequence", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":370
+  /* "skbio/alignment/_ssw_wrapper.pyx":369
  *                               gap_type):
  *         # Save the original index scheme and then set it to 0 (1/2)
  *         orig_z_base = self.is_zero_based()             # <<<<<<<<<<<<<<
  *         self.set_zero_based(True)
  *         aligned_sequence = []
  */
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_is_zero_based); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_is_zero_based); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_t_3 = NULL;
   if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_2))) {
@@ -3829,58 +3910,58 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     }
   }
   if (__pyx_t_3) {
-    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   } else {
-    __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __pyx_v_orig_z_base = __pyx_t_1;
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":371
+  /* "skbio/alignment/_ssw_wrapper.pyx":370
  *         # Save the original index scheme and then set it to 0 (1/2)
  *         orig_z_base = self.is_zero_based()
  *         self.set_zero_based(True)             # <<<<<<<<<<<<<<
  *         aligned_sequence = []
  *         seq = sequence[begin:end + 1]
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_zero_based); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_zero_based); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":372
+  /* "skbio/alignment/_ssw_wrapper.pyx":371
  *         orig_z_base = self.is_zero_based()
  *         self.set_zero_based(True)
  *         aligned_sequence = []             # <<<<<<<<<<<<<<
  *         seq = sequence[begin:end + 1]
  *         index = 0
  */
-  __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_v_aligned_sequence = ((PyObject*)__pyx_t_2);
   __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":373
+  /* "skbio/alignment/_ssw_wrapper.pyx":372
  *         self.set_zero_based(True)
  *         aligned_sequence = []
  *         seq = sequence[begin:end + 1]             # <<<<<<<<<<<<<<
  *         index = 0
  *         for length, mid in tuple_cigar:
  */
-  __pyx_t_2 = PyNumber_Add(__pyx_v_end, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_v_end, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_sequence, 0, 0, &__pyx_v_begin, &__pyx_t_2, NULL, 0, 0, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_sequence, 0, 0, &__pyx_v_begin, &__pyx_t_2, NULL, 0, 0, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __pyx_v_seq = __pyx_t_1;
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":374
+  /* "skbio/alignment/_ssw_wrapper.pyx":373
  *         aligned_sequence = []
  *         seq = sequence[begin:end + 1]
  *         index = 0             # <<<<<<<<<<<<<<
@@ -3890,7 +3971,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_INCREF(__pyx_int_0);
   __pyx_v_index = __pyx_int_0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":375
+  /* "skbio/alignment/_ssw_wrapper.pyx":374
  *         seq = sequence[begin:end + 1]
  *         index = 0
  *         for length, mid in tuple_cigar:             # <<<<<<<<<<<<<<
@@ -3901,26 +3982,26 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     __pyx_t_1 = __pyx_v_tuple_cigar; __Pyx_INCREF(__pyx_t_1); __pyx_t_4 = 0;
     __pyx_t_5 = NULL;
   } else {
-    __pyx_t_4 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_tuple_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_tuple_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_5 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   for (;;) {
     if (likely(!__pyx_t_5)) {
       if (likely(PyList_CheckExact(__pyx_t_1))) {
         if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_1)) break;
         #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         #else
-        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_2);
         #endif
       } else {
         if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
         #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         #else
-        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_2);
         #endif
       }
@@ -3930,7 +4011,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
         PyObject* exc_type = PyErr_Occurred();
         if (exc_type) {
           if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         }
         break;
       }
@@ -3946,7 +4027,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       if (unlikely(size != 2)) {
         if (size > 2) __Pyx_RaiseTooManyValuesError(2);
         else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
-        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       #if CYTHON_COMPILING_IN_CPYTHON
       if (likely(PyTuple_CheckExact(sequence))) {
@@ -3959,15 +4040,15 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       __Pyx_INCREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_t_6);
       #else
-      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
       #endif
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
     } else {
       Py_ssize_t index = -1;
-      __pyx_t_7 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_7 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_7);
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
       __pyx_t_8 = Py_TYPE(__pyx_t_7)->tp_iternext;
@@ -3975,7 +4056,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       __Pyx_GOTREF(__pyx_t_3);
       index = 1; __pyx_t_6 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_6)) goto __pyx_L5_unpacking_failed;
       __Pyx_GOTREF(__pyx_t_6);
-      if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __pyx_t_8 = NULL;
       __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
       goto __pyx_L6_unpacking_done;
@@ -3983,7 +4064,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
       __pyx_t_8 = NULL;
       if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __pyx_L6_unpacking_done:;
     }
     __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_3);
@@ -3991,36 +4072,36 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     __Pyx_XDECREF_SET(__pyx_v_mid, __pyx_t_6);
     __pyx_t_6 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":376
+    /* "skbio/alignment/_ssw_wrapper.pyx":375
  *         index = 0
  *         for length, mid in tuple_cigar:
  *             if mid == 'M':             # <<<<<<<<<<<<<<
  *                 aligned_sequence += [seq[i]
  *                                      for i in range(index, length + index)]
  */
-    __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_mid, __pyx_n_s_M, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_mid, __pyx_n_s_M, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     if (__pyx_t_9) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":377
+      /* "skbio/alignment/_ssw_wrapper.pyx":376
  *         for length, mid in tuple_cigar:
  *             if mid == 'M':
  *                 aligned_sequence += [seq[i]             # <<<<<<<<<<<<<<
  *                                      for i in range(index, length + index)]
  *                 index += length
  */
-      __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":378
+      /* "skbio/alignment/_ssw_wrapper.pyx":377
  *             if mid == 'M':
  *                 aligned_sequence += [seq[i]
  *                                      for i in range(index, length + index)]             # <<<<<<<<<<<<<<
  *                 index += length
  *             elif mid == gap_type:
  */
-      __pyx_t_6 = PyNumber_Add(__pyx_v_length, __pyx_v_index); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = PyNumber_Add(__pyx_v_length, __pyx_v_index); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
-      __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_v_index);
       __Pyx_GIVEREF(__pyx_v_index);
@@ -4028,16 +4109,16 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       __Pyx_GIVEREF(__pyx_t_6);
       PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_6);
       __pyx_t_6 = 0;
-      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (likely(PyList_CheckExact(__pyx_t_6)) || PyTuple_CheckExact(__pyx_t_6)) {
         __pyx_t_3 = __pyx_t_6; __Pyx_INCREF(__pyx_t_3); __pyx_t_10 = 0;
         __pyx_t_11 = NULL;
       } else {
-        __pyx_t_10 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_10 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
-        __pyx_t_11 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_11 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
       for (;;) {
@@ -4045,17 +4126,17 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
           if (likely(PyList_CheckExact(__pyx_t_3))) {
             if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_3)) break;
             #if CYTHON_COMPILING_IN_CPYTHON
-            __pyx_t_6 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            __pyx_t_6 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
             #else
-            __pyx_t_6 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            __pyx_t_6 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
             __Pyx_GOTREF(__pyx_t_6);
             #endif
           } else {
             if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
             #if CYTHON_COMPILING_IN_CPYTHON
-            __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
             #else
-            __pyx_t_6 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            __pyx_t_6 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
             __Pyx_GOTREF(__pyx_t_6);
             #endif
           }
@@ -4065,7 +4146,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
             PyObject* exc_type = PyErr_Occurred();
             if (exc_type) {
               if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-              else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+              else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
             }
             break;
           }
@@ -4074,19 +4155,19 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
         __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6);
         __pyx_t_6 = 0;
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":377
+        /* "skbio/alignment/_ssw_wrapper.pyx":376
  *         for length, mid in tuple_cigar:
  *             if mid == 'M':
  *                 aligned_sequence += [seq[i]             # <<<<<<<<<<<<<<
  *                                      for i in range(index, length + index)]
  *                 index += length
  */
-        __pyx_t_6 = PyObject_GetItem(__pyx_v_seq, __pyx_v_i); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+        __pyx_t_6 = PyObject_GetItem(__pyx_v_seq, __pyx_v_i); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
         __Pyx_GOTREF(__pyx_t_6);
-        if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":378
+        /* "skbio/alignment/_ssw_wrapper.pyx":377
  *             if mid == 'M':
  *                 aligned_sequence += [seq[i]
  *                                      for i in range(index, length + index)]             # <<<<<<<<<<<<<<
@@ -4096,74 +4177,98 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       }
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":377
+      /* "skbio/alignment/_ssw_wrapper.pyx":376
  *         for length, mid in tuple_cigar:
  *             if mid == 'M':
  *                 aligned_sequence += [seq[i]             # <<<<<<<<<<<<<<
  *                                      for i in range(index, length + index)]
  *                 index += length
  */
-      __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
       __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_3));
       __pyx_t_3 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":379
+      /* "skbio/alignment/_ssw_wrapper.pyx":378
  *                 aligned_sequence += [seq[i]
  *                                      for i in range(index, length + index)]
  *                 index += length             # <<<<<<<<<<<<<<
  *             elif mid == gap_type:
  *                 aligned_sequence += (['-'] * length)
  */
-      __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_index, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_index, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
       __pyx_t_3 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":375
+ *         index = 0
+ *         for length, mid in tuple_cigar:
+ *             if mid == 'M':             # <<<<<<<<<<<<<<
+ *                 aligned_sequence += [seq[i]
+ *                                      for i in range(index, length + index)]
+ */
       goto __pyx_L7;
     }
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":380
+    /* "skbio/alignment/_ssw_wrapper.pyx":379
  *                                      for i in range(index, length + index)]
  *                 index += length
  *             elif mid == gap_type:             # <<<<<<<<<<<<<<
  *                 aligned_sequence += (['-'] * length)
  *             else:
  */
-    __pyx_t_3 = PyObject_RichCompare(__pyx_v_mid, __pyx_v_gap_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyObject_RichCompare(__pyx_v_mid, __pyx_v_gap_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     if (__pyx_t_9) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":381
+      /* "skbio/alignment/_ssw_wrapper.pyx":380
  *                 index += length
  *             elif mid == gap_type:
  *                 aligned_sequence += (['-'] * length)             # <<<<<<<<<<<<<<
  *             else:
  *                 pass
  */
-      __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_kp_s__8);
       __Pyx_GIVEREF(__pyx_kp_s__8);
       PyList_SET_ITEM(__pyx_t_3, 0, __pyx_kp_s__8);
-      { PyObject* __pyx_temp = PyNumber_InPlaceMultiply(__pyx_t_3, __pyx_v_length); if (unlikely(!__pyx_temp)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      { PyObject* __pyx_temp = PyNumber_InPlaceMultiply(__pyx_t_3, __pyx_v_length); if (unlikely(!__pyx_temp)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_temp);
         __Pyx_DECREF(__pyx_t_3);
         __pyx_t_3 = __pyx_temp;
       }
-      __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_2));
       __pyx_t_2 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":379
+ *                                      for i in range(index, length + index)]
+ *                 index += length
+ *             elif mid == gap_type:             # <<<<<<<<<<<<<<
+ *                 aligned_sequence += (['-'] * length)
+ *             else:
+ */
       goto __pyx_L7;
     }
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":382
+ *                 aligned_sequence += (['-'] * length)
+ *             else:
+ *                 pass             # <<<<<<<<<<<<<<
+ *         # Our sequence end is sometimes beyond the cigar:
+ *         aligned_sequence += [seq[i] for i in range(index, end - begin + 1)]
+ */
     /*else*/ {
     }
     __pyx_L7:;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":375
+    /* "skbio/alignment/_ssw_wrapper.pyx":374
  *         seq = sequence[begin:end + 1]
  *         index = 0
  *         for length, mid in tuple_cigar:             # <<<<<<<<<<<<<<
@@ -4173,21 +4278,21 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":385
+  /* "skbio/alignment/_ssw_wrapper.pyx":384
  *                 pass
  *         # Our sequence end is sometimes beyond the cigar:
  *         aligned_sequence += [seq[i] for i in range(index, end - begin + 1)]             # <<<<<<<<<<<<<<
  *         # Revert our index scheme to the original (2/2)
  *         self.set_zero_based(orig_z_base)
  */
-  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyNumber_Subtract(__pyx_v_end, __pyx_v_begin); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyNumber_Subtract(__pyx_v_end, __pyx_v_begin); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_int_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_t_2, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_INCREF(__pyx_v_index);
   __Pyx_GIVEREF(__pyx_v_index);
@@ -4195,16 +4300,16 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_GIVEREF(__pyx_t_3);
   PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
   __pyx_t_3 = 0;
-  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) {
     __pyx_t_2 = __pyx_t_3; __Pyx_INCREF(__pyx_t_2); __pyx_t_4 = 0;
     __pyx_t_5 = NULL;
   } else {
-    __pyx_t_4 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
-    __pyx_t_5 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   for (;;) {
@@ -4212,17 +4317,17 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       if (likely(PyList_CheckExact(__pyx_t_2))) {
         if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_2)) break;
         #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_3 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         #else
-        __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
         #endif
       } else {
         if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
         #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         #else
-        __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
         #endif
       }
@@ -4232,7 +4337,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
         PyObject* exc_type = PyErr_Occurred();
         if (exc_type) {
           if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         }
         break;
       }
@@ -4240,26 +4345,26 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     }
     __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_3);
     __pyx_t_3 = 0;
-    __pyx_t_3 = PyObject_GetItem(__pyx_v_seq, __pyx_v_i); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __pyx_t_3 = PyObject_GetItem(__pyx_v_seq, __pyx_v_i); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
     __Pyx_GOTREF(__pyx_t_3);
-    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_3))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_3))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   }
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_2));
   __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":387
+  /* "skbio/alignment/_ssw_wrapper.pyx":386
  *         aligned_sequence += [seq[i] for i in range(index, end - begin + 1)]
  *         # Revert our index scheme to the original (2/2)
  *         self.set_zero_based(orig_z_base)             # <<<<<<<<<<<<<<
  *         return "".join(aligned_sequence)
  * 
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_zero_based); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_zero_based); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_t_3 = NULL;
   if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_1))) {
@@ -4272,23 +4377,23 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     }
   }
   if (!__pyx_t_3) {
-    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_orig_z_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_orig_z_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
   } else {
-    __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_6);
     __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL;
     __Pyx_INCREF(__pyx_v_orig_z_base);
     __Pyx_GIVEREF(__pyx_v_orig_z_base);
     PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_v_orig_z_base);
-    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":388
+  /* "skbio/alignment/_ssw_wrapper.pyx":387
  *         # Revert our index scheme to the original (2/2)
  *         self.set_zero_based(orig_z_base)
  *         return "".join(aligned_sequence)             # <<<<<<<<<<<<<<
@@ -4296,13 +4401,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     def _tuples_from_cigar(self):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_2 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_aligned_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_aligned_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_r = __pyx_t_2;
   __pyx_t_2 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":367
+  /* "skbio/alignment/_ssw_wrapper.pyx":366
  *         return self.index_starts_at == 0
  * 
  *     def _get_aligned_sequence(self, sequence, tuple_cigar, begin, end,             # <<<<<<<<<<<<<<
@@ -4332,7 +4437,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":390
+/* "skbio/alignment/_ssw_wrapper.pyx":389
  *         return "".join(aligned_sequence)
  * 
  *     def _tuples_from_cigar(self):             # <<<<<<<<<<<<<<
@@ -4372,46 +4477,46 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_tuples_from_cigar", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":391
+  /* "skbio/alignment/_ssw_wrapper.pyx":390
  * 
  *     def _tuples_from_cigar(self):
  *         tuples = []             # <<<<<<<<<<<<<<
  *         length_stack = []
  *         for character in self.cigar:
  */
-  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_tuples = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":392
+  /* "skbio/alignment/_ssw_wrapper.pyx":391
  *     def _tuples_from_cigar(self):
  *         tuples = []
  *         length_stack = []             # <<<<<<<<<<<<<<
  *         for character in self.cigar:
  *             if character.isdigit():
  */
-  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_length_stack = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":393
+  /* "skbio/alignment/_ssw_wrapper.pyx":392
  *         tuples = []
  *         length_stack = []
  *         for character in self.cigar:             # <<<<<<<<<<<<<<
  *             if character.isdigit():
  *                 length_stack.append(character)
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   if (likely(PyList_CheckExact(__pyx_t_1)) || PyTuple_CheckExact(__pyx_t_1)) {
     __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
     __pyx_t_4 = NULL;
   } else {
-    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
-    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   for (;;) {
@@ -4419,17 +4524,17 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       if (likely(PyList_CheckExact(__pyx_t_2))) {
         if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
         #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         #else
-        __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_1);
         #endif
       } else {
         if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
         #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         #else
-        __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_1);
         #endif
       }
@@ -4439,7 +4544,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
         PyObject* exc_type = PyErr_Occurred();
         if (exc_type) {
           if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         }
         break;
       }
@@ -4448,14 +4553,14 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     __Pyx_XDECREF_SET(__pyx_v_character, __pyx_t_1);
     __pyx_t_1 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":394
+    /* "skbio/alignment/_ssw_wrapper.pyx":393
  *         length_stack = []
  *         for character in self.cigar:
  *             if character.isdigit():             # <<<<<<<<<<<<<<
  *                 length_stack.append(character)
  *             else:
  */
-    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_character, __pyx_n_s_isdigit); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_character, __pyx_n_s_isdigit); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_5);
     __pyx_t_6 = NULL;
     if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_5))) {
@@ -4468,42 +4573,50 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       }
     }
     if (__pyx_t_6) {
-      __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
     } else {
-      __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
     if (__pyx_t_7) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":395
+      /* "skbio/alignment/_ssw_wrapper.pyx":394
  *         for character in self.cigar:
  *             if character.isdigit():
  *                 length_stack.append(character)             # <<<<<<<<<<<<<<
  *             else:
  *                 tuples.append((int("".join(length_stack)), character))
  */
-      __pyx_t_8 = __Pyx_PyList_Append(__pyx_v_length_stack, __pyx_v_character); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_8 = __Pyx_PyList_Append(__pyx_v_length_stack, __pyx_v_character); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":393
+ *         length_stack = []
+ *         for character in self.cigar:
+ *             if character.isdigit():             # <<<<<<<<<<<<<<
+ *                 length_stack.append(character)
+ *             else:
+ */
       goto __pyx_L5;
     }
-    /*else*/ {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":397
+    /* "skbio/alignment/_ssw_wrapper.pyx":396
  *                 length_stack.append(character)
  *             else:
  *                 tuples.append((int("".join(length_stack)), character))             # <<<<<<<<<<<<<<
  *                 length_stack = []
  *         return tuples
  */
-      __pyx_t_1 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_length_stack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    /*else*/ {
+      __pyx_t_1 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_length_stack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
-      __pyx_t_5 = PyNumber_Int(__pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_5 = PyNumber_Int(__pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_5);
       __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-      __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
       __Pyx_GIVEREF(__pyx_t_5);
       PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
@@ -4511,24 +4624,24 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       __Pyx_GIVEREF(__pyx_v_character);
       PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_character);
       __pyx_t_5 = 0;
-      __pyx_t_8 = __Pyx_PyList_Append(__pyx_v_tuples, __pyx_t_1); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_8 = __Pyx_PyList_Append(__pyx_v_tuples, __pyx_t_1); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":398
+      /* "skbio/alignment/_ssw_wrapper.pyx":397
  *             else:
  *                 tuples.append((int("".join(length_stack)), character))
  *                 length_stack = []             # <<<<<<<<<<<<<<
  *         return tuples
  * 
  */
-      __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
       __Pyx_DECREF_SET(__pyx_v_length_stack, ((PyObject*)__pyx_t_1));
       __pyx_t_1 = 0;
     }
     __pyx_L5:;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":393
+    /* "skbio/alignment/_ssw_wrapper.pyx":392
  *         tuples = []
  *         length_stack = []
  *         for character in self.cigar:             # <<<<<<<<<<<<<<
@@ -4538,7 +4651,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   }
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":399
+  /* "skbio/alignment/_ssw_wrapper.pyx":398
  *                 tuples.append((int("".join(length_stack)), character))
  *                 length_stack = []
  *         return tuples             # <<<<<<<<<<<<<<
@@ -4550,7 +4663,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = __pyx_v_tuples;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":390
+  /* "skbio/alignment/_ssw_wrapper.pyx":389
  *         return "".join(aligned_sequence)
  * 
  *     def _tuples_from_cigar(self):             # <<<<<<<<<<<<<<
@@ -4575,7 +4688,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":543
+/* "skbio/alignment/_ssw_wrapper.pyx":542
  *     cdef cnp.ndarray __KEEP_IT_IN_SCOPE_matrix
  * 
  *     def __cinit__(self, query_sequence,             # <<<<<<<<<<<<<<
@@ -4616,7 +4729,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
     values[3] = ((PyObject *)__pyx_int_2);
     values[4] = ((PyObject *)__pyx_int_15);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":548
+    /* "skbio/alignment/_ssw_wrapper.pyx":547
  *                   score_size=2,  # BLASTN Default
  *                   mask_length=15,  # Minimum length for a suboptimal alignment
  *                   mask_auto=True,             # <<<<<<<<<<<<<<
@@ -4625,7 +4738,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[5] = ((PyObject *)Py_True);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":549
+    /* "skbio/alignment/_ssw_wrapper.pyx":548
  *                   mask_length=15,  # Minimum length for a suboptimal alignment
  *                   mask_auto=True,
  *                   score_only=False,             # <<<<<<<<<<<<<<
@@ -4634,7 +4747,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[6] = ((PyObject *)Py_False);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":550
+    /* "skbio/alignment/_ssw_wrapper.pyx":549
  *                   mask_auto=True,
  *                   score_only=False,
  *                   score_filter=None,             # <<<<<<<<<<<<<<
@@ -4643,7 +4756,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[7] = ((PyObject *)Py_None);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":551
+    /* "skbio/alignment/_ssw_wrapper.pyx":550
  *                   score_only=False,
  *                   score_filter=None,
  *                   distance_filter=None,             # <<<<<<<<<<<<<<
@@ -4652,7 +4765,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[8] = ((PyObject *)Py_None);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":552
+    /* "skbio/alignment/_ssw_wrapper.pyx":551
  *                   score_filter=None,
  *                   distance_filter=None,
  *                   override_skip_babp=False,             # <<<<<<<<<<<<<<
@@ -4661,7 +4774,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[9] = ((PyObject *)Py_False);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":553
+    /* "skbio/alignment/_ssw_wrapper.pyx":552
  *                   distance_filter=None,
  *                   override_skip_babp=False,
  *                   protein=False,             # <<<<<<<<<<<<<<
@@ -4672,7 +4785,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
     values[11] = ((PyObject *)__pyx_int_2);
     values[12] = ((PyObject *)__pyx_int_neg_3);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":556
+    /* "skbio/alignment/_ssw_wrapper.pyx":555
  *                   match_score=2,  # BLASTN Default
  *                   mismatch_score=-3,  # BLASTN Default
  *                   substitution_matrix=None,             # <<<<<<<<<<<<<<
@@ -4681,7 +4794,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[13] = ((PyObject *)Py_None);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":557
+    /* "skbio/alignment/_ssw_wrapper.pyx":556
  *                   mismatch_score=-3,  # BLASTN Default
  *                   substitution_matrix=None,
  *                   suppress_sequences=False,             # <<<<<<<<<<<<<<
@@ -4690,7 +4803,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[14] = ((PyObject *)Py_False);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":558
+    /* "skbio/alignment/_ssw_wrapper.pyx":557
  *                   substitution_matrix=None,
  *                   suppress_sequences=False,
  *                   zero_index=True):             # <<<<<<<<<<<<<<
@@ -4803,7 +4916,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
         }
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 542; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else {
       switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -4846,7 +4959,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 1, 16, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 1, 16, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 542; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ -4854,7 +4967,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
   __pyx_L4_argument_unpacking_done:;
   __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___cinit__(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self), __pyx_v_query_sequence, __pyx_v_gap_open_penalty, __pyx_v_gap_extend_penalty, __pyx_v_score_size, __pyx_v_mask_length, __pyx_v_mask_auto, __pyx_v_score_only, __pyx_v_score_filter, __pyx_v_distance_filter, __pyx_v_override_skip_babp, __pyx_v_protein, __pyx_v_match_score, __pyx_v_mismatch_score, __pyx_v_substitution [...]
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":543
+  /* "skbio/alignment/_ssw_wrapper.pyx":542
  *     cdef cnp.ndarray __KEEP_IT_IN_SCOPE_matrix
  * 
  *     def __cinit__(self, query_sequence,             # <<<<<<<<<<<<<<
@@ -4909,14 +5022,14 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __pyx_pybuffernd_read_seq.data = NULL;
   __pyx_pybuffernd_read_seq.rcbuffer = &__pyx_pybuffer_read_seq;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":560
+  /* "skbio/alignment/_ssw_wrapper.pyx":559
  *                   zero_index=True):
  *         # initalize our values
  *         self.read_sequence = query_sequence             # <<<<<<<<<<<<<<
  *         if gap_open_penalty <= 0:
  *             raise ValueError("`gap_open_penalty` must be > 0")
  */
-  if (!(likely(PyString_CheckExact(__pyx_v_query_sequence))||((__pyx_v_query_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_query_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(PyString_CheckExact(__pyx_v_query_sequence))||((__pyx_v_query_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_query_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_1 = __pyx_v_query_sequence;
   __Pyx_INCREF(__pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
@@ -4925,79 +5038,95 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __pyx_v_self->read_sequence = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":561
+  /* "skbio/alignment/_ssw_wrapper.pyx":560
  *         # initalize our values
  *         self.read_sequence = query_sequence
  *         if gap_open_penalty <= 0:             # <<<<<<<<<<<<<<
  *             raise ValueError("`gap_open_penalty` must be > 0")
  *         self.gap_open_penalty = gap_open_penalty
  */
-  __pyx_t_1 = PyObject_RichCompare(__pyx_v_gap_open_penalty, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyObject_RichCompare(__pyx_v_gap_open_penalty, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   if (__pyx_t_2) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":562
+    /* "skbio/alignment/_ssw_wrapper.pyx":561
  *         self.read_sequence = query_sequence
  *         if gap_open_penalty <= 0:
  *             raise ValueError("`gap_open_penalty` must be > 0")             # <<<<<<<<<<<<<<
  *         self.gap_open_penalty = gap_open_penalty
  *         if gap_extend_penalty <= 0:
  */
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_Raise(__pyx_t_1, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":560
+ *         # initalize our values
+ *         self.read_sequence = query_sequence
+ *         if gap_open_penalty <= 0:             # <<<<<<<<<<<<<<
+ *             raise ValueError("`gap_open_penalty` must be > 0")
+ *         self.gap_open_penalty = gap_open_penalty
+ */
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":563
+  /* "skbio/alignment/_ssw_wrapper.pyx":562
  *         if gap_open_penalty <= 0:
  *             raise ValueError("`gap_open_penalty` must be > 0")
  *         self.gap_open_penalty = gap_open_penalty             # <<<<<<<<<<<<<<
  *         if gap_extend_penalty <= 0:
  *             raise ValueError("`gap_extend_penalty` must be > 0")
  */
-  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_v_gap_open_penalty); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_v_gap_open_penalty); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_v_self->gap_open_penalty = __pyx_t_3;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":564
+  /* "skbio/alignment/_ssw_wrapper.pyx":563
  *             raise ValueError("`gap_open_penalty` must be > 0")
  *         self.gap_open_penalty = gap_open_penalty
  *         if gap_extend_penalty <= 0:             # <<<<<<<<<<<<<<
  *             raise ValueError("`gap_extend_penalty` must be > 0")
  *         self.gap_extend_penalty = gap_extend_penalty
  */
-  __pyx_t_1 = PyObject_RichCompare(__pyx_v_gap_extend_penalty, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyObject_RichCompare(__pyx_v_gap_extend_penalty, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   if (__pyx_t_2) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":565
+    /* "skbio/alignment/_ssw_wrapper.pyx":564
  *         self.gap_open_penalty = gap_open_penalty
  *         if gap_extend_penalty <= 0:
  *             raise ValueError("`gap_extend_penalty` must be > 0")             # <<<<<<<<<<<<<<
  *         self.gap_extend_penalty = gap_extend_penalty
  *         self.distance_filter = 0 if distance_filter is None else \
  */
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_Raise(__pyx_t_1, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":563
+ *             raise ValueError("`gap_open_penalty` must be > 0")
+ *         self.gap_open_penalty = gap_open_penalty
+ *         if gap_extend_penalty <= 0:             # <<<<<<<<<<<<<<
+ *             raise ValueError("`gap_extend_penalty` must be > 0")
+ *         self.gap_extend_penalty = gap_extend_penalty
+ */
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":566
+  /* "skbio/alignment/_ssw_wrapper.pyx":565
  *         if gap_extend_penalty <= 0:
  *             raise ValueError("`gap_extend_penalty` must be > 0")
  *         self.gap_extend_penalty = gap_extend_penalty             # <<<<<<<<<<<<<<
  *         self.distance_filter = 0 if distance_filter is None else \
  *             distance_filter
  */
-  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_v_gap_extend_penalty); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 566; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_v_gap_extend_penalty); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_v_self->gap_extend_penalty = __pyx_t_3;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":567
+  /* "skbio/alignment/_ssw_wrapper.pyx":566
  *             raise ValueError("`gap_extend_penalty` must be > 0")
  *         self.gap_extend_penalty = gap_extend_penalty
  *         self.distance_filter = 0 if distance_filter is None else \             # <<<<<<<<<<<<<<
@@ -5009,18 +5138,18 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
     __pyx_t_4 = 0;
   } else {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":568
+    /* "skbio/alignment/_ssw_wrapper.pyx":567
  *         self.gap_extend_penalty = gap_extend_penalty
  *         self.distance_filter = 0 if distance_filter is None else \
  *             distance_filter             # <<<<<<<<<<<<<<
  *         self.score_filter = 0 if score_filter is None else score_filter
  *         self.suppress_sequences = suppress_sequences
  */
-    __pyx_t_5 = __Pyx_PyInt_As_npy_int32(__pyx_v_distance_filter); if (unlikely((__pyx_t_5 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyInt_As_npy_int32(__pyx_v_distance_filter); if (unlikely((__pyx_t_5 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 567; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __pyx_t_4 = __pyx_t_5;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":567
+  /* "skbio/alignment/_ssw_wrapper.pyx":566
  *             raise ValueError("`gap_extend_penalty` must be > 0")
  *         self.gap_extend_penalty = gap_extend_penalty
  *         self.distance_filter = 0 if distance_filter is None else \             # <<<<<<<<<<<<<<
@@ -5029,7 +5158,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
  */
   __pyx_v_self->distance_filter = __pyx_t_4;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":569
+  /* "skbio/alignment/_ssw_wrapper.pyx":568
  *         self.distance_filter = 0 if distance_filter is None else \
  *             distance_filter
  *         self.score_filter = 0 if score_filter is None else score_filter             # <<<<<<<<<<<<<<
@@ -5040,19 +5169,19 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   if ((__pyx_t_2 != 0)) {
     __pyx_t_6 = 0;
   } else {
-    __pyx_t_7 = __Pyx_PyInt_As_npy_uint16(__pyx_v_score_filter); if (unlikely((__pyx_t_7 == (npy_uint16)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyInt_As_npy_uint16(__pyx_v_score_filter); if (unlikely((__pyx_t_7 == (npy_uint16)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __pyx_t_6 = __pyx_t_7;
   }
   __pyx_v_self->score_filter = __pyx_t_6;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":570
+  /* "skbio/alignment/_ssw_wrapper.pyx":569
  *             distance_filter
  *         self.score_filter = 0 if score_filter is None else score_filter
  *         self.suppress_sequences = suppress_sequences             # <<<<<<<<<<<<<<
  *         self.is_protein = protein
  *         self.bit_flag = self._get_bit_flag(override_skip_babp, score_only)
  */
-  if (!(likely(((__pyx_v_suppress_sequences) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_suppress_sequences, __pyx_ptype_7cpython_4bool_bool))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_v_suppress_sequences) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_suppress_sequences, __pyx_ptype_7cpython_4bool_bool))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_1 = __pyx_v_suppress_sequences;
   __Pyx_INCREF(__pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
@@ -5061,14 +5190,14 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __pyx_v_self->suppress_sequences = ((PyBoolObject *)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":571
+  /* "skbio/alignment/_ssw_wrapper.pyx":570
  *         self.score_filter = 0 if score_filter is None else score_filter
  *         self.suppress_sequences = suppress_sequences
  *         self.is_protein = protein             # <<<<<<<<<<<<<<
  *         self.bit_flag = self._get_bit_flag(override_skip_babp, score_only)
  *         # http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
  */
-  if (!(likely(((__pyx_v_protein) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_protein, __pyx_ptype_7cpython_4bool_bool))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_v_protein) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_protein, __pyx_ptype_7cpython_4bool_bool))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_1 = __pyx_v_protein;
   __Pyx_INCREF(__pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
@@ -5077,14 +5206,14 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __pyx_v_self->is_protein = ((PyBoolObject *)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":572
+  /* "skbio/alignment/_ssw_wrapper.pyx":571
  *         self.suppress_sequences = suppress_sequences
  *         self.is_protein = protein
  *         self.bit_flag = self._get_bit_flag(override_skip_babp, score_only)             # <<<<<<<<<<<<<<
  *         # http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
  *         # Dijkstra knows what's up:
  */
-  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_bit_flag); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_bit_flag); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_8);
   __pyx_t_9 = NULL;
   __pyx_t_10 = 0;
@@ -5098,7 +5227,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
       __pyx_t_10 = 1;
     }
   }
-  __pyx_t_11 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_11 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_11);
   if (__pyx_t_9) {
     __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_9); __pyx_t_9 = NULL;
@@ -5109,22 +5238,22 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __Pyx_INCREF(__pyx_v_score_only);
   __Pyx_GIVEREF(__pyx_v_score_only);
   PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_10, __pyx_v_score_only);
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_11, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_11, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
   __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_t_1); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_t_1); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_self->bit_flag = __pyx_t_3;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":575
+  /* "skbio/alignment/_ssw_wrapper.pyx":574
  *         # http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
  *         # Dijkstra knows what's up:
  *         self.index_starts_at = 0 if zero_index else 1             # <<<<<<<<<<<<<<
  *         # set up our matrix
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
  */
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_zero_index); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_zero_index); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_2) {
     __pyx_t_12 = 0;
   } else {
@@ -5132,7 +5261,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   }
   __pyx_v_self->index_starts_at = __pyx_t_12;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":578
+  /* "skbio/alignment/_ssw_wrapper.pyx":577
  *         # set up our matrix
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
  *         if substitution_matrix is None:             # <<<<<<<<<<<<<<
@@ -5143,38 +5272,46 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __pyx_t_13 = (__pyx_t_2 != 0);
   if (__pyx_t_13) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":579
+    /* "skbio/alignment/_ssw_wrapper.pyx":578
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
  *         if substitution_matrix is None:
  *             if protein:             # <<<<<<<<<<<<<<
  *                 raise Exception("Must provide a substitution matrix for"
  *                                 " protein sequences")
  */
-    __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_protein); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_protein); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 578; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     if (__pyx_t_13) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":580
+      /* "skbio/alignment/_ssw_wrapper.pyx":579
  *         if substitution_matrix is None:
  *             if protein:
  *                 raise Exception("Must provide a substitution matrix for"             # <<<<<<<<<<<<<<
  *                                 " protein sequences")
  *             matrix = self._build_match_matrix(match_score, mismatch_score)
  */
-      __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_Exception, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_Exception, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
       __Pyx_Raise(__pyx_t_1, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":578
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
+ *         if substitution_matrix is None:
+ *             if protein:             # <<<<<<<<<<<<<<
+ *                 raise Exception("Must provide a substitution matrix for"
+ *                                 " protein sequences")
+ */
     }
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":582
+    /* "skbio/alignment/_ssw_wrapper.pyx":581
  *                 raise Exception("Must provide a substitution matrix for"
  *                                 " protein sequences")
  *             matrix = self._build_match_matrix(match_score, mismatch_score)             # <<<<<<<<<<<<<<
  *         else:
  *             matrix = self._convert_dict2d_to_matrix(substitution_matrix)
  */
-    __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_build_match_matrix(__pyx_v_self, __pyx_v_match_score, __pyx_v_mismatch_score)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 582; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_build_match_matrix(__pyx_v_self, __pyx_v_match_score, __pyx_v_mismatch_score)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     {
       __Pyx_BufFmt_StackElem __pyx_stack[1];
@@ -5190,22 +5327,30 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
         }
       }
       __pyx_pybuffernd_matrix.diminfo[0].strides = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_matrix.diminfo[0].shape = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.shape[0];
-      if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 582; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     __pyx_v_matrix = ((PyArrayObject *)__pyx_t_1);
     __pyx_t_1 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":577
+ *         # set up our matrix
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
+ *         if substitution_matrix is None:             # <<<<<<<<<<<<<<
+ *             if protein:
+ *                 raise Exception("Must provide a substitution matrix for"
+ */
     goto __pyx_L5;
   }
-  /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":584
+  /* "skbio/alignment/_ssw_wrapper.pyx":583
  *             matrix = self._build_match_matrix(match_score, mismatch_score)
  *         else:
  *             matrix = self._convert_dict2d_to_matrix(substitution_matrix)             # <<<<<<<<<<<<<<
  *         # Set up our mask_length
  *         # Mask is recommended to be max(query_sequence/2, 15)
  */
-    __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_convert_dict2d_to_matrix(__pyx_v_self, __pyx_v_substitution_matrix)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  /*else*/ {
+    __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_convert_dict2d_to_matrix(__pyx_v_self, __pyx_v_substitution_matrix)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     {
       __Pyx_BufFmt_StackElem __pyx_stack[1];
@@ -5221,84 +5366,98 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
         }
       }
       __pyx_pybuffernd_matrix.diminfo[0].strides = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_matrix.diminfo[0].shape = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.shape[0];
-      if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     __pyx_v_matrix = ((PyArrayObject *)__pyx_t_1);
     __pyx_t_1 = 0;
   }
   __pyx_L5:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":587
+  /* "skbio/alignment/_ssw_wrapper.pyx":586
  *         # Set up our mask_length
  *         # Mask is recommended to be max(query_sequence/2, 15)
  *         if mask_auto:             # <<<<<<<<<<<<<<
  *             self.mask_length = len(query_sequence) / 2
  *             if self.mask_length < mask_length:
  */
-  __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_mask_auto); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_mask_auto); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_13) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":588
+    /* "skbio/alignment/_ssw_wrapper.pyx":587
  *         # Mask is recommended to be max(query_sequence/2, 15)
  *         if mask_auto:
  *             self.mask_length = len(query_sequence) / 2             # <<<<<<<<<<<<<<
  *             if self.mask_length < mask_length:
  *                 self.mask_length = mask_length
  */
-    __pyx_t_10 = PyObject_Length(__pyx_v_query_sequence); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_10 = PyObject_Length(__pyx_v_query_sequence); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __pyx_v_self->mask_length = __Pyx_div_Py_ssize_t(__pyx_t_10, 2);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":589
+    /* "skbio/alignment/_ssw_wrapper.pyx":588
  *         if mask_auto:
  *             self.mask_length = len(query_sequence) / 2
  *             if self.mask_length < mask_length:             # <<<<<<<<<<<<<<
  *                 self.mask_length = mask_length
  *         else:
  */
-    __pyx_t_1 = __Pyx_PyInt_From_npy_int32(__pyx_v_self->mask_length); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyInt_From_npy_int32(__pyx_v_self->mask_length); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_8 = PyObject_RichCompare(__pyx_t_1, __pyx_v_mask_length, Py_LT); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_8 = PyObject_RichCompare(__pyx_t_1, __pyx_v_mask_length, Py_LT); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
     if (__pyx_t_13) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":590
+      /* "skbio/alignment/_ssw_wrapper.pyx":589
  *             self.mask_length = len(query_sequence) / 2
  *             if self.mask_length < mask_length:
  *                 self.mask_length = mask_length             # <<<<<<<<<<<<<<
  *         else:
  *             self.mask_length = mask_length
  */
-      __pyx_t_4 = __Pyx_PyInt_As_npy_int32(__pyx_v_mask_length); if (unlikely((__pyx_t_4 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_As_npy_int32(__pyx_v_mask_length); if (unlikely((__pyx_t_4 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __pyx_v_self->mask_length = __pyx_t_4;
-      goto __pyx_L8;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":588
+ *         if mask_auto:
+ *             self.mask_length = len(query_sequence) / 2
+ *             if self.mask_length < mask_length:             # <<<<<<<<<<<<<<
+ *                 self.mask_length = mask_length
+ *         else:
+ */
     }
-    __pyx_L8:;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":586
+ *         # Set up our mask_length
+ *         # Mask is recommended to be max(query_sequence/2, 15)
+ *         if mask_auto:             # <<<<<<<<<<<<<<
+ *             self.mask_length = len(query_sequence) / 2
+ *             if self.mask_length < mask_length:
+ */
     goto __pyx_L7;
   }
-  /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":592
+  /* "skbio/alignment/_ssw_wrapper.pyx":591
  *                 self.mask_length = mask_length
  *         else:
  *             self.mask_length = mask_length             # <<<<<<<<<<<<<<
  * 
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] read_seq
  */
-    __pyx_t_4 = __Pyx_PyInt_As_npy_int32(__pyx_v_mask_length); if (unlikely((__pyx_t_4 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  /*else*/ {
+    __pyx_t_4 = __Pyx_PyInt_As_npy_int32(__pyx_v_mask_length); if (unlikely((__pyx_t_4 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __pyx_v_self->mask_length = __pyx_t_4;
   }
   __pyx_L7:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":595
+  /* "skbio/alignment/_ssw_wrapper.pyx":594
  * 
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] read_seq
  *         read_seq = self._seq_converter(query_sequence)             # <<<<<<<<<<<<<<
  * 
  *         cdef cnp.int32_t read_length
  */
-  __pyx_t_8 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_seq_converter(__pyx_v_self, __pyx_v_query_sequence)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_8 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_seq_converter(__pyx_v_self, __pyx_v_query_sequence)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_8);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
@@ -5314,39 +5473,39 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
       }
     }
     __pyx_pybuffernd_read_seq.diminfo[0].strides = __pyx_pybuffernd_read_seq.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_read_seq.diminfo[0].shape = __pyx_pybuffernd_read_seq.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_v_read_seq = ((PyArrayObject *)__pyx_t_8);
   __pyx_t_8 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":598
+  /* "skbio/alignment/_ssw_wrapper.pyx":597
  * 
  *         cdef cnp.int32_t read_length
  *         read_length = len(query_sequence)             # <<<<<<<<<<<<<<
  * 
  *         cdef cnp.int8_t s_size
  */
-  __pyx_t_10 = PyObject_Length(__pyx_v_query_sequence); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 598; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_10 = PyObject_Length(__pyx_v_query_sequence); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_v_read_length = __pyx_t_10;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":601
+  /* "skbio/alignment/_ssw_wrapper.pyx":600
  * 
  *         cdef cnp.int8_t s_size
  *         s_size = score_size             # <<<<<<<<<<<<<<
  * 
  *         cdef cnp.int32_t m_width
  */
-  __pyx_t_17 = __Pyx_PyInt_As_npy_int8(__pyx_v_score_size); if (unlikely((__pyx_t_17 == (npy_int8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 601; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_17 = __Pyx_PyInt_As_npy_int8(__pyx_v_score_size); if (unlikely((__pyx_t_17 == (npy_int8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_v_s_size = __pyx_t_17;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":604
+  /* "skbio/alignment/_ssw_wrapper.pyx":603
  * 
  *         cdef cnp.int32_t m_width
  *         m_width = 24 if self.is_protein else 5             # <<<<<<<<<<<<<<
  * 
  *         cdef s_profile* p
  */
-  __pyx_t_13 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_13 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_13) {
     __pyx_t_4 = 24;
   } else {
@@ -5354,7 +5513,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   }
   __pyx_v_m_width = __pyx_t_4;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":607
+  /* "skbio/alignment/_ssw_wrapper.pyx":606
  * 
  *         cdef s_profile* p
  *         self.profile = ssw_init(<cnp.int8_t*> read_seq.data,             # <<<<<<<<<<<<<<
@@ -5363,7 +5522,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
  */
   __pyx_v_self->profile = ssw_init(((__pyx_t_5numpy_int8_t *)__pyx_v_read_seq->data), __pyx_v_read_length, ((__pyx_t_5numpy_int8_t *)__pyx_v_matrix->data), __pyx_v_m_width, __pyx_v_s_size);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":614
+  /* "skbio/alignment/_ssw_wrapper.pyx":613
  * 
  *         # A hack to keep the python GC from eating our data
  *         self.__KEEP_IT_IN_SCOPE_read = read_seq             # <<<<<<<<<<<<<<
@@ -5376,7 +5535,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __Pyx_DECREF(((PyObject *)__pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_read));
   __pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_read = ((PyArrayObject *)__pyx_v_read_seq);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":615
+  /* "skbio/alignment/_ssw_wrapper.pyx":614
  *         # A hack to keep the python GC from eating our data
  *         self.__KEEP_IT_IN_SCOPE_read = read_seq
  *         self.__KEEP_IT_IN_SCOPE_matrix = matrix             # <<<<<<<<<<<<<<
@@ -5389,7 +5548,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __Pyx_DECREF(((PyObject *)__pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_matrix));
   __pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_matrix = ((PyArrayObject *)__pyx_v_matrix);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":543
+  /* "skbio/alignment/_ssw_wrapper.pyx":542
  *     cdef cnp.ndarray __KEEP_IT_IN_SCOPE_matrix
  * 
  *     def __cinit__(self, query_sequence,             # <<<<<<<<<<<<<<
@@ -5423,7 +5582,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   return __pyx_r;
 }
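
The hunk above follows the __cinit__ body quoted from _ssw_wrapper.pyx: choose or build a substitution matrix, derive the SSW mask length from the query, and size the scoring matrix for the protein or nucleotide alphabet before ssw_init is called. A minimal plain-Python sketch of the mask/width arithmetic, assuming the quoted .pyx excerpts describe it completely (this is an illustration, not skbio code):

    # Mirrors the mask_length / m_width logic quoted in the .pyx comments above.
    def choose_mask_and_width(query_sequence, mask_length=15, mask_auto=True,
                              is_protein=False):
        # Mask is recommended to be max(len(query_sequence) / 2, mask_length).
        if mask_auto:
            mask = len(query_sequence) // 2
            if mask < mask_length:
                mask = mask_length
        else:
            mask = mask_length
        # Matrix width: 24 symbols for the amino-acid alphabet, 5 for nucleotides.
        m_width = 24 if is_protein else 5
        return mask, m_width

    print(choose_mask_and_width("ACGTACGTACGT"))              # (15, 5)
    print(choose_mask_and_width("A" * 100, is_protein=True))  # (50, 24)
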
 
-/* "skbio/alignment/_ssw_wrapper.pyx":617
+/* "skbio/alignment/_ssw_wrapper.pyx":616
  *         self.__KEEP_IT_IN_SCOPE_matrix = matrix
  * 
  *     def __call__(self, target_sequence):             # <<<<<<<<<<<<<<
@@ -5463,7 +5622,7 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
         else goto __pyx_L5_argtuple_error;
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 617; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
       goto __pyx_L5_argtuple_error;
@@ -5474,7 +5633,7 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 617; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ -5514,7 +5673,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   __pyx_pybuffernd_reference.data = NULL;
   __pyx_pybuffernd_reference.rcbuffer = &__pyx_pybuffer_reference;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":630
+  /* "skbio/alignment/_ssw_wrapper.pyx":629
  * 
  *         """
  *         reference_sequence = target_sequence             # <<<<<<<<<<<<<<
@@ -5524,14 +5683,14 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   __Pyx_INCREF(__pyx_v_target_sequence);
   __pyx_v_reference_sequence = __pyx_v_target_sequence;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":632
+  /* "skbio/alignment/_ssw_wrapper.pyx":631
  *         reference_sequence = target_sequence
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] reference
  *         reference = self._seq_converter(reference_sequence)             # <<<<<<<<<<<<<<
  * 
  *         cdef cnp.int32_t ref_length
  */
-  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_seq_converter(__pyx_v_self, __pyx_v_reference_sequence)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_seq_converter(__pyx_v_self, __pyx_v_reference_sequence)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 631; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
@@ -5547,22 +5706,22 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
       }
     }
     __pyx_pybuffernd_reference.diminfo[0].strides = __pyx_pybuffernd_reference.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_reference.diminfo[0].shape = __pyx_pybuffernd_reference.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 631; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_v_reference = ((PyArrayObject *)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":635
+  /* "skbio/alignment/_ssw_wrapper.pyx":634
  * 
  *         cdef cnp.int32_t ref_length
  *         ref_length = len(reference_sequence)             # <<<<<<<<<<<<<<
  * 
  *         cdef s_align *align
  */
-  __pyx_t_6 = PyObject_Length(__pyx_v_reference_sequence); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = PyObject_Length(__pyx_v_reference_sequence); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_v_ref_length = __pyx_t_6;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":638
+  /* "skbio/alignment/_ssw_wrapper.pyx":637
  * 
  *         cdef s_align *align
  *         align = ssw_align(self.profile, <cnp.int8_t*> reference.data,             # <<<<<<<<<<<<<<
@@ -5571,26 +5730,26 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  */
   __pyx_v_align = ssw_align(__pyx_v_self->profile, ((__pyx_t_5numpy_int8_t *)__pyx_v_reference->data), __pyx_v_ref_length, __pyx_v_self->gap_open_penalty, __pyx_v_self->gap_extend_penalty, __pyx_v_self->bit_flag, __pyx_v_self->score_filter, __pyx_v_self->distance_filter, __pyx_v_self->mask_length);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":645
+  /* "skbio/alignment/_ssw_wrapper.pyx":644
  * 
  *         # Cython won't let me do this correctly, so duplicate code ahoy:
  *         if self.suppress_sequences:             # <<<<<<<<<<<<<<
  *             alignment = AlignmentStructure("", "", self.index_starts_at)
  *         else:
  */
-  __pyx_t_7 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->suppress_sequences)); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_7 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->suppress_sequences)); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 644; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_7) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":646
+    /* "skbio/alignment/_ssw_wrapper.pyx":645
  *         # Cython won't let me do this correctly, so duplicate code ahoy:
  *         if self.suppress_sequences:
  *             alignment = AlignmentStructure("", "", self.index_starts_at)             # <<<<<<<<<<<<<<
  *         else:
  *             alignment = AlignmentStructure(self.read_sequence,
  */
-    __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->index_starts_at); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->index_starts_at); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_8);
     __Pyx_INCREF(__pyx_kp_s__6);
     __Pyx_GIVEREF(__pyx_kp_s__6);
@@ -5601,33 +5760,49 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
     __Pyx_GIVEREF(__pyx_t_1);
     PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_1);
     __pyx_t_1 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure), __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
     __pyx_v_alignment = ((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_t_1);
     __pyx_t_1 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":644
+ * 
+ *         # Cython won't let me do this correctly, so duplicate code ahoy:
+ *         if self.suppress_sequences:             # <<<<<<<<<<<<<<
+ *             alignment = AlignmentStructure("", "", self.index_starts_at)
+ *         else:
+ */
     goto __pyx_L3;
   }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":647
+ *             alignment = AlignmentStructure("", "", self.index_starts_at)
+ *         else:
+ *             alignment = AlignmentStructure(self.read_sequence,             # <<<<<<<<<<<<<<
+ *                                            reference_sequence,
+ *                                            self.index_starts_at)
+ */
   /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":650
+    /* "skbio/alignment/_ssw_wrapper.pyx":649
  *             alignment = AlignmentStructure(self.read_sequence,
  *                                            reference_sequence,
  *                                            self.index_starts_at)             # <<<<<<<<<<<<<<
  *         alignment.__constructor(align)  # Hack to get a pointer through
  *         return alignment
  */
-    __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->index_starts_at); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->index_starts_at); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 649; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":648
+    /* "skbio/alignment/_ssw_wrapper.pyx":647
  *             alignment = AlignmentStructure("", "", self.index_starts_at)
  *         else:
  *             alignment = AlignmentStructure(self.read_sequence,             # <<<<<<<<<<<<<<
  *                                            reference_sequence,
  *                                            self.index_starts_at)
  */
-    __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_8);
     __Pyx_INCREF(__pyx_v_self->read_sequence);
     __Pyx_GIVEREF(__pyx_v_self->read_sequence);
@@ -5638,7 +5813,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
     __Pyx_GIVEREF(__pyx_t_1);
     PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_1);
     __pyx_t_1 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure), __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
     __pyx_v_alignment = ((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_t_1);
@@ -5646,18 +5821,18 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   }
   __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":651
+  /* "skbio/alignment/_ssw_wrapper.pyx":650
  *                                            reference_sequence,
  *                                            self.index_starts_at)
  *         alignment.__constructor(align)  # Hack to get a pointer through             # <<<<<<<<<<<<<<
  *         return alignment
  * 
  */
-  __pyx_t_1 = ((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_alignment->__pyx_vtab)->__pyx___constructor(__pyx_v_alignment, __pyx_v_align); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 651; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = ((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_alignment->__pyx_vtab)->__pyx___constructor(__pyx_v_alignment, __pyx_v_align); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":652
+  /* "skbio/alignment/_ssw_wrapper.pyx":651
  *                                            self.index_starts_at)
  *         alignment.__constructor(align)  # Hack to get a pointer through
  *         return alignment             # <<<<<<<<<<<<<<
@@ -5669,7 +5844,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   __pyx_r = ((PyObject *)__pyx_v_alignment);
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":617
+  /* "skbio/alignment/_ssw_wrapper.pyx":616
  *         self.__KEEP_IT_IN_SCOPE_matrix = matrix
  * 
  *     def __call__(self, target_sequence):             # <<<<<<<<<<<<<<
@@ -5699,7 +5874,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   return __pyx_r;
 }
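
The __call__ wrapper above encodes the target sequence, runs ssw_align against the stored query profile, and wraps the result in an AlignmentStructure. At the Python level this corresponds to building a StripedSmithWaterman object once and calling it per target; a short usage sketch (the sequences are made up, and the attribute names assume the AlignmentStructure fields the quoted source implies):

    from skbio.alignment import StripedSmithWaterman

    # Build the query profile once, then reuse it for each target sequence.
    query = StripedSmithWaterman("ACTAAGGCTCTCTACCCCTCTCAGAGA")
    alignment = query("AAAAAACTCTCTAAACTCACTAAGGCTCTCTACCCCTCTTCAGAGAAGTCGA")
    print(alignment.optimal_alignment_score)
    print(alignment.aligned_query_sequence)
    print(alignment.aligned_target_sequence)
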
 
-/* "skbio/alignment/_ssw_wrapper.pyx":654
+/* "skbio/alignment/_ssw_wrapper.pyx":653
  *         return alignment
  * 
  *     def __dealloc__(self):             # <<<<<<<<<<<<<<
@@ -5723,7 +5898,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("__dealloc__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":655
+  /* "skbio/alignment/_ssw_wrapper.pyx":654
  * 
  *     def __dealloc__(self):
  *         if self.profile is not NULL:             # <<<<<<<<<<<<<<
@@ -5733,7 +5908,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__
   __pyx_t_1 = ((__pyx_v_self->profile != NULL) != 0);
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":656
+    /* "skbio/alignment/_ssw_wrapper.pyx":655
  *     def __dealloc__(self):
  *         if self.profile is not NULL:
  *             init_destroy(self.profile)             # <<<<<<<<<<<<<<
@@ -5741,11 +5916,17 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__
  *     def _get_bit_flag(self, override_skip_babp, score_only):
  */
     init_destroy(__pyx_v_self->profile);
-    goto __pyx_L3;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":654
+ * 
+ *     def __dealloc__(self):
+ *         if self.profile is not NULL:             # <<<<<<<<<<<<<<
+ *             init_destroy(self.profile)
+ * 
+ */
   }
-  __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":654
+  /* "skbio/alignment/_ssw_wrapper.pyx":653
  *         return alignment
  * 
  *     def __dealloc__(self):             # <<<<<<<<<<<<<<
@@ -5757,7 +5938,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__
   __Pyx_RefNannyFinishContext();
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":658
+/* "skbio/alignment/_ssw_wrapper.pyx":657
  *             init_destroy(self.profile)
  * 
  *     def _get_bit_flag(self, override_skip_babp, score_only):             # <<<<<<<<<<<<<<
@@ -5796,11 +5977,11 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
         case  1:
         if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_score_only)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_get_bit_flag", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_get_bit_flag", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 657; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_get_bit_flag") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_get_bit_flag") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 657; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
       goto __pyx_L5_argtuple_error;
@@ -5813,7 +5994,7 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("_get_bit_flag", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("_get_bit_flag", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 657; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman._get_bit_flag", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ -5837,7 +6018,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_get_bit_flag", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":659
+  /* "skbio/alignment/_ssw_wrapper.pyx":658
  * 
  *     def _get_bit_flag(self, override_skip_babp, score_only):
  *         bit_flag = 0             # <<<<<<<<<<<<<<
@@ -5846,17 +6027,17 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  */
   __pyx_v_bit_flag = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":660
+  /* "skbio/alignment/_ssw_wrapper.pyx":659
  *     def _get_bit_flag(self, override_skip_babp, score_only):
  *         bit_flag = 0
  *         if score_only:             # <<<<<<<<<<<<<<
  *             return bit_flag
  *         if override_skip_babp:
  */
-  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_score_only); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_score_only); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":661
+    /* "skbio/alignment/_ssw_wrapper.pyx":660
  *         bit_flag = 0
  *         if score_only:
  *             return bit_flag             # <<<<<<<<<<<<<<
@@ -5864,24 +6045,32 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  *             bit_flag = bit_flag | 0x8
  */
     __Pyx_XDECREF(__pyx_r);
-    __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_bit_flag); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_bit_flag); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_r = __pyx_t_2;
     __pyx_t_2 = 0;
     goto __pyx_L0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":659
+ *     def _get_bit_flag(self, override_skip_babp, score_only):
+ *         bit_flag = 0
+ *         if score_only:             # <<<<<<<<<<<<<<
+ *             return bit_flag
+ *         if override_skip_babp:
+ */
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":662
+  /* "skbio/alignment/_ssw_wrapper.pyx":661
  *         if score_only:
  *             return bit_flag
  *         if override_skip_babp:             # <<<<<<<<<<<<<<
  *             bit_flag = bit_flag | 0x8
  *         if self.distance_filter != 0:
  */
-  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_override_skip_babp); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_override_skip_babp); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":663
+    /* "skbio/alignment/_ssw_wrapper.pyx":662
  *             return bit_flag
  *         if override_skip_babp:
  *             bit_flag = bit_flag | 0x8             # <<<<<<<<<<<<<<
@@ -5889,11 +6078,17 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  *             bit_flag = bit_flag | 0x4
  */
     __pyx_v_bit_flag = (__pyx_v_bit_flag | 0x8);
-    goto __pyx_L4;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":661
+ *         if score_only:
+ *             return bit_flag
+ *         if override_skip_babp:             # <<<<<<<<<<<<<<
+ *             bit_flag = bit_flag | 0x8
+ *         if self.distance_filter != 0:
+ */
   }
-  __pyx_L4:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":664
+  /* "skbio/alignment/_ssw_wrapper.pyx":663
  *         if override_skip_babp:
  *             bit_flag = bit_flag | 0x8
  *         if self.distance_filter != 0:             # <<<<<<<<<<<<<<
@@ -5903,7 +6098,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   __pyx_t_1 = ((__pyx_v_self->distance_filter != 0) != 0);
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":665
+    /* "skbio/alignment/_ssw_wrapper.pyx":664
  *             bit_flag = bit_flag | 0x8
  *         if self.distance_filter != 0:
  *             bit_flag = bit_flag | 0x4             # <<<<<<<<<<<<<<
@@ -5911,11 +6106,17 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  *             bit_flag = bit_flag | 0x2
  */
     __pyx_v_bit_flag = (__pyx_v_bit_flag | 0x4);
-    goto __pyx_L5;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":663
+ *         if override_skip_babp:
+ *             bit_flag = bit_flag | 0x8
+ *         if self.distance_filter != 0:             # <<<<<<<<<<<<<<
+ *             bit_flag = bit_flag | 0x4
+ *         if self.score_filter != 0:
+ */
   }
-  __pyx_L5:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":666
+  /* "skbio/alignment/_ssw_wrapper.pyx":665
  *         if self.distance_filter != 0:
  *             bit_flag = bit_flag | 0x4
  *         if self.score_filter != 0:             # <<<<<<<<<<<<<<
@@ -5925,7 +6126,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   __pyx_t_1 = ((__pyx_v_self->score_filter != 0) != 0);
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":667
+    /* "skbio/alignment/_ssw_wrapper.pyx":666
  *             bit_flag = bit_flag | 0x4
  *         if self.score_filter != 0:
  *             bit_flag = bit_flag | 0x2             # <<<<<<<<<<<<<<
@@ -5933,11 +6134,17 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  *             bit_flag = bit_flag | 0x1
  */
     __pyx_v_bit_flag = (__pyx_v_bit_flag | 0x2);
-    goto __pyx_L6;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":665
+ *         if self.distance_filter != 0:
+ *             bit_flag = bit_flag | 0x4
+ *         if self.score_filter != 0:             # <<<<<<<<<<<<<<
+ *             bit_flag = bit_flag | 0x2
+ *         if bit_flag == 0 or bit_flag == 8:
+ */
   }
-  __pyx_L6:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":668
+  /* "skbio/alignment/_ssw_wrapper.pyx":667
  *         if self.score_filter != 0:
  *             bit_flag = bit_flag | 0x2
  *         if bit_flag == 0 or bit_flag == 8:             # <<<<<<<<<<<<<<
@@ -5948,7 +6155,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
     case 0:
     case 8:
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":669
+    /* "skbio/alignment/_ssw_wrapper.pyx":668
  *             bit_flag = bit_flag | 0x2
  *         if bit_flag == 0 or bit_flag == 8:
  *             bit_flag = bit_flag | 0x1             # <<<<<<<<<<<<<<
@@ -5956,11 +6163,19 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  * 
  */
     __pyx_v_bit_flag = (__pyx_v_bit_flag | 0x1);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":667
+ *         if self.score_filter != 0:
+ *             bit_flag = bit_flag | 0x2
+ *         if bit_flag == 0 or bit_flag == 8:             # <<<<<<<<<<<<<<
+ *             bit_flag = bit_flag | 0x1
+ *         return bit_flag
+ */
     break;
     default: break;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":670
+  /* "skbio/alignment/_ssw_wrapper.pyx":669
  *         if bit_flag == 0 or bit_flag == 8:
  *             bit_flag = bit_flag | 0x1
  *         return bit_flag             # <<<<<<<<<<<<<<
@@ -5968,13 +6183,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] _seq_converter(
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_bit_flag); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 670; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_bit_flag); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 669; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_r = __pyx_t_2;
   __pyx_t_2 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":658
+  /* "skbio/alignment/_ssw_wrapper.pyx":657
  *             init_destroy(self.profile)
  * 
  *     def _get_bit_flag(self, override_skip_babp, score_only):             # <<<<<<<<<<<<<<
@@ -5993,7 +6208,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   return __pyx_r;
 }
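
_get_bit_flag, just above, packs the reporting options into the flag word later passed to ssw_align: 0x8 when override_skip_babp is true, 0x4 when a distance filter is in effect, 0x2 when a score filter is in effect, and 0x1 when neither filter bit ended up set. A plain-Python rendering of that logic, assuming the quoted .pyx lines are the whole method (illustrative only):

    # Standalone rendering of the quoted _get_bit_flag logic.
    def get_bit_flag(override_skip_babp, score_only,
                     distance_filter=0, score_filter=0):
        bit_flag = 0
        if score_only:
            return bit_flag
        if override_skip_babp:
            bit_flag |= 0x8
        if distance_filter != 0:
            bit_flag |= 0x4
        if score_filter != 0:
            bit_flag |= 0x2
        # Set 0x1 when no filter bits were set (bit_flag == 0) or only the
        # override bit was set (bit_flag == 8), as in the quoted source.
        if bit_flag == 0 or bit_flag == 8:
            bit_flag |= 0x1
        return bit_flag

    print(get_bit_flag(override_skip_babp=False, score_only=False))  # 1
    print(get_bit_flag(override_skip_babp=True, score_only=False,
                       score_filter=10))                             # 10
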
 
-/* "skbio/alignment/_ssw_wrapper.pyx":672
+/* "skbio/alignment/_ssw_wrapper.pyx":671
  *         return bit_flag
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] _seq_converter(             # <<<<<<<<<<<<<<
@@ -6022,6 +6237,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   PyObject *__pyx_t_11 = NULL;
   int __pyx_t_12;
   PyObject *(*__pyx_t_13)(PyObject *);
+  long __pyx_t_14;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
@@ -6031,41 +6247,41 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __pyx_pybuffernd_seq.data = NULL;
   __pyx_pybuffernd_seq.rcbuffer = &__pyx_pybuffer_seq;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":676
+  /* "skbio/alignment/_ssw_wrapper.pyx":675
  *             sequence):
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] seq
  *         seq = np.empty(len(sequence), dtype=np.int8)             # <<<<<<<<<<<<<<
  *         if self.is_protein:
  *             for i, char in enumerate(sequence):
  */
-  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_3 = PyObject_Length(__pyx_v_sequence); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyObject_Length(__pyx_v_sequence); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
   PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_int8); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_int8); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_6);
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_6);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_7 = ((PyArrayObject *)__pyx_t_6);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
@@ -6081,23 +6297,23 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       }
     }
     __pyx_pybuffernd_seq.diminfo[0].strides = __pyx_pybuffernd_seq.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_seq.diminfo[0].shape = __pyx_pybuffernd_seq.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_7 = 0;
   __pyx_v_seq = ((PyArrayObject *)__pyx_t_6);
   __pyx_t_6 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":677
+  /* "skbio/alignment/_ssw_wrapper.pyx":676
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] seq
  *         seq = np.empty(len(sequence), dtype=np.int8)
  *         if self.is_protein:             # <<<<<<<<<<<<<<
  *             for i, char in enumerate(sequence):
  *                 seq[i] = np_aa_table[ord(char)]
  */
-  __pyx_t_12 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_12 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_12) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":678
+    /* "skbio/alignment/_ssw_wrapper.pyx":677
  *         seq = np.empty(len(sequence), dtype=np.int8)
  *         if self.is_protein:
  *             for i, char in enumerate(sequence):             # <<<<<<<<<<<<<<
@@ -6110,26 +6326,26 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __pyx_t_1 = __pyx_v_sequence; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0;
       __pyx_t_13 = NULL;
     } else {
-      __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
-      __pyx_t_13 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_13 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     for (;;) {
       if (likely(!__pyx_t_13)) {
         if (likely(PyList_CheckExact(__pyx_t_1))) {
           if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break;
           #if CYTHON_COMPILING_IN_CPYTHON
-          __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           #else
-          __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           __Pyx_GOTREF(__pyx_t_4);
           #endif
         } else {
           if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
           #if CYTHON_COMPILING_IN_CPYTHON
-          __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           #else
-          __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           __Pyx_GOTREF(__pyx_t_4);
           #endif
         }
@@ -6139,7 +6355,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
           PyObject* exc_type = PyErr_Occurred();
           if (exc_type) {
             if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           }
           break;
         }
@@ -6149,37 +6365,29 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __pyx_t_4 = 0;
       __Pyx_INCREF(__pyx_t_6);
       __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6);
-      __pyx_t_4 = PyNumber_Add(__pyx_t_6, __pyx_int_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_t_6, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       __Pyx_DECREF(__pyx_t_6);
       __pyx_t_6 = __pyx_t_4;
       __pyx_t_4 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":679
+      /* "skbio/alignment/_ssw_wrapper.pyx":678
  *         if self.is_protein:
  *             for i, char in enumerate(sequence):
  *                 seq[i] = np_aa_table[ord(char)]             # <<<<<<<<<<<<<<
  *         else:
  *             for i, char in enumerate(sequence):
  */
-      __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np_aa_table); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np_aa_table); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_2);
-      __Pyx_INCREF(__pyx_v_char);
-      __Pyx_GIVEREF(__pyx_v_char);
-      PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_char);
-      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ord, __pyx_t_2, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_5);
-      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-      __pyx_t_2 = PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __pyx_t_14 = __Pyx_PyObject_Ord(__pyx_v_char); if (unlikely(__pyx_t_14 == (long)(Py_UCS4)-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_4, __pyx_t_14, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
       __Pyx_GOTREF(__pyx_t_2);
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_seq), __pyx_v_i, __pyx_t_2) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_seq), __pyx_v_i, __pyx_t_2) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":678
+      /* "skbio/alignment/_ssw_wrapper.pyx":677
  *         seq = np.empty(len(sequence), dtype=np.int8)
  *         if self.is_protein:
  *             for i, char in enumerate(sequence):             # <<<<<<<<<<<<<<
@@ -6189,43 +6397,51 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
     }
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
     __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":676
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] seq
+ *         seq = np.empty(len(sequence), dtype=np.int8)
+ *         if self.is_protein:             # <<<<<<<<<<<<<<
+ *             for i, char in enumerate(sequence):
+ *                 seq[i] = np_aa_table[ord(char)]
+ */
     goto __pyx_L3;
   }
-  /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":681
+  /* "skbio/alignment/_ssw_wrapper.pyx":680
  *                 seq[i] = np_aa_table[ord(char)]
  *         else:
  *             for i, char in enumerate(sequence):             # <<<<<<<<<<<<<<
  *                 seq[i] = np_nt_table[ord(char)]
  *         return seq
  */
+  /*else*/ {
     __Pyx_INCREF(__pyx_int_0);
     __pyx_t_6 = __pyx_int_0;
     if (likely(PyList_CheckExact(__pyx_v_sequence)) || PyTuple_CheckExact(__pyx_v_sequence)) {
       __pyx_t_1 = __pyx_v_sequence; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0;
       __pyx_t_13 = NULL;
     } else {
-      __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
-      __pyx_t_13 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_13 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     for (;;) {
       if (likely(!__pyx_t_13)) {
         if (likely(PyList_CheckExact(__pyx_t_1))) {
           if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break;
           #if CYTHON_COMPILING_IN_CPYTHON
-          __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           #else
-          __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           __Pyx_GOTREF(__pyx_t_2);
           #endif
         } else {
           if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
           #if CYTHON_COMPILING_IN_CPYTHON
-          __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           #else
-          __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           __Pyx_GOTREF(__pyx_t_2);
           #endif
         }
@@ -6235,7 +6451,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
           PyObject* exc_type = PyErr_Occurred();
           if (exc_type) {
             if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           }
           break;
         }
@@ -6245,37 +6461,29 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __pyx_t_2 = 0;
       __Pyx_INCREF(__pyx_t_6);
       __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6);
-      __pyx_t_2 = PyNumber_Add(__pyx_t_6, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_6, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
       __Pyx_DECREF(__pyx_t_6);
       __pyx_t_6 = __pyx_t_2;
       __pyx_t_2 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":682
+      /* "skbio/alignment/_ssw_wrapper.pyx":681
  *         else:
  *             for i, char in enumerate(sequence):
  *                 seq[i] = np_nt_table[ord(char)]             # <<<<<<<<<<<<<<
  *         return seq
  * 
  */
-      __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np_nt_table); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np_nt_table); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
-      __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_5);
-      __Pyx_INCREF(__pyx_v_char);
-      __Pyx_GIVEREF(__pyx_v_char);
-      PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_char);
-      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ord, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_14 = __Pyx_PyObject_Ord(__pyx_v_char); if (unlikely(__pyx_t_14 == (long)(Py_UCS4)-1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_2, __pyx_t_14, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
       __Pyx_GOTREF(__pyx_t_4);
-      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __pyx_t_5 = PyObject_GetItem(__pyx_t_2, __pyx_t_4); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
-      __Pyx_GOTREF(__pyx_t_5);
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+      if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_seq), __pyx_v_i, __pyx_t_4) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_seq), __pyx_v_i, __pyx_t_5) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":681
+      /* "skbio/alignment/_ssw_wrapper.pyx":680
  *                 seq[i] = np_aa_table[ord(char)]
  *         else:
  *             for i, char in enumerate(sequence):             # <<<<<<<<<<<<<<
@@ -6288,7 +6496,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   }
   __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":683
+  /* "skbio/alignment/_ssw_wrapper.pyx":682
  *             for i, char in enumerate(sequence):
  *                 seq[i] = np_nt_table[ord(char)]
  *         return seq             # <<<<<<<<<<<<<<
@@ -6300,7 +6508,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __pyx_r = ((PyArrayObject *)__pyx_v_seq);
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":672
+  /* "skbio/alignment/_ssw_wrapper.pyx":671
  *         return bit_flag
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] _seq_converter(             # <<<<<<<<<<<<<<
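
[Editor's note, not part of the patch: the hunks above regenerate the Cython output for StripedSmithWaterman's _seq_converter; newer helpers such as __Pyx_PyObject_Ord and __Pyx_GetItemInt replace the old ord()/PyObject_GetItem calls, and the quoted .pyx line numbers shift by one, while the .pyx logic itself is unchanged. For orientation only, a minimal pure-Python sketch of that logic, with a toy lookup table standing in for the module's np_nt_table/np_aa_table (the table contents here are an assumption, not the module's actual values):

    import numpy as np

    # Toy nucleotide table for the sketch: A, C, G, T -> 0..3, anything else -> 4 (N).
    NT_TABLE = np.full(128, 4, dtype=np.int8)
    for _i, _base in enumerate("ACGT"):
        NT_TABLE[ord(_base)] = _i

    def seq_converter(sequence, table=NT_TABLE):
        # Same shape as the quoted .pyx loop: one ord()-indexed table lookup per character.
        seq = np.empty(len(sequence), dtype=np.int8)
        for i, char in enumerate(sequence):
            seq[i] = table[ord(char)]
        return seq

    # seq_converter("ACGTN") -> array([0, 1, 2, 3, 4], dtype=int8)
]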
@@ -6333,7 +6541,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":685
+/* "skbio/alignment/_ssw_wrapper.pyx":684
  *         return seq
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
@@ -6363,7 +6571,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_build_match_matrix", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":687
+  /* "skbio/alignment/_ssw_wrapper.pyx":686
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
  *             _build_match_matrix(self, match_score, mismatch_score):
  *         sequence_order = "ACGTN"             # <<<<<<<<<<<<<<
@@ -6373,19 +6581,19 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __Pyx_INCREF(__pyx_n_s_ACGTN);
   __pyx_v_sequence_order = __pyx_n_s_ACGTN;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":688
+  /* "skbio/alignment/_ssw_wrapper.pyx":687
  *             _build_match_matrix(self, match_score, mismatch_score):
  *         sequence_order = "ACGTN"
  *         dict2d = {}             # <<<<<<<<<<<<<<
  *         for row in sequence_order:
  *             dict2d[row] = {}
  */
-  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 687; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_dict2d = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":689
+  /* "skbio/alignment/_ssw_wrapper.pyx":688
  *         sequence_order = "ACGTN"
  *         dict2d = {}
  *         for row in sequence_order:             # <<<<<<<<<<<<<<
@@ -6396,26 +6604,26 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
     __pyx_t_1 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
     __pyx_t_3 = NULL;
   } else {
-    __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   for (;;) {
     if (likely(!__pyx_t_3)) {
       if (likely(PyList_CheckExact(__pyx_t_1))) {
         if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break;
         #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         #else
-        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
         #endif
       } else {
         if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
         #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         #else
-        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
         #endif
       }
@@ -6425,7 +6633,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
         PyObject* exc_type = PyErr_Occurred();
         if (exc_type) {
           if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         }
         break;
       }
@@ -6434,19 +6642,19 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
     __Pyx_XDECREF_SET(__pyx_v_row, __pyx_t_4);
     __pyx_t_4 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":690
+    /* "skbio/alignment/_ssw_wrapper.pyx":689
  *         dict2d = {}
  *         for row in sequence_order:
  *             dict2d[row] = {}             # <<<<<<<<<<<<<<
  *             for column in sequence_order:
  *                 if column == 'N' or row == 'N':
  */
-    __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
-    if (unlikely(PyDict_SetItem(__pyx_v_dict2d, __pyx_v_row, __pyx_t_4) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(PyDict_SetItem(__pyx_v_dict2d, __pyx_v_row, __pyx_t_4) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":691
+    /* "skbio/alignment/_ssw_wrapper.pyx":690
  *         for row in sequence_order:
  *             dict2d[row] = {}
  *             for column in sequence_order:             # <<<<<<<<<<<<<<
@@ -6457,26 +6665,26 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __pyx_t_4 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
       __pyx_t_6 = NULL;
     } else {
-      __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     for (;;) {
       if (likely(!__pyx_t_6)) {
         if (likely(PyList_CheckExact(__pyx_t_4))) {
           if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
           #if CYTHON_COMPILING_IN_CPYTHON
-          __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           #else
-          __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           __Pyx_GOTREF(__pyx_t_7);
           #endif
         } else {
           if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
           #if CYTHON_COMPILING_IN_CPYTHON
-          __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           #else
-          __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           __Pyx_GOTREF(__pyx_t_7);
           #endif
         }
@@ -6486,7 +6694,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
           PyObject* exc_type = PyErr_Occurred();
           if (exc_type) {
             if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           }
           break;
         }
@@ -6495,63 +6703,79 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __Pyx_XDECREF_SET(__pyx_v_column, __pyx_t_7);
       __pyx_t_7 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":692
+      /* "skbio/alignment/_ssw_wrapper.pyx":691
  *             dict2d[row] = {}
  *             for column in sequence_order:
  *                 if column == 'N' or row == 'N':             # <<<<<<<<<<<<<<
  *                     dict2d[row][column] = 0
  *                 else:
  */
-      __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_column, __pyx_n_s_N, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_column, __pyx_n_s_N, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       if (!__pyx_t_9) {
       } else {
         __pyx_t_8 = __pyx_t_9;
         goto __pyx_L8_bool_binop_done;
       }
-      __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_row, __pyx_n_s_N, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_row, __pyx_n_s_N, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __pyx_t_8 = __pyx_t_9;
       __pyx_L8_bool_binop_done:;
       if (__pyx_t_8) {
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":693
+        /* "skbio/alignment/_ssw_wrapper.pyx":692
  *             for column in sequence_order:
  *                 if column == 'N' or row == 'N':
  *                     dict2d[row][column] = 0             # <<<<<<<<<<<<<<
  *                 else:
  *                     dict2d[row][column] = match_score if row == column \
  */
-        __pyx_t_7 = __Pyx_PyDict_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+        __pyx_t_7 = __Pyx_PyDict_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
         __Pyx_GOTREF(__pyx_t_7);
-        if (unlikely(PyObject_SetItem(__pyx_t_7, __pyx_v_column, __pyx_int_0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        if (unlikely(PyObject_SetItem(__pyx_t_7, __pyx_v_column, __pyx_int_0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-        goto __pyx_L7;
+
+        /* "skbio/alignment/_ssw_wrapper.pyx":691
+ *             dict2d[row] = {}
+ *             for column in sequence_order:
+ *                 if column == 'N' or row == 'N':             # <<<<<<<<<<<<<<
+ *                     dict2d[row][column] = 0
+ *                 else:
+ */
+        goto __pyx_L7;
       }
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":694
+ *                     dict2d[row][column] = 0
+ *                 else:
+ *                     dict2d[row][column] = match_score if row == column \             # <<<<<<<<<<<<<<
+ *                         else mismatch_score
+ *         return self._convert_dict2d_to_matrix(dict2d)
+ */
       /*else*/ {
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":696
+        /* "skbio/alignment/_ssw_wrapper.pyx":695
  *                 else:
  *                     dict2d[row][column] = match_score if row == column \
  *                         else mismatch_score             # <<<<<<<<<<<<<<
  *         return self._convert_dict2d_to_matrix(dict2d)
  * 
  */
-        __pyx_t_10 = PyObject_RichCompare(__pyx_v_row, __pyx_v_column, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_10 = PyObject_RichCompare(__pyx_v_row, __pyx_v_column, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":695
+        /* "skbio/alignment/_ssw_wrapper.pyx":694
  *                     dict2d[row][column] = 0
  *                 else:
  *                     dict2d[row][column] = match_score if row == column \             # <<<<<<<<<<<<<<
  *                         else mismatch_score
  *         return self._convert_dict2d_to_matrix(dict2d)
  */
-        __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
         if (__pyx_t_8) {
           __Pyx_INCREF(__pyx_v_match_score);
           __pyx_t_7 = __pyx_v_match_score;
         } else {
 
-          /* "skbio/alignment/_ssw_wrapper.pyx":696
+          /* "skbio/alignment/_ssw_wrapper.pyx":695
  *                 else:
  *                     dict2d[row][column] = match_score if row == column \
  *                         else mismatch_score             # <<<<<<<<<<<<<<
@@ -6562,22 +6786,22 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
           __pyx_t_7 = __pyx_v_mismatch_score;
         }
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":695
+        /* "skbio/alignment/_ssw_wrapper.pyx":694
  *                     dict2d[row][column] = 0
  *                 else:
  *                     dict2d[row][column] = match_score if row == column \             # <<<<<<<<<<<<<<
  *                         else mismatch_score
  *         return self._convert_dict2d_to_matrix(dict2d)
  */
-        __pyx_t_10 = __Pyx_PyDict_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_10 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+        __pyx_t_10 = __Pyx_PyDict_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_10 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
         __Pyx_GOTREF(__pyx_t_10);
-        if (unlikely(PyObject_SetItem(__pyx_t_10, __pyx_v_column, __pyx_t_7) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        if (unlikely(PyObject_SetItem(__pyx_t_10, __pyx_v_column, __pyx_t_7) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
         __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
       }
       __pyx_L7:;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":691
+      /* "skbio/alignment/_ssw_wrapper.pyx":690
  *         for row in sequence_order:
  *             dict2d[row] = {}
  *             for column in sequence_order:             # <<<<<<<<<<<<<<
@@ -6587,7 +6811,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
     }
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":689
+    /* "skbio/alignment/_ssw_wrapper.pyx":688
  *         sequence_order = "ACGTN"
  *         dict2d = {}
  *         for row in sequence_order:             # <<<<<<<<<<<<<<
@@ -6597,7 +6821,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":697
+  /* "skbio/alignment/_ssw_wrapper.pyx":696
  *                     dict2d[row][column] = match_score if row == column \
  *                         else mismatch_score
  *         return self._convert_dict2d_to_matrix(dict2d)             # <<<<<<<<<<<<<<
@@ -6605,13 +6829,13 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
  */
   __Pyx_XDECREF(((PyObject *)__pyx_r));
-  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_convert_dict2d_to_matrix(__pyx_v_self, __pyx_v_dict2d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_convert_dict2d_to_matrix(__pyx_v_self, __pyx_v_dict2d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = ((PyArrayObject *)__pyx_t_1);
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":685
+  /* "skbio/alignment/_ssw_wrapper.pyx":684
  *         return seq
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
@@ -6637,7 +6861,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":699
+/* "skbio/alignment/_ssw_wrapper.pyx":698
  *         return self._convert_dict2d_to_matrix(dict2d)
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
@@ -6668,7 +6892,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   Py_ssize_t __pyx_t_10;
   PyObject *(*__pyx_t_11)(PyObject *);
   __pyx_t_5numpy_int8_t __pyx_t_12;
-  int __pyx_t_13;
+  Py_ssize_t __pyx_t_13;
   int __pyx_t_14;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
@@ -6679,17 +6903,17 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __pyx_pybuffernd_py_list_matrix.data = NULL;
   __pyx_pybuffernd_py_list_matrix.rcbuffer = &__pyx_pybuffer_py_list_matrix;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":701
+  /* "skbio/alignment/_ssw_wrapper.pyx":700
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
  *             _convert_dict2d_to_matrix(self, dict2d):
  *         if self.is_protein:             # <<<<<<<<<<<<<<
  *             sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"
  *         else:
  */
-  __pyx_t_1 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 700; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":702
+    /* "skbio/alignment/_ssw_wrapper.pyx":701
  *             _convert_dict2d_to_matrix(self, dict2d):
  *         if self.is_protein:
  *             sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"             # <<<<<<<<<<<<<<
@@ -6698,23 +6922,31 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
  */
     __Pyx_INCREF(__pyx_kp_s_ARNDCQEGHILKMFPSTWYVBZX);
     __pyx_v_sequence_order = __pyx_kp_s_ARNDCQEGHILKMFPSTWYVBZX;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":700
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
+ *             _convert_dict2d_to_matrix(self, dict2d):
+ *         if self.is_protein:             # <<<<<<<<<<<<<<
+ *             sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"
+ *         else:
+ */
     goto __pyx_L3;
   }
-  /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":704
+  /* "skbio/alignment/_ssw_wrapper.pyx":703
  *             sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"
  *         else:
  *             sequence_order = "ACGTN"             # <<<<<<<<<<<<<<
  *         cdef int i = 0
  *         length = len(sequence_order)
  */
+  /*else*/ {
     __Pyx_INCREF(__pyx_n_s_ACGTN);
     __pyx_v_sequence_order = __pyx_n_s_ACGTN;
   }
   __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":705
+  /* "skbio/alignment/_ssw_wrapper.pyx":704
  *         else:
  *             sequence_order = "ACGTN"
  *         cdef int i = 0             # <<<<<<<<<<<<<<
@@ -6723,59 +6955,59 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
  */
   __pyx_v_i = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":706
+  /* "skbio/alignment/_ssw_wrapper.pyx":705
  *             sequence_order = "ACGTN"
  *         cdef int i = 0
  *         length = len(sequence_order)             # <<<<<<<<<<<<<<
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
  *             np.empty(length*length, dtype=np.int8)
  */
-  __pyx_t_2 = PyObject_Length(__pyx_v_sequence_order); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_3 = PyInt_FromSsize_t(__pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyObject_Length(__pyx_v_sequence_order); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyInt_FromSsize_t(__pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_v_length = __pyx_t_3;
   __pyx_t_3 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":708
+  /* "skbio/alignment/_ssw_wrapper.pyx":707
  *         length = len(sequence_order)
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
  *             np.empty(length*length, dtype=np.int8)             # <<<<<<<<<<<<<<
  *         for row in sequence_order:
  *             for column in sequence_order:
  */
-  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = PyNumber_Multiply(__pyx_v_length, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyNumber_Multiply(__pyx_v_length, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_5);
   __Pyx_GIVEREF(__pyx_t_3);
   PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
   __pyx_t_3 = 0;
-  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_6);
-  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_int8); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_int8); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_7);
   __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_7);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (!(likely(((__pyx_t_7) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_7, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_t_7) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_7, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_8 = ((PyArrayObject *)__pyx_t_7);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
     if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
       __pyx_v_py_list_matrix = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.buf = NULL;
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     } else {__pyx_pybuffernd_py_list_matrix.diminfo[0].strides = __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_py_list_matrix.diminfo[0].shape = __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.shape[0];
     }
   }
@@ -6783,7 +7015,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __pyx_v_py_list_matrix = ((PyArrayObject *)__pyx_t_7);
   __pyx_t_7 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":709
+  /* "skbio/alignment/_ssw_wrapper.pyx":708
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
  *             np.empty(length*length, dtype=np.int8)
  *         for row in sequence_order:             # <<<<<<<<<<<<<<
@@ -6794,26 +7026,26 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
     __pyx_t_7 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_7); __pyx_t_2 = 0;
     __pyx_t_9 = NULL;
   } else {
-    __pyx_t_2 = -1; __pyx_t_7 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = -1; __pyx_t_7 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_7);
-    __pyx_t_9 = Py_TYPE(__pyx_t_7)->tp_iternext; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_9 = Py_TYPE(__pyx_t_7)->tp_iternext; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   for (;;) {
     if (likely(!__pyx_t_9)) {
       if (likely(PyList_CheckExact(__pyx_t_7))) {
         if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_7)) break;
         #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_3 = PyList_GET_ITEM(__pyx_t_7, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = PyList_GET_ITEM(__pyx_t_7, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         #else
-        __pyx_t_3 = PySequence_ITEM(__pyx_t_7, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = PySequence_ITEM(__pyx_t_7, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
         #endif
       } else {
         if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_7)) break;
         #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_7, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_7, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         #else
-        __pyx_t_3 = PySequence_ITEM(__pyx_t_7, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = PySequence_ITEM(__pyx_t_7, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
         #endif
       }
@@ -6823,7 +7055,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
         PyObject* exc_type = PyErr_Occurred();
         if (exc_type) {
           if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         }
         break;
       }
@@ -6832,7 +7064,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
     __Pyx_XDECREF_SET(__pyx_v_row, __pyx_t_3);
     __pyx_t_3 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":710
+    /* "skbio/alignment/_ssw_wrapper.pyx":709
  *             np.empty(length*length, dtype=np.int8)
  *         for row in sequence_order:
  *             for column in sequence_order:             # <<<<<<<<<<<<<<
@@ -6843,26 +7075,26 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __pyx_t_3 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_3); __pyx_t_10 = 0;
       __pyx_t_11 = NULL;
     } else {
-      __pyx_t_10 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_10 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_11 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_11 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     for (;;) {
       if (likely(!__pyx_t_11)) {
         if (likely(PyList_CheckExact(__pyx_t_3))) {
           if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_3)) break;
           #if CYTHON_COMPILING_IN_CPYTHON
-          __pyx_t_5 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_5); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_5 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_5); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           #else
-          __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           __Pyx_GOTREF(__pyx_t_5);
           #endif
         } else {
           if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
           #if CYTHON_COMPILING_IN_CPYTHON
-          __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_5); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_5); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           #else
-          __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           __Pyx_GOTREF(__pyx_t_5);
           #endif
         }
@@ -6872,7 +7104,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
           PyObject* exc_type = PyErr_Occurred();
           if (exc_type) {
             if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           }
           break;
         }
@@ -6881,19 +7113,19 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __Pyx_XDECREF_SET(__pyx_v_column, __pyx_t_5);
       __pyx_t_5 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":711
+      /* "skbio/alignment/_ssw_wrapper.pyx":710
  *         for row in sequence_order:
  *             for column in sequence_order:
  *                 py_list_matrix[i] = dict2d[row][column]             # <<<<<<<<<<<<<<
  *                 i += 1
  *         return py_list_matrix
  */
-      __pyx_t_5 = PyObject_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __pyx_t_5 = PyObject_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
       __Pyx_GOTREF(__pyx_t_5);
-      __pyx_t_4 = PyObject_GetItem(__pyx_t_5, __pyx_v_column); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __pyx_t_4 = PyObject_GetItem(__pyx_t_5, __pyx_v_column); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
       __Pyx_GOTREF(__pyx_t_4);
       __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __pyx_t_12 = __Pyx_PyInt_As_npy_int8(__pyx_t_4); if (unlikely((__pyx_t_12 == (npy_int8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_12 = __Pyx_PyInt_As_npy_int8(__pyx_t_4); if (unlikely((__pyx_t_12 == (npy_int8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       __pyx_t_13 = __pyx_v_i;
       __pyx_t_14 = -1;
@@ -6903,11 +7135,11 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_py_list_matrix.diminfo[0].shape)) __pyx_t_14 = 0;
       if (unlikely(__pyx_t_14 != -1)) {
         __Pyx_RaiseBufferIndexError(__pyx_t_14);
-        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       *__Pyx_BufPtrCContig1d(__pyx_t_5numpy_int8_t *, __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_py_list_matrix.diminfo[0].strides) = __pyx_t_12;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":712
+      /* "skbio/alignment/_ssw_wrapper.pyx":711
  *             for column in sequence_order:
  *                 py_list_matrix[i] = dict2d[row][column]
  *                 i += 1             # <<<<<<<<<<<<<<
@@ -6915,7 +7147,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
  */
       __pyx_v_i = (__pyx_v_i + 1);
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":710
+      /* "skbio/alignment/_ssw_wrapper.pyx":709
  *             np.empty(length*length, dtype=np.int8)
  *         for row in sequence_order:
  *             for column in sequence_order:             # <<<<<<<<<<<<<<
@@ -6925,7 +7157,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
     }
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":709
+    /* "skbio/alignment/_ssw_wrapper.pyx":708
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
  *             np.empty(length*length, dtype=np.int8)
  *         for row in sequence_order:             # <<<<<<<<<<<<<<
@@ -6935,7 +7167,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   }
   __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":713
+  /* "skbio/alignment/_ssw_wrapper.pyx":712
  *                 py_list_matrix[i] = dict2d[row][column]
  *                 i += 1
  *         return py_list_matrix             # <<<<<<<<<<<<<<
@@ -6945,7 +7177,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __pyx_r = ((PyArrayObject *)__pyx_v_py_list_matrix);
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":699
+  /* "skbio/alignment/_ssw_wrapper.pyx":698
  *         return self._convert_dict2d_to_matrix(dict2d)
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
@@ -6980,7 +7212,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   return __pyx_r;
 }
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":197
  *         # experimental exception made for __getbuffer__ and __releasebuffer__
  *         # -- the details of this may change.
  *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
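
[Editor's note, not part of the patch: the hunk above is the last one touching StripedSmithWaterman's scoring-matrix helpers before the diff moves into Cython's regenerated NumPy buffer-protocol shim (note the quoted include path now reflects a python3.5 conda environment rather than a python2.7 virtualenv). For orientation only, a minimal pure-Python transcription of what _build_match_matrix and _convert_dict2d_to_matrix do, taken from the .pyx lines quoted in the hunks; this is a sketch, not the module's API:

    import numpy as np

    def build_match_matrix(match_score, mismatch_score):
        # Nested dict over "ACGTN"; any pairing involving N scores 0.
        sequence_order = "ACGTN"
        dict2d = {}
        for row in sequence_order:
            dict2d[row] = {}
            for column in sequence_order:
                if column == 'N' or row == 'N':
                    dict2d[row][column] = 0
                else:
                    dict2d[row][column] = match_score if row == column else mismatch_score
        return dict2d

    def convert_dict2d_to_matrix(dict2d, is_protein=False):
        # Flatten the nested dict, row-major, into the 1-D int8 array the SSW library consumes.
        sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*" if is_protein else "ACGTN"
        length = len(sequence_order)
        py_list_matrix = np.empty(length * length, dtype=np.int8)
        i = 0
        for row in sequence_order:
            for column in sequence_order:
                py_list_matrix[i] = dict2d[row][column]
                i += 1
        return py_list_matrix

    # convert_dict2d_to_matrix(build_match_matrix(2, -3)) -> 25-element int8 substitution matrix
]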
@@ -7030,7 +7262,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_GIVEREF(__pyx_v_info->obj);
   }
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":203
  *             # of flags
  * 
  *             if info == NULL: return             # <<<<<<<<<<<<<<
@@ -7043,7 +7275,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L0;
   }
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":206
  * 
  *             cdef int copy_shape, i, ndim
  *             cdef int endian_detector = 1             # <<<<<<<<<<<<<<
@@ -7052,7 +7284,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_endian_detector = 1;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":207
  *             cdef int copy_shape, i, ndim
  *             cdef int endian_detector = 1
  *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
@@ -7061,7 +7293,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":209
  *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
  * 
  *             ndim = PyArray_NDIM(self)             # <<<<<<<<<<<<<<
@@ -7070,7 +7302,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":211
  *             ndim = PyArray_NDIM(self)
  * 
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
@@ -7080,7 +7312,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
   if (__pyx_t_1) {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":212
  * 
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
  *                 copy_shape = 1             # <<<<<<<<<<<<<<
@@ -7088,22 +7320,30 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  *                 copy_shape = 0
  */
     __pyx_v_copy_shape = 1;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":211
+ *             ndim = PyArray_NDIM(self)
+ * 
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
+ *                 copy_shape = 1
+ *             else:
+ */
     goto __pyx_L4;
   }
-  /*else*/ {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":214
  *                 copy_shape = 1
  *             else:
  *                 copy_shape = 0             # <<<<<<<<<<<<<<
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  */
+  /*else*/ {
     __pyx_v_copy_shape = 0;
   }
   __pyx_L4:;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":216
  *                 copy_shape = 0
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
@@ -7117,7 +7357,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L6_bool_binop_done;
   }
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":217
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):             # <<<<<<<<<<<<<<
@@ -7127,9 +7367,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
   __pyx_t_1 = __pyx_t_2;
   __pyx_L6_bool_binop_done:;
+
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":216
+ *                 copy_shape = 0
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ */
   if (__pyx_t_1) {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":218
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
@@ -7141,9 +7389,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_Raise(__pyx_t_3, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":216
+ *                 copy_shape = 0
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ */
   }
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":220
  *                 raise ValueError(u"ndarray is not C contiguous")
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
@@ -7157,7 +7413,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L9_bool_binop_done;
   }
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":221
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):             # <<<<<<<<<<<<<<
@@ -7167,9 +7423,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
   __pyx_t_1 = __pyx_t_2;
   __pyx_L9_bool_binop_done:;
+
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":220
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ */
   if (__pyx_t_1) {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":222
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
@@ -7181,9 +7445,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_Raise(__pyx_t_3, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":220
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ */
   }
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":224
  *                 raise ValueError(u"ndarray is not Fortran contiguous")
  * 
  *             info.buf = PyArray_DATA(self)             # <<<<<<<<<<<<<<
@@ -7192,7 +7464,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":225
  * 
  *             info.buf = PyArray_DATA(self)
  *             info.ndim = ndim             # <<<<<<<<<<<<<<
@@ -7201,7 +7473,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->ndim = __pyx_v_ndim;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":226
  *             info.buf = PyArray_DATA(self)
  *             info.ndim = ndim
  *             if copy_shape:             # <<<<<<<<<<<<<<
@@ -7211,7 +7483,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = (__pyx_v_copy_shape != 0);
   if (__pyx_t_1) {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":229
  *                 # Allocate new buffer for strides and shape info.
  *                 # This is allocated as one block, strides first.
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)             # <<<<<<<<<<<<<<
@@ -7220,7 +7492,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":230
  *                 # This is allocated as one block, strides first.
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
  *                 info.shape = info.strides + ndim             # <<<<<<<<<<<<<<
@@ -7229,7 +7501,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":231
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
  *                 info.shape = info.strides + ndim
  *                 for i in range(ndim):             # <<<<<<<<<<<<<<
@@ -7240,7 +7512,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
       __pyx_v_i = __pyx_t_5;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":232
  *                 info.shape = info.strides + ndim
  *                 for i in range(ndim):
  *                     info.strides[i] = PyArray_STRIDES(self)[i]             # <<<<<<<<<<<<<<
@@ -7249,7 +7521,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
       (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":233
  *                 for i in range(ndim):
  *                     info.strides[i] = PyArray_STRIDES(self)[i]
  *                     info.shape[i] = PyArray_DIMS(self)[i]             # <<<<<<<<<<<<<<
@@ -7258,20 +7530,28 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
       (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
     }
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":226
+ *             info.buf = PyArray_DATA(self)
+ *             info.ndim = ndim
+ *             if copy_shape:             # <<<<<<<<<<<<<<
+ *                 # Allocate new buffer for strides and shape info.
+ *                 # This is allocated as one block, strides first.
+ */
     goto __pyx_L11;
   }
-  /*else*/ {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":235
  *                     info.shape[i] = PyArray_DIMS(self)[i]
  *             else:
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)             # <<<<<<<<<<<<<<
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL
  */
+  /*else*/ {
     __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":236
  *             else:
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)             # <<<<<<<<<<<<<<
@@ -7282,7 +7562,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   __pyx_L11:;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":237
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL             # <<<<<<<<<<<<<<
@@ -7291,7 +7571,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->suboffsets = NULL;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":238
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL
  *             info.itemsize = PyArray_ITEMSIZE(self)             # <<<<<<<<<<<<<<
@@ -7300,7 +7580,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":239
  *             info.suboffsets = NULL
  *             info.itemsize = PyArray_ITEMSIZE(self)
  *             info.readonly = not PyArray_ISWRITEABLE(self)             # <<<<<<<<<<<<<<
@@ -7309,28 +7589,28 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":242
  * 
  *             cdef int t
  *             cdef char* f = NULL             # <<<<<<<<<<<<<<
  *             cdef dtype descr = self.descr
- *             cdef list stack
+ *             cdef int offset
  */
   __pyx_v_f = NULL;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":243
  *             cdef int t
  *             cdef char* f = NULL
  *             cdef dtype descr = self.descr             # <<<<<<<<<<<<<<
- *             cdef list stack
  *             cdef int offset
+ * 
  */
   __pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
   __Pyx_INCREF(__pyx_t_3);
   __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
   __pyx_t_3 = 0;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":247
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":246
  *             cdef int offset
  * 
  *             cdef bint hasfields = PyDataType_HASFIELDS(descr)             # <<<<<<<<<<<<<<
@@ -7339,7 +7619,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":249
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":248
  *             cdef bint hasfields = PyDataType_HASFIELDS(descr)
  * 
  *             if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
@@ -7357,7 +7637,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_L15_bool_binop_done:;
   if (__pyx_t_1) {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":250
  *             if not hasfields and not copy_shape:
  *                 # do not call releasebuffer
  *                 info.obj = None             # <<<<<<<<<<<<<<
@@ -7369,17 +7649,25 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_GOTREF(__pyx_v_info->obj);
     __Pyx_DECREF(__pyx_v_info->obj);
     __pyx_v_info->obj = Py_None;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":248
+ *             cdef bint hasfields = PyDataType_HASFIELDS(descr)
+ * 
+ *             if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
+ *                 # do not call releasebuffer
+ *                 info.obj = None
+ */
     goto __pyx_L14;
   }
-  /*else*/ {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":253
  *             else:
  *                 # need to call releasebuffer
  *                 info.obj = self             # <<<<<<<<<<<<<<
  * 
  *             if not hasfields:
  */
+  /*else*/ {
     __Pyx_INCREF(((PyObject *)__pyx_v_self));
     __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
     __Pyx_GOTREF(__pyx_v_info->obj);
@@ -7388,7 +7676,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   __pyx_L14:;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":255
  *                 info.obj = self
  * 
  *             if not hasfields:             # <<<<<<<<<<<<<<
@@ -7398,7 +7686,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
   if (__pyx_t_1) {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":256
  * 
  *             if not hasfields:
  *                 t = descr.type_num             # <<<<<<<<<<<<<<
@@ -7408,7 +7696,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __pyx_t_4 = __pyx_v_descr->type_num;
     __pyx_v_t = __pyx_t_4;
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":257
  *             if not hasfields:
  *                 t = descr.type_num
  *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
@@ -7428,7 +7716,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     }
     __pyx_L20_next_or:;
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":258
  *                 t = descr.type_num
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
@@ -7444,43 +7732,51 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
     __pyx_t_1 = __pyx_t_2;
     __pyx_L19_bool_binop_done:;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ *             if not hasfields:
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")
+ */
     if (__pyx_t_1) {
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":259
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    }
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
- *                 elif t == NPY_CDOUBLE:     f = "Zd"
- *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
- *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
- *                 else:
- *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ *             if not hasfields:
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")
  */
-    switch (__pyx_v_t) {
+    }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":260
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"             # <<<<<<<<<<<<<<
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"
  */
+    switch (__pyx_v_t) {
       case NPY_BYTE:
       __pyx_v_f = __pyx_k_b;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":261
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"             # <<<<<<<<<<<<<<
@@ -7491,7 +7787,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_B;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":262
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"             # <<<<<<<<<<<<<<
@@ -7502,7 +7798,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_h;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":263
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"
  *                 elif t == NPY_USHORT:      f = "H"             # <<<<<<<<<<<<<<
@@ -7513,7 +7809,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_H;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":264
  *                 elif t == NPY_SHORT:       f = "h"
  *                 elif t == NPY_USHORT:      f = "H"
  *                 elif t == NPY_INT:         f = "i"             # <<<<<<<<<<<<<<
@@ -7524,7 +7820,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_i;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":265
  *                 elif t == NPY_USHORT:      f = "H"
  *                 elif t == NPY_INT:         f = "i"
  *                 elif t == NPY_UINT:        f = "I"             # <<<<<<<<<<<<<<
@@ -7535,7 +7831,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_I;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":266
  *                 elif t == NPY_INT:         f = "i"
  *                 elif t == NPY_UINT:        f = "I"
  *                 elif t == NPY_LONG:        f = "l"             # <<<<<<<<<<<<<<
@@ -7546,7 +7842,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_l;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":267
  *                 elif t == NPY_UINT:        f = "I"
  *                 elif t == NPY_LONG:        f = "l"
  *                 elif t == NPY_ULONG:       f = "L"             # <<<<<<<<<<<<<<
@@ -7557,7 +7853,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_L;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":268
  *                 elif t == NPY_LONG:        f = "l"
  *                 elif t == NPY_ULONG:       f = "L"
  *                 elif t == NPY_LONGLONG:    f = "q"             # <<<<<<<<<<<<<<
@@ -7568,7 +7864,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_q;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":269
  *                 elif t == NPY_ULONG:       f = "L"
  *                 elif t == NPY_LONGLONG:    f = "q"
  *                 elif t == NPY_ULONGLONG:   f = "Q"             # <<<<<<<<<<<<<<
@@ -7579,7 +7875,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Q;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":270
  *                 elif t == NPY_LONGLONG:    f = "q"
  *                 elif t == NPY_ULONGLONG:   f = "Q"
  *                 elif t == NPY_FLOAT:       f = "f"             # <<<<<<<<<<<<<<
@@ -7590,7 +7886,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_f;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":271
  *                 elif t == NPY_ULONGLONG:   f = "Q"
  *                 elif t == NPY_FLOAT:       f = "f"
  *                 elif t == NPY_DOUBLE:      f = "d"             # <<<<<<<<<<<<<<
@@ -7601,7 +7897,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_d;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":272
  *                 elif t == NPY_FLOAT:       f = "f"
  *                 elif t == NPY_DOUBLE:      f = "d"
  *                 elif t == NPY_LONGDOUBLE:  f = "g"             # <<<<<<<<<<<<<<
@@ -7612,7 +7908,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_g;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":273
  *                 elif t == NPY_DOUBLE:      f = "d"
  *                 elif t == NPY_LONGDOUBLE:  f = "g"
  *                 elif t == NPY_CFLOAT:      f = "Zf"             # <<<<<<<<<<<<<<
@@ -7623,7 +7919,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zf;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":274
  *                 elif t == NPY_LONGDOUBLE:  f = "g"
  *                 elif t == NPY_CFLOAT:      f = "Zf"
  *                 elif t == NPY_CDOUBLE:     f = "Zd"             # <<<<<<<<<<<<<<
@@ -7634,7 +7930,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zd;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":275
  *                 elif t == NPY_CFLOAT:      f = "Zf"
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"             # <<<<<<<<<<<<<<
@@ -7645,7 +7941,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zg;
       break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":276
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
  *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
@@ -7657,33 +7953,33 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       break;
       default:
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":278
  *                 elif t == NPY_OBJECT:      f = "O"
  *                 else:
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
  *                 info.format = f
  *                 return
  */
-      __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_GIVEREF(__pyx_t_6);
       PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
       __pyx_t_6 = 0;
-      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       __Pyx_Raise(__pyx_t_6, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       break;
     }
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":279
  *                 else:
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *                 info.format = f             # <<<<<<<<<<<<<<
@@ -7692,7 +7988,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->format = __pyx_v_f;
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":280
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *                 info.format = f
  *                 return             # <<<<<<<<<<<<<<
@@ -7701,19 +7997,27 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_r = 0;
     goto __pyx_L0;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":255
+ *                 info.obj = self
+ * 
+ *             if not hasfields:             # <<<<<<<<<<<<<<
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ */
   }
-  /*else*/ {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":282
  *                 return
  *             else:
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)             # <<<<<<<<<<<<<<
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0
  */
-    __pyx_v_info->format = ((char *)malloc(255));
+  /*else*/ {
+    __pyx_v_info->format = ((char *)malloc(0xFF));
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":283
  *             else:
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
  *                 info.format[0] = c'^' # Native data types, manual alignment             # <<<<<<<<<<<<<<
@@ -7722,7 +8026,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     (__pyx_v_info->format[0]) = '^';
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":284
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0             # <<<<<<<<<<<<<<
@@ -7731,17 +8035,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_offset = 0;
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":285
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0
  *                 f = _util_dtypestring(descr, info.format + 1,             # <<<<<<<<<<<<<<
  *                                       info.format + _buffer_format_string_len,
  *                                       &offset)
  */
-    __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __pyx_v_f = __pyx_t_7;
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":288
  *                                       info.format + _buffer_format_string_len,
  *                                       &offset)
  *                 f[0] = c'\0' # Terminate format string             # <<<<<<<<<<<<<<
@@ -7751,7 +8055,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     (__pyx_v_f[0]) = '\x00';
   }
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":197
  *         # experimental exception made for __getbuffer__ and __releasebuffer__
  *         # -- the details of this may change.
  *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
@@ -7783,7 +8087,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   return __pyx_r;
 }
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":290
  *                 f[0] = c'\0' # Terminate format string
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
@@ -7807,7 +8111,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("__releasebuffer__", 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":291
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):
  *             if PyArray_HASFIELDS(self):             # <<<<<<<<<<<<<<
@@ -7817,7 +8121,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
   if (__pyx_t_1) {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":292
  *         def __releasebuffer__(ndarray self, Py_buffer* info):
  *             if PyArray_HASFIELDS(self):
  *                 stdlib.free(info.format)             # <<<<<<<<<<<<<<
@@ -7825,11 +8129,17 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
  *                 stdlib.free(info.strides)
  */
     free(__pyx_v_info->format);
-    goto __pyx_L3;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":291
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):
+ *             if PyArray_HASFIELDS(self):             # <<<<<<<<<<<<<<
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
   }
-  __pyx_L3:;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":293
  *             if PyArray_HASFIELDS(self):
  *                 stdlib.free(info.format)
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
@@ -7839,7 +8149,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
   if (__pyx_t_1) {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":295
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":294
  *                 stdlib.free(info.format)
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
  *                 stdlib.free(info.strides)             # <<<<<<<<<<<<<<
@@ -7847,11 +8157,17 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
  * 
  */
     free(__pyx_v_info->strides);
-    goto __pyx_L4;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":293
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
+ *                 stdlib.free(info.strides)
+ *                 # info.shape was stored after info.strides in the same block
+ */
   }
-  __pyx_L4:;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":290
  *                 f[0] = c'\0' # Terminate format string
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
@@ -7863,7 +8179,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __Pyx_RefNannyFinishContext();
 }
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":770
  * ctypedef npy_cdouble     complex_t
  * 
  * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
@@ -7880,7 +8196,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":771
  * 
  * cdef inline object PyArray_MultiIterNew1(a):
  *     return PyArray_MultiIterNew(1, <void*>a)             # <<<<<<<<<<<<<<
@@ -7888,13 +8204,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
  * cdef inline object PyArray_MultiIterNew2(a, b):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":770
  * ctypedef npy_cdouble     complex_t
  * 
  * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
@@ -7913,7 +8229,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
   return __pyx_r;
 }
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":773
  *     return PyArray_MultiIterNew(1, <void*>a)
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
@@ -7930,7 +8246,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":774
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)             # <<<<<<<<<<<<<<
@@ -7938,13 +8254,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
  * cdef inline object PyArray_MultiIterNew3(a, b, c):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":773
  *     return PyArray_MultiIterNew(1, <void*>a)
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
@@ -7963,7 +8279,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
   return __pyx_r;
 }
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":776
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
@@ -7980,7 +8296,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":777
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)             # <<<<<<<<<<<<<<
@@ -7988,13 +8304,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":776
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
@@ -8013,7 +8329,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
   return __pyx_r;
 }
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":779
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
@@ -8030,7 +8346,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":780
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)             # <<<<<<<<<<<<<<
@@ -8038,13 +8354,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":779
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
@@ -8063,7 +8379,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
   return __pyx_r;
 }
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":782
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
@@ -8080,7 +8396,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":784
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":783
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)             # <<<<<<<<<<<<<<
@@ -8088,13 +8404,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 784; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 783; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":782
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
@@ -8113,7 +8429,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
   return __pyx_r;
 }
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":785
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
  * 
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
@@ -8145,17 +8461,17 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_util_dtypestring", 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":793
- *     cdef int delta_offset
- *     cdef tuple i
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":790
+ * 
+ *     cdef dtype child
  *     cdef int endian_detector = 1             # <<<<<<<<<<<<<<
  *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
  *     cdef tuple fields
  */
   __pyx_v_endian_detector = 1;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
- *     cdef tuple i
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":791
+ *     cdef dtype child
  *     cdef int endian_detector = 1
  *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
  *     cdef tuple fields
@@ -8163,7 +8479,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":794
  *     cdef tuple fields
  * 
  *     for childname in descr.names:             # <<<<<<<<<<<<<<
@@ -8172,21 +8488,21 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   if (unlikely(__pyx_v_descr->names == Py_None)) {
     PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
-    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
   for (;;) {
     if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
     #if CYTHON_COMPILING_IN_CPYTHON
-    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     #else
-    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     #endif
     __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
     __pyx_t_3 = 0;
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":795
  * 
  *     for childname in descr.names:
  *         fields = descr.fields[childname]             # <<<<<<<<<<<<<<
@@ -8195,15 +8511,15 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
     if (unlikely(__pyx_v_descr->fields == Py_None)) {
       PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
-    __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
     __Pyx_GOTREF(__pyx_t_3);
-    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
     __pyx_t_3 = 0;
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":796
  *     for childname in descr.names:
  *         fields = descr.fields[childname]
  *         child, new_offset = fields             # <<<<<<<<<<<<<<
@@ -8220,7 +8536,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       if (unlikely(size != 2)) {
         if (size > 2) __Pyx_RaiseTooManyValuesError(2);
         else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       #if CYTHON_COMPILING_IN_CPYTHON
       __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); 
@@ -8228,52 +8544,60 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       __Pyx_INCREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_t_4);
       #else
-      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       #endif
     } else {
-      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
-    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
     __pyx_t_3 = 0;
     __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
     __pyx_t_4 = 0;
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":798
  *         child, new_offset = fields
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
  * 
  */
-    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
     if (__pyx_t_6) {
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":799
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":798
+ *         child, new_offset = fields
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ */
     }
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":804
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":801
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
  * 
  *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
@@ -8293,7 +8617,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     }
     __pyx_L8_next_or:;
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":805
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":802
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
@@ -8309,23 +8633,39 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
     __pyx_t_6 = __pyx_t_7;
     __pyx_L7_bool_binop_done:;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":801
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")
+ */
     if (__pyx_t_6) {
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":803
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *             # One could encode it in the format string and have Cython
  *             # complain instead, BUT: < and > in format strings also imply
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":801
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")
+ */
     }
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":813
  * 
  *         # Output padding bytes
  *         while offset[0] < new_offset:             # <<<<<<<<<<<<<<
@@ -8333,24 +8673,24 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  *             f += 1
  */
     while (1) {
-      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (!__pyx_t_6) break;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":817
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":814
  *         # Output padding bytes
  *         while offset[0] < new_offset:
  *             f[0] = 120 # "x"; pad byte             # <<<<<<<<<<<<<<
  *             f += 1
  *             offset[0] += 1
  */
-      (__pyx_v_f[0]) = 120;
+      (__pyx_v_f[0]) = 0x78;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":815
  *         while offset[0] < new_offset:
  *             f[0] = 120 # "x"; pad byte
  *             f += 1             # <<<<<<<<<<<<<<
@@ -8359,7 +8699,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
       __pyx_v_f = (__pyx_v_f + 1);
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":819
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":816
  *             f[0] = 120 # "x"; pad byte
  *             f += 1
  *             offset[0] += 1             # <<<<<<<<<<<<<<
@@ -8370,7 +8710,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
     }
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":818
  *             offset[0] += 1
  * 
  *         offset[0] += child.itemsize             # <<<<<<<<<<<<<<
@@ -8380,7 +8720,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_8 = 0;
     (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":820
  *         offset[0] += child.itemsize
  * 
  *         if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
@@ -8390,19 +8730,19 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
     if (__pyx_t_6) {
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":824
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":821
  * 
  *         if not PyDataType_HASFIELDS(child):
  *             t = child.type_num             # <<<<<<<<<<<<<<
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")
  */
-      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
       __pyx_t_4 = 0;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":825
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":822
  *         if not PyDataType_HASFIELDS(child):
  *             t = child.type_num
  *             if end - f < 5:             # <<<<<<<<<<<<<<
@@ -8412,357 +8752,365 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
       if (__pyx_t_6) {
 
-        /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+        /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":823
  *             t = child.type_num
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  */
-        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
         __Pyx_Raise(__pyx_t_4, 0, 0, 0);
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+        /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":822
+ *         if not PyDataType_HASFIELDS(child):
+ *             t = child.type_num
+ *             if end - f < 5:             # <<<<<<<<<<<<<<
+ *                 raise RuntimeError(u"Format string allocated too short.")
+ * 
+ */
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":826
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"             # <<<<<<<<<<<<<<
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 98;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":827
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"             # <<<<<<<<<<<<<<
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 66;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":828
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"             # <<<<<<<<<<<<<<
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 104;
+        (__pyx_v_f[0]) = 0x68;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":829
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"             # <<<<<<<<<<<<<<
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 72;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":830
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"             # <<<<<<<<<<<<<<
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 105;
+        (__pyx_v_f[0]) = 0x69;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":831
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 73;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":832
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"             # <<<<<<<<<<<<<<
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 108;
+        (__pyx_v_f[0]) = 0x6C;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":833
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 76;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":834
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"             # <<<<<<<<<<<<<<
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 113;
+        (__pyx_v_f[0]) = 0x71;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":835
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"             # <<<<<<<<<<<<<<
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 81;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":836
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"             # <<<<<<<<<<<<<<
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 102;
+        (__pyx_v_f[0]) = 0x66;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":837
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 100;
+        (__pyx_v_f[0]) = 0x64;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":838
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"             # <<<<<<<<<<<<<<
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 103;
+        (__pyx_v_f[0]) = 0x67;
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":839
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf             # <<<<<<<<<<<<<<
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
-        (__pyx_v_f[1]) = 102;
+        (__pyx_v_f[1]) = 0x66;
         __pyx_v_f = (__pyx_v_f + 1);
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":843
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":840
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd             # <<<<<<<<<<<<<<
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
-        (__pyx_v_f[1]) = 100;
+        (__pyx_v_f[1]) = 0x64;
         __pyx_v_f = (__pyx_v_f + 1);
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":841
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg             # <<<<<<<<<<<<<<
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  *             else:
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
-        (__pyx_v_f[1]) = 103;
+        (__pyx_v_f[1]) = 0x67;
         __pyx_v_f = (__pyx_v_f + 1);
         goto __pyx_L15;
       }
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":842
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"             # <<<<<<<<<<<<<<
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 79;
         goto __pyx_L15;
       }
-      /*else*/ {
 
-        /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":847
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":844
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
  *             f += 1
  *         else:
  */
-        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      /*else*/ {
+        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
-        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
         __Pyx_GIVEREF(__pyx_t_3);
         PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
         __pyx_t_3 = 0;
-        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
         __Pyx_Raise(__pyx_t_3, 0, 0, 0);
         __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       __pyx_L15:;
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":848
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":845
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *             f += 1             # <<<<<<<<<<<<<<
@@ -8770,23 +9118,31 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  *             # Cython ignores struct boundary information ("T{...}"),
  */
       __pyx_v_f = (__pyx_v_f + 1);
+
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":820
+ *         offset[0] += child.itemsize
+ * 
+ *         if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
+ *             t = child.type_num
+ *             if end - f < 5:
+ */
       goto __pyx_L13;
     }
-    /*else*/ {
 
-      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":852
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":849
  *             # Cython ignores struct boundary information ("T{...}"),
  *             # so don't output it
  *             f = _util_dtypestring(child, f, end, offset)             # <<<<<<<<<<<<<<
  *     return f
  * 
  */
-      __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    /*else*/ {
+      __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __pyx_v_f = __pyx_t_9;
     }
     __pyx_L13:;
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":794
  *     cdef tuple fields
  * 
  *     for childname in descr.names:             # <<<<<<<<<<<<<<
@@ -8796,7 +9152,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":853
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":850
  *             # so don't output it
  *             f = _util_dtypestring(child, f, end, offset)
  *     return f             # <<<<<<<<<<<<<<
@@ -8806,7 +9162,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   __pyx_r = __pyx_v_f;
   goto __pyx_L0;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":785
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
  * 
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
@@ -8831,7 +9187,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   return __pyx_r;
 }
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":966
  * 
  * 
  * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
@@ -8846,7 +9202,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   int __pyx_t_2;
   __Pyx_RefNannySetupContext("set_array_base", 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":968
  * cdef inline void set_array_base(ndarray arr, object base):
  *      cdef PyObject* baseptr
  *      if base is None:             # <<<<<<<<<<<<<<
@@ -8857,7 +9213,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   __pyx_t_2 = (__pyx_t_1 != 0);
   if (__pyx_t_2) {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":969
  *      cdef PyObject* baseptr
  *      if base is None:
  *          baseptr = NULL             # <<<<<<<<<<<<<<
@@ -8865,20 +9221,28 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  *          Py_INCREF(base) # important to do this before decref below!
  */
     __pyx_v_baseptr = NULL;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":968
+ * cdef inline void set_array_base(ndarray arr, object base):
+ *      cdef PyObject* baseptr
+ *      if base is None:             # <<<<<<<<<<<<<<
+ *          baseptr = NULL
+ *      else:
+ */
     goto __pyx_L3;
   }
-  /*else*/ {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":971
  *          baseptr = NULL
  *      else:
  *          Py_INCREF(base) # important to do this before decref below!             # <<<<<<<<<<<<<<
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)
  */
+  /*else*/ {
     Py_INCREF(__pyx_v_base);
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":975
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":972
  *      else:
  *          Py_INCREF(base) # important to do this before decref below!
  *          baseptr = <PyObject*>base             # <<<<<<<<<<<<<<
@@ -8889,7 +9253,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   }
   __pyx_L3:;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":973
  *          Py_INCREF(base) # important to do this before decref below!
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)             # <<<<<<<<<<<<<<
@@ -8898,7 +9262,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
   Py_XDECREF(__pyx_v_arr->base);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":974
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)
  *      arr.base = baseptr             # <<<<<<<<<<<<<<
@@ -8907,7 +9271,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
   __pyx_v_arr->base = __pyx_v_baseptr;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":966
  * 
  * 
  * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
@@ -8919,7 +9283,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   __Pyx_RefNannyFinishContext();
 }
 
-/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -8933,7 +9297,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("get_array_base", 0);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":977
  * 
  * cdef inline object get_array_base(ndarray arr):
  *     if arr.base is NULL:             # <<<<<<<<<<<<<<
@@ -8943,7 +9307,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
   if (__pyx_t_1) {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":981
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":978
  * cdef inline object get_array_base(ndarray arr):
  *     if arr.base is NULL:
  *         return None             # <<<<<<<<<<<<<<
@@ -8954,21 +9318,29 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
     __Pyx_INCREF(Py_None);
     __pyx_r = Py_None;
     goto __pyx_L0;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":977
+ * 
+ * cdef inline object get_array_base(ndarray arr):
+ *     if arr.base is NULL:             # <<<<<<<<<<<<<<
+ *         return None
+ *     else:
+ */
   }
-  /*else*/ {
 
-    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":983
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":980
  *         return None
  *     else:
  *         return <object>arr.base             # <<<<<<<<<<<<<<
  */
+  /*else*/ {
     __Pyx_XDECREF(__pyx_r);
     __Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
     __pyx_r = ((PyObject *)__pyx_v_arr->base);
     goto __pyx_L0;
   }
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -9082,8 +9454,9 @@ static PyTypeObject __pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructu
   0, /*tp_setattr*/
   #if PY_MAJOR_VERSION < 3
   0, /*tp_compare*/
-  #else
-  0, /*reserved*/
+  #endif
+  #if PY_MAJOR_VERSION >= 3
+  0, /*tp_as_async*/
   #endif
   __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_7__repr__, /*tp_repr*/
   0, /*tp_as_number*/
@@ -9227,8 +9600,9 @@ static PyTypeObject __pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWate
   0, /*tp_setattr*/
   #if PY_MAJOR_VERSION < 3
   0, /*tp_compare*/
-  #else
-  0, /*reserved*/
+  #endif
+  #if PY_MAJOR_VERSION >= 3
+  0, /*tp_as_async*/
   #endif
   0, /*tp_repr*/
   0, /*tp_as_number*/
@@ -9299,7 +9673,6 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_kp_s_, __pyx_k_, sizeof(__pyx_k_), 0, 0, 1, 0},
   {&__pyx_n_s_ACGTN, __pyx_k_ACGTN, sizeof(__pyx_k_ACGTN), 0, 0, 1, 1},
   {&__pyx_kp_s_ARNDCQEGHILKMFPSTWYVBZX, __pyx_k_ARNDCQEGHILKMFPSTWYVBZX, sizeof(__pyx_k_ARNDCQEGHILKMFPSTWYVBZX), 0, 0, 1, 0},
-  {&__pyx_n_s_Alignment, __pyx_k_Alignment, sizeof(__pyx_k_Alignment), 0, 0, 1, 1},
   {&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1},
   {&__pyx_n_s_Exception, __pyx_k_Exception, sizeof(__pyx_k_Exception), 0, 0, 1, 1},
   {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
@@ -9356,7 +9729,6 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_n_s_np_nt_table, __pyx_k_np_nt_table, sizeof(__pyx_k_np_nt_table), 0, 0, 1, 1},
   {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
   {&__pyx_n_s_optimal_alignment_score, __pyx_k_optimal_alignment_score, sizeof(__pyx_k_optimal_alignment_score), 0, 0, 1, 1},
-  {&__pyx_n_s_ord, __pyx_k_ord, sizeof(__pyx_k_ord), 0, 0, 1, 1},
   {&__pyx_n_s_override_skip_babp, __pyx_k_override_skip_babp, sizeof(__pyx_k_override_skip_babp), 0, 0, 1, 1},
   {&__pyx_n_s_property, __pyx_k_property, sizeof(__pyx_k_property), 0, 0, 1, 1},
   {&__pyx_n_s_protein, __pyx_k_protein, sizeof(__pyx_k_protein), 0, 0, 1, 1},
@@ -9374,7 +9746,6 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_n_s_score_size, __pyx_k_score_size, sizeof(__pyx_k_score_size), 0, 0, 1, 1},
   {&__pyx_n_s_sequence, __pyx_k_sequence, sizeof(__pyx_k_sequence), 0, 0, 1, 1},
   {&__pyx_n_s_set_zero_based, __pyx_k_set_zero_based, sizeof(__pyx_k_set_zero_based), 0, 0, 1, 1},
-  {&__pyx_n_s_skbio_alignment, __pyx_k_skbio_alignment, sizeof(__pyx_k_skbio_alignment), 0, 0, 1, 1},
   {&__pyx_n_s_skbio_sequence, __pyx_k_skbio_sequence, sizeof(__pyx_k_skbio_sequence), 0, 0, 1, 1},
   {&__pyx_n_s_suboptimal_alignment_score, __pyx_k_suboptimal_alignment_score, sizeof(__pyx_k_suboptimal_alignment_score), 0, 0, 1, 1},
   {&__pyx_n_s_substitution_matrix, __pyx_k_substitution_matrix, sizeof(__pyx_k_substitution_matrix), 0, 0, 1, 1},
@@ -9391,13 +9762,12 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {0, 0, 0, 0, 0, 0, 0}
 };
 static int __Pyx_InitCachedBuiltins(void) {
-  __pyx_builtin_property = __Pyx_GetBuiltinName(__pyx_n_s_property); if (!__pyx_builtin_property) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 266; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_Exception = __Pyx_GetBuiltinName(__pyx_n_s_Exception); if (!__pyx_builtin_Exception) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_ord = __Pyx_GetBuiltinName(__pyx_n_s_ord); if (!__pyx_builtin_ord) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_property = __Pyx_GetBuiltinName(__pyx_n_s_property); if (!__pyx_builtin_property) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_Exception = __Pyx_GetBuiltinName(__pyx_n_s_Exception); if (!__pyx_builtin_Exception) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   return 0;
   __pyx_L1_error:;
   return -1;
@@ -9407,73 +9777,73 @@ static int __Pyx_InitCachedConstants(void) {
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":125
+  /* "skbio/alignment/_ssw_wrapper.pyx":124
  *             align_len = len(query)
  *             if align_len > 13:
  *                 target = target[:10] + "..."             # <<<<<<<<<<<<<<
  *                 query = query[:10] + "..."
  * 
  */
-  __pyx_slice__2 = PySlice_New(Py_None, __pyx_int_10, Py_None); if (unlikely(!__pyx_slice__2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_slice__2 = PySlice_New(Py_None, __pyx_int_10, Py_None); if (unlikely(!__pyx_slice__2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_slice__2);
   __Pyx_GIVEREF(__pyx_slice__2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":126
+  /* "skbio/alignment/_ssw_wrapper.pyx":125
  *             if align_len > 13:
  *                 target = target[:10] + "..."
  *                 query = query[:10] + "..."             # <<<<<<<<<<<<<<
  * 
  *             length = "Length: %d" % align_len
  */
-  __pyx_slice__4 = PySlice_New(Py_None, __pyx_int_10, Py_None); if (unlikely(!__pyx_slice__4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_slice__4 = PySlice_New(Py_None, __pyx_int_10, Py_None); if (unlikely(!__pyx_slice__4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_slice__4);
   __Pyx_GIVEREF(__pyx_slice__4);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":371
+  /* "skbio/alignment/_ssw_wrapper.pyx":370
  *         # Save the original index scheme and then set it to 0 (1/2)
  *         orig_z_base = self.is_zero_based()
  *         self.set_zero_based(True)             # <<<<<<<<<<<<<<
  *         aligned_sequence = []
  *         seq = sequence[begin:end + 1]
  */
-  __pyx_tuple__7 = PyTuple_Pack(1, Py_True); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__7 = PyTuple_Pack(1, Py_True); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__7);
   __Pyx_GIVEREF(__pyx_tuple__7);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":562
+  /* "skbio/alignment/_ssw_wrapper.pyx":561
  *         self.read_sequence = query_sequence
  *         if gap_open_penalty <= 0:
  *             raise ValueError("`gap_open_penalty` must be > 0")             # <<<<<<<<<<<<<<
  *         self.gap_open_penalty = gap_open_penalty
  *         if gap_extend_penalty <= 0:
  */
-  __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_gap_open_penalty_must_be_0); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_gap_open_penalty_must_be_0); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__9);
   __Pyx_GIVEREF(__pyx_tuple__9);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":565
+  /* "skbio/alignment/_ssw_wrapper.pyx":564
  *         self.gap_open_penalty = gap_open_penalty
  *         if gap_extend_penalty <= 0:
  *             raise ValueError("`gap_extend_penalty` must be > 0")             # <<<<<<<<<<<<<<
  *         self.gap_extend_penalty = gap_extend_penalty
  *         self.distance_filter = 0 if distance_filter is None else \
  */
-  __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_gap_extend_penalty_must_be_0); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_gap_extend_penalty_must_be_0); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__10);
   __Pyx_GIVEREF(__pyx_tuple__10);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":580
+  /* "skbio/alignment/_ssw_wrapper.pyx":579
  *         if substitution_matrix is None:
  *             if protein:
  *                 raise Exception("Must provide a substitution matrix for"             # <<<<<<<<<<<<<<
  *                                 " protein sequences")
  *             matrix = self._build_match_matrix(match_score, mismatch_score)
  */
-  __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Must_provide_a_substitution_matr); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Must_provide_a_substitution_matr); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__11);
   __Pyx_GIVEREF(__pyx_tuple__11);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":218
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
@@ -9484,7 +9854,7 @@ static int __Pyx_InitCachedConstants(void) {
   __Pyx_GOTREF(__pyx_tuple__12);
   __Pyx_GIVEREF(__pyx_tuple__12);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":222
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
@@ -9495,47 +9865,47 @@ static int __Pyx_InitCachedConstants(void) {
   __Pyx_GOTREF(__pyx_tuple__13);
   __Pyx_GIVEREF(__pyx_tuple__13);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":259
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  */
-  __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__14);
   __Pyx_GIVEREF(__pyx_tuple__14);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":799
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  */
-  __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__15);
   __Pyx_GIVEREF(__pyx_tuple__15);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":803
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *             # One could encode it in the format string and have Cython
  *             # complain instead, BUT: < and > in format strings also imply
  */
-  __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__16)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__16)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__16);
   __Pyx_GIVEREF(__pyx_tuple__16);
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":823
  *             t = child.type_num
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  */
-  __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__17);
   __Pyx_GIVEREF(__pyx_tuple__17);
   __Pyx_RefNannyFinishContext();
@@ -9605,18 +9975,24 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   }
   #endif
   __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__ssw_wrapper(void)", 0);
-  if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #ifdef __Pyx_CyFunction_USED
-  if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
   #ifdef __Pyx_FusedFunction_USED
   if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
+  #ifdef __Pyx_Coroutine_USED
+  if (__pyx_Coroutine_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
   #ifdef __Pyx_Generator_USED
   if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
+  #ifdef __Pyx_StopAsyncIteration_USED
+  if (__pyx_StopAsyncIteration_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
   /*--- Library function declarations ---*/
   /*--- Threads initialization code ---*/
   #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
@@ -9639,12 +10015,12 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   #endif
   if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
   /*--- Initialize various global constants etc. ---*/
-  if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_InitGlobals() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
   if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
   if (__pyx_module_is_main_skbio__alignment___ssw_wrapper) {
-    if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   #if PY_MAJOR_VERSION >= 3
   {
@@ -9655,29 +10031,29 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   }
   #endif
   /*--- Builtin init code ---*/
-  if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_InitCachedBuiltins() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Constants init code ---*/
-  if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_InitCachedConstants() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Global init code ---*/
   /*--- Variable export code ---*/
   /*--- Function export code ---*/
   /*--- Type init code ---*/
   __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure = &__pyx_vtable_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
   __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure.__pyx___constructor = (PyObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *, s_align *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___constructor;
-  if (PyType_Ready(&__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyType_Ready(&__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure.tp_print = 0;
-  if (__Pyx_SetVtable(__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure.tp_dict, __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  if (PyObject_SetAttrString(__pyx_m, "AlignmentStructure", (PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_SetVtable(__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure.tp_dict, __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyObject_SetAttrString(__pyx_m, "AlignmentStructure", (PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure = &__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
   __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman = &__pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
   __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman._seq_converter = (PyArrayObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__seq_converter;
   __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman._build_match_matrix = (PyArrayObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *, PyObject *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__build_match_matrix;
   __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman._convert_dict2d_to_matrix = (PyArrayObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__convert_dict2d_to_matrix;
-  if (PyType_Ready(&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyType_Ready(&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman.tp_print = 0;
   #if CYTHON_COMPILING_IN_CPYTHON
   {
-    PyObject *wrapper = PyObject_GetAttrString((PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, "__call__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    PyObject *wrapper = PyObject_GetAttrString((PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, "__call__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {
       __pyx_wrapperbase_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__ = *((PyWrapperDescrObject *)wrapper)->d_base;
       __pyx_wrapperbase_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__.doc = __pyx_doc_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__;
@@ -9685,8 +10061,8 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
     }
   }
   #endif
-  if (__Pyx_SetVtable(__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman.tp_dict, __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  if (PyObject_SetAttrString(__pyx_m, "StripedSmithWaterman", (PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_SetVtable(__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman.tp_dict, __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyObject_SetAttrString(__pyx_m, "StripedSmithWaterman", (PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman = &__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
   /*--- Type import code ---*/
   __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", 
@@ -9702,17 +10078,20 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 864; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Variable import code ---*/
   /*--- Function import code ---*/
   /*--- Execution code ---*/
+  #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+  if (__Pyx_patch_abc() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
 
   /* "skbio/alignment/_ssw_wrapper.pyx":10
  * 
  * from cpython cimport bool
  * import numpy as np             # <<<<<<<<<<<<<<
  * cimport numpy as cnp
- * from skbio.alignment import Alignment
+ * from skbio.sequence import Protein, Sequence
  */
   __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
@@ -9722,450 +10101,429 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   /* "skbio/alignment/_ssw_wrapper.pyx":12
  * import numpy as np
  * cimport numpy as cnp
- * from skbio.alignment import Alignment             # <<<<<<<<<<<<<<
- * from skbio.sequence import Protein, Sequence
- * 
- */
-  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_INCREF(__pyx_n_s_Alignment);
-  __Pyx_GIVEREF(__pyx_n_s_Alignment);
-  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_Alignment);
-  __pyx_t_2 = __Pyx_Import(__pyx_n_s_skbio_alignment, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_Alignment); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Alignment, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":13
- * cimport numpy as cnp
- * from skbio.alignment import Alignment
  * from skbio.sequence import Protein, Sequence             # <<<<<<<<<<<<<<
  * 
  * cdef extern from "_lib/ssw.h":
  */
-  __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_n_s_Protein);
   __Pyx_GIVEREF(__pyx_n_s_Protein);
-  PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_Protein);
+  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_Protein);
   __Pyx_INCREF(__pyx_n_s_Sequence);
   __Pyx_GIVEREF(__pyx_n_s_Sequence);
-  PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_Sequence);
-  __pyx_t_1 = __Pyx_Import(__pyx_n_s_skbio_sequence, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_Protein); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_Sequence);
+  __pyx_t_2 = __Pyx_Import(__pyx_n_s_skbio_sequence, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Protein, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_Sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Sequence, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_Protein); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Protein, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_Sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":51
+  /* "skbio/alignment/_ssw_wrapper.pyx":50
  *     cdef void align_destroy(s_align* a)
  * 
  * np_aa_table = np.array([             # <<<<<<<<<<<<<<
  *     23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
  *     23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
  */
-  __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = PyList_New(128); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = PyList_New(128); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 0, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 1, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 2, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 3, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 3, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 4, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 4, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 5, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 5, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 6, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 6, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 7, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 7, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 8, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 8, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 9, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 9, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 10, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 10, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 11, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 11, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 12, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 12, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 13, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 13, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 14, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 14, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 15, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 15, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 16, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 16, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 17, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 17, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 18, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 18, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 19, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 19, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 20, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 20, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 21, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 21, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 22, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 22, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 23, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 23, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 24, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 24, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 25, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 25, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 26, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 26, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 27, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 27, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 28, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 28, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 29, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 29, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 30, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 30, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 31, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 31, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 32, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 32, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 33, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 33, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 34, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 34, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 35, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 35, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 36, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 36, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 37, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 37, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 38, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 38, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 39, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 39, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 40, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 40, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 41, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 41, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 42, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 42, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 43, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 43, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 44, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 44, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 45, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 45, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 46, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 46, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 47, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 47, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 48, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 48, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 49, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 49, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 50, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 50, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 51, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 51, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 52, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 52, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 53, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 53, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 54, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 54, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 55, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 55, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 56, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 56, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 57, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 57, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 58, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 58, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 59, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 59, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 60, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 60, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 61, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 61, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 62, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 62, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 63, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 63, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 64, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 64, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_0);
   __Pyx_GIVEREF(__pyx_int_0);
-  PyList_SET_ITEM(__pyx_t_2, 65, __pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_1, 65, __pyx_int_0);
   __Pyx_INCREF(__pyx_int_20);
   __Pyx_GIVEREF(__pyx_int_20);
-  PyList_SET_ITEM(__pyx_t_2, 66, __pyx_int_20);
+  PyList_SET_ITEM(__pyx_t_1, 66, __pyx_int_20);
   __Pyx_INCREF(__pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_2, 67, __pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 67, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_3);
   __Pyx_GIVEREF(__pyx_int_3);
-  PyList_SET_ITEM(__pyx_t_2, 68, __pyx_int_3);
+  PyList_SET_ITEM(__pyx_t_1, 68, __pyx_int_3);
   __Pyx_INCREF(__pyx_int_6);
   __Pyx_GIVEREF(__pyx_int_6);
-  PyList_SET_ITEM(__pyx_t_2, 69, __pyx_int_6);
+  PyList_SET_ITEM(__pyx_t_1, 69, __pyx_int_6);
   __Pyx_INCREF(__pyx_int_13);
   __Pyx_GIVEREF(__pyx_int_13);
-  PyList_SET_ITEM(__pyx_t_2, 70, __pyx_int_13);
+  PyList_SET_ITEM(__pyx_t_1, 70, __pyx_int_13);
   __Pyx_INCREF(__pyx_int_7);
   __Pyx_GIVEREF(__pyx_int_7);
-  PyList_SET_ITEM(__pyx_t_2, 71, __pyx_int_7);
+  PyList_SET_ITEM(__pyx_t_1, 71, __pyx_int_7);
   __Pyx_INCREF(__pyx_int_8);
   __Pyx_GIVEREF(__pyx_int_8);
-  PyList_SET_ITEM(__pyx_t_2, 72, __pyx_int_8);
+  PyList_SET_ITEM(__pyx_t_1, 72, __pyx_int_8);
   __Pyx_INCREF(__pyx_int_9);
   __Pyx_GIVEREF(__pyx_int_9);
-  PyList_SET_ITEM(__pyx_t_2, 73, __pyx_int_9);
+  PyList_SET_ITEM(__pyx_t_1, 73, __pyx_int_9);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 74, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 74, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_11);
   __Pyx_GIVEREF(__pyx_int_11);
-  PyList_SET_ITEM(__pyx_t_2, 75, __pyx_int_11);
+  PyList_SET_ITEM(__pyx_t_1, 75, __pyx_int_11);
   __Pyx_INCREF(__pyx_int_10);
   __Pyx_GIVEREF(__pyx_int_10);
-  PyList_SET_ITEM(__pyx_t_2, 76, __pyx_int_10);
+  PyList_SET_ITEM(__pyx_t_1, 76, __pyx_int_10);
   __Pyx_INCREF(__pyx_int_12);
   __Pyx_GIVEREF(__pyx_int_12);
-  PyList_SET_ITEM(__pyx_t_2, 77, __pyx_int_12);
+  PyList_SET_ITEM(__pyx_t_1, 77, __pyx_int_12);
   __Pyx_INCREF(__pyx_int_2);
   __Pyx_GIVEREF(__pyx_int_2);
-  PyList_SET_ITEM(__pyx_t_2, 78, __pyx_int_2);
+  PyList_SET_ITEM(__pyx_t_1, 78, __pyx_int_2);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 79, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 79, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_14);
   __Pyx_GIVEREF(__pyx_int_14);
-  PyList_SET_ITEM(__pyx_t_2, 80, __pyx_int_14);
+  PyList_SET_ITEM(__pyx_t_1, 80, __pyx_int_14);
   __Pyx_INCREF(__pyx_int_5);
   __Pyx_GIVEREF(__pyx_int_5);
-  PyList_SET_ITEM(__pyx_t_2, 81, __pyx_int_5);
+  PyList_SET_ITEM(__pyx_t_1, 81, __pyx_int_5);
   __Pyx_INCREF(__pyx_int_1);
   __Pyx_GIVEREF(__pyx_int_1);
-  PyList_SET_ITEM(__pyx_t_2, 82, __pyx_int_1);
+  PyList_SET_ITEM(__pyx_t_1, 82, __pyx_int_1);
   __Pyx_INCREF(__pyx_int_15);
   __Pyx_GIVEREF(__pyx_int_15);
-  PyList_SET_ITEM(__pyx_t_2, 83, __pyx_int_15);
+  PyList_SET_ITEM(__pyx_t_1, 83, __pyx_int_15);
   __Pyx_INCREF(__pyx_int_16);
   __Pyx_GIVEREF(__pyx_int_16);
-  PyList_SET_ITEM(__pyx_t_2, 84, __pyx_int_16);
+  PyList_SET_ITEM(__pyx_t_1, 84, __pyx_int_16);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 85, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 85, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_19);
   __Pyx_GIVEREF(__pyx_int_19);
-  PyList_SET_ITEM(__pyx_t_2, 86, __pyx_int_19);
+  PyList_SET_ITEM(__pyx_t_1, 86, __pyx_int_19);
   __Pyx_INCREF(__pyx_int_17);
   __Pyx_GIVEREF(__pyx_int_17);
-  PyList_SET_ITEM(__pyx_t_2, 87, __pyx_int_17);
+  PyList_SET_ITEM(__pyx_t_1, 87, __pyx_int_17);
   __Pyx_INCREF(__pyx_int_22);
   __Pyx_GIVEREF(__pyx_int_22);
-  PyList_SET_ITEM(__pyx_t_2, 88, __pyx_int_22);
+  PyList_SET_ITEM(__pyx_t_1, 88, __pyx_int_22);
   __Pyx_INCREF(__pyx_int_18);
   __Pyx_GIVEREF(__pyx_int_18);
-  PyList_SET_ITEM(__pyx_t_2, 89, __pyx_int_18);
+  PyList_SET_ITEM(__pyx_t_1, 89, __pyx_int_18);
   __Pyx_INCREF(__pyx_int_21);
   __Pyx_GIVEREF(__pyx_int_21);
-  PyList_SET_ITEM(__pyx_t_2, 90, __pyx_int_21);
+  PyList_SET_ITEM(__pyx_t_1, 90, __pyx_int_21);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 91, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 91, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 92, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 92, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 93, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 93, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 94, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 94, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 95, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 95, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 96, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 96, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_0);
   __Pyx_GIVEREF(__pyx_int_0);
-  PyList_SET_ITEM(__pyx_t_2, 97, __pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_1, 97, __pyx_int_0);
   __Pyx_INCREF(__pyx_int_20);
   __Pyx_GIVEREF(__pyx_int_20);
-  PyList_SET_ITEM(__pyx_t_2, 98, __pyx_int_20);
+  PyList_SET_ITEM(__pyx_t_1, 98, __pyx_int_20);
   __Pyx_INCREF(__pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_2, 99, __pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 99, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_3);
   __Pyx_GIVEREF(__pyx_int_3);
-  PyList_SET_ITEM(__pyx_t_2, 100, __pyx_int_3);
+  PyList_SET_ITEM(__pyx_t_1, 100, __pyx_int_3);
   __Pyx_INCREF(__pyx_int_6);
   __Pyx_GIVEREF(__pyx_int_6);
-  PyList_SET_ITEM(__pyx_t_2, 101, __pyx_int_6);
+  PyList_SET_ITEM(__pyx_t_1, 101, __pyx_int_6);
   __Pyx_INCREF(__pyx_int_13);
   __Pyx_GIVEREF(__pyx_int_13);
-  PyList_SET_ITEM(__pyx_t_2, 102, __pyx_int_13);
+  PyList_SET_ITEM(__pyx_t_1, 102, __pyx_int_13);
   __Pyx_INCREF(__pyx_int_7);
   __Pyx_GIVEREF(__pyx_int_7);
-  PyList_SET_ITEM(__pyx_t_2, 103, __pyx_int_7);
+  PyList_SET_ITEM(__pyx_t_1, 103, __pyx_int_7);
   __Pyx_INCREF(__pyx_int_8);
   __Pyx_GIVEREF(__pyx_int_8);
-  PyList_SET_ITEM(__pyx_t_2, 104, __pyx_int_8);
+  PyList_SET_ITEM(__pyx_t_1, 104, __pyx_int_8);
   __Pyx_INCREF(__pyx_int_9);
   __Pyx_GIVEREF(__pyx_int_9);
-  PyList_SET_ITEM(__pyx_t_2, 105, __pyx_int_9);
+  PyList_SET_ITEM(__pyx_t_1, 105, __pyx_int_9);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 106, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 106, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_11);
   __Pyx_GIVEREF(__pyx_int_11);
-  PyList_SET_ITEM(__pyx_t_2, 107, __pyx_int_11);
+  PyList_SET_ITEM(__pyx_t_1, 107, __pyx_int_11);
   __Pyx_INCREF(__pyx_int_10);
   __Pyx_GIVEREF(__pyx_int_10);
-  PyList_SET_ITEM(__pyx_t_2, 108, __pyx_int_10);
+  PyList_SET_ITEM(__pyx_t_1, 108, __pyx_int_10);
   __Pyx_INCREF(__pyx_int_12);
   __Pyx_GIVEREF(__pyx_int_12);
-  PyList_SET_ITEM(__pyx_t_2, 109, __pyx_int_12);
+  PyList_SET_ITEM(__pyx_t_1, 109, __pyx_int_12);
   __Pyx_INCREF(__pyx_int_2);
   __Pyx_GIVEREF(__pyx_int_2);
-  PyList_SET_ITEM(__pyx_t_2, 110, __pyx_int_2);
+  PyList_SET_ITEM(__pyx_t_1, 110, __pyx_int_2);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 111, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 111, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_14);
   __Pyx_GIVEREF(__pyx_int_14);
-  PyList_SET_ITEM(__pyx_t_2, 112, __pyx_int_14);
+  PyList_SET_ITEM(__pyx_t_1, 112, __pyx_int_14);
   __Pyx_INCREF(__pyx_int_5);
   __Pyx_GIVEREF(__pyx_int_5);
-  PyList_SET_ITEM(__pyx_t_2, 113, __pyx_int_5);
+  PyList_SET_ITEM(__pyx_t_1, 113, __pyx_int_5);
   __Pyx_INCREF(__pyx_int_1);
   __Pyx_GIVEREF(__pyx_int_1);
-  PyList_SET_ITEM(__pyx_t_2, 114, __pyx_int_1);
+  PyList_SET_ITEM(__pyx_t_1, 114, __pyx_int_1);
   __Pyx_INCREF(__pyx_int_15);
   __Pyx_GIVEREF(__pyx_int_15);
-  PyList_SET_ITEM(__pyx_t_2, 115, __pyx_int_15);
+  PyList_SET_ITEM(__pyx_t_1, 115, __pyx_int_15);
   __Pyx_INCREF(__pyx_int_16);
   __Pyx_GIVEREF(__pyx_int_16);
-  PyList_SET_ITEM(__pyx_t_2, 116, __pyx_int_16);
+  PyList_SET_ITEM(__pyx_t_1, 116, __pyx_int_16);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 117, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 117, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_19);
   __Pyx_GIVEREF(__pyx_int_19);
-  PyList_SET_ITEM(__pyx_t_2, 118, __pyx_int_19);
+  PyList_SET_ITEM(__pyx_t_1, 118, __pyx_int_19);
   __Pyx_INCREF(__pyx_int_17);
   __Pyx_GIVEREF(__pyx_int_17);
-  PyList_SET_ITEM(__pyx_t_2, 119, __pyx_int_17);
+  PyList_SET_ITEM(__pyx_t_1, 119, __pyx_int_17);
   __Pyx_INCREF(__pyx_int_22);
   __Pyx_GIVEREF(__pyx_int_22);
-  PyList_SET_ITEM(__pyx_t_2, 120, __pyx_int_22);
+  PyList_SET_ITEM(__pyx_t_1, 120, __pyx_int_22);
   __Pyx_INCREF(__pyx_int_18);
   __Pyx_GIVEREF(__pyx_int_18);
-  PyList_SET_ITEM(__pyx_t_2, 121, __pyx_int_18);
+  PyList_SET_ITEM(__pyx_t_1, 121, __pyx_int_18);
   __Pyx_INCREF(__pyx_int_21);
   __Pyx_GIVEREF(__pyx_int_21);
-  PyList_SET_ITEM(__pyx_t_2, 122, __pyx_int_21);
+  PyList_SET_ITEM(__pyx_t_1, 122, __pyx_int_21);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 123, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 123, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 124, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 124, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 125, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 125, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 126, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 126, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_2, 127, __pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 127, __pyx_int_23);
   __pyx_t_4 = NULL;
   if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
     __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
@@ -10177,37 +10535,37 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
     }
   }
   if (!__pyx_t_4) {
-    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __Pyx_GOTREF(__pyx_t_2);
   } else {
-    __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_5);
     __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL;
-    __Pyx_GIVEREF(__pyx_t_2);
-    PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2);
-    __pyx_t_2 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_GIVEREF(__pyx_t_1);
+    PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_1);
+    __pyx_t_1 = 0;
+    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
     __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
   }
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np_aa_table, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np_aa_table, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":61
+  /* "skbio/alignment/_ssw_wrapper.pyx":60
  *     14,  5,  1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23])
  * 
  * np_nt_table = np.array([             # <<<<<<<<<<<<<<
  *     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
  *     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
  */
-  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_5);
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = PyList_New(128); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyList_New(128); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_INCREF(__pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
@@ -10593,48 +10951,48 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   __Pyx_INCREF(__pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
   PyList_SET_ITEM(__pyx_t_3, 127, __pyx_int_4);
-  __pyx_t_2 = NULL;
+  __pyx_t_1 = NULL;
   if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_5))) {
-    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5);
-    if (likely(__pyx_t_2)) {
+    __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_5);
+    if (likely(__pyx_t_1)) {
       PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
-      __Pyx_INCREF(__pyx_t_2);
+      __Pyx_INCREF(__pyx_t_1);
       __Pyx_INCREF(function);
       __Pyx_DECREF_SET(__pyx_t_5, function);
     }
   }
-  if (!__pyx_t_2) {
-    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!__pyx_t_1) {
+    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_GOTREF(__pyx_t_2);
   } else {
-    __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = NULL;
+    __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __pyx_t_1 = NULL;
     __Pyx_GIVEREF(__pyx_t_3);
     PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_3);
     __pyx_t_3 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   }
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np_nt_table, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np_nt_table, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":71
+  /* "skbio/alignment/_ssw_wrapper.pyx":70
  *     4,  4,  4,  4,  3,  0,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4])
  * 
  * mid_table = np.array(['M', 'I', 'D'])             # <<<<<<<<<<<<<<
  * 
  * 
  */
-  __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_t_5 = PyList_New(3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = PyList_New(3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_5);
   __Pyx_INCREF(__pyx_n_s_M);
   __Pyx_GIVEREF(__pyx_n_s_M);
@@ -10656,370 +11014,370 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
     }
   }
   if (!__pyx_t_3) {
-    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __Pyx_GOTREF(__pyx_t_1);
-  } else {
-    __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
-    __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = NULL;
+  } else {
+    __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __pyx_t_3 = NULL;
     __Pyx_GIVEREF(__pyx_t_5);
-    PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_5);
+    PyTuple_SET_ITEM(__pyx_t_1, 0+1, __pyx_t_5);
     __pyx_t_5 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   }
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_mid_table, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_mid_table, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":133
+  /* "skbio/alignment/_ssw_wrapper.pyx":132
  * 
  *     @property
  *     def optimal_alignment_score(self):             # <<<<<<<<<<<<<<
  *         """Optimal alignment score
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":132
+  /* "skbio/alignment/_ssw_wrapper.pyx":131
  *         return score
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def optimal_alignment_score(self):
  *         """Optimal alignment score
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_optimal_alignment_score, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_optimal_alignment_score, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":145
+  /* "skbio/alignment/_ssw_wrapper.pyx":144
  * 
  *     @property
  *     def suboptimal_alignment_score(self):             # <<<<<<<<<<<<<<
  *         """Suboptimal alignment score
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_suboptimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_suboptimal_alignment_score); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":144
+  /* "skbio/alignment/_ssw_wrapper.pyx":143
  *         return self.p.score1
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def suboptimal_alignment_score(self):
  *         """Suboptimal alignment score
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_suboptimal_alignment_score, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_suboptimal_alignment_score, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":157
+  /* "skbio/alignment/_ssw_wrapper.pyx":156
  * 
  *     @property
  *     def target_begin(self):             # <<<<<<<<<<<<<<
  *         """Character index where the target's alignment begins
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_begin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_begin); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":156
+  /* "skbio/alignment/_ssw_wrapper.pyx":155
  *         return self.p.score2
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def target_begin(self):
  *         """Character index where the target's alignment begins
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_begin, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_begin, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":174
+  /* "skbio/alignment/_ssw_wrapper.pyx":173
  * 
  *     @property
  *     def target_end_optimal(self):             # <<<<<<<<<<<<<<
  *         """Character index where the target's optimal alignment ends
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":173
+  /* "skbio/alignment/_ssw_wrapper.pyx":172
  *                                                             >= 0) else -1
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def target_end_optimal(self):
  *         """Character index where the target's optimal alignment ends
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_end_optimal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_end_optimal, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":191
+  /* "skbio/alignment/_ssw_wrapper.pyx":190
  * 
  *     @property
  *     def target_end_suboptimal(self):             # <<<<<<<<<<<<<<
  *         """Character index where the target's suboptimal alignment ends
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_end_suboptimal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_end_suboptimal); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":190
+  /* "skbio/alignment/_ssw_wrapper.pyx":189
  *         return self.p.ref_end1 + self.index_starts_at
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def target_end_suboptimal(self):
  *         """Character index where the target's suboptimal alignment ends
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_end_suboptimal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_end_suboptimal, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":208
+  /* "skbio/alignment/_ssw_wrapper.pyx":207
  * 
  *     @property
  *     def query_begin(self):             # <<<<<<<<<<<<<<
  *         """Returns the character index at which the query sequence begins
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_begin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_begin); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":207
+  /* "skbio/alignment/_ssw_wrapper.pyx":206
  *         return self.p.ref_end2 + self.index_starts_at
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def query_begin(self):
  *         """Returns the character index at which the query sequence begins
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_begin, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_begin, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":225
+  /* "skbio/alignment/_ssw_wrapper.pyx":224
  * 
  *     @property
  *     def query_end(self):             # <<<<<<<<<<<<<<
  *         """Character index at where query sequence ends
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_end); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_end); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":224
+  /* "skbio/alignment/_ssw_wrapper.pyx":223
  *                                                              >= 0) else -1
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def query_end(self):
  *         """Character index at where query sequence ends
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_end, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_end, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":241
+  /* "skbio/alignment/_ssw_wrapper.pyx":240
  * 
  *     @property
  *     def cigar(self):             # <<<<<<<<<<<<<<
  *         """Cigar formatted string for the optimal alignment
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_cigar); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":240
+  /* "skbio/alignment/_ssw_wrapper.pyx":239
  *         return self.p.read_end1 + self.index_starts_at
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def cigar(self):
  *         """Cigar formatted string for the optimal alignment
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_cigar, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_cigar, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":279
+  /* "skbio/alignment/_ssw_wrapper.pyx":278
  * 
  *     @property
  *     def query_sequence(self):             # <<<<<<<<<<<<<<
  *         """Query sequence
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":278
+  /* "skbio/alignment/_ssw_wrapper.pyx":277
  *         return self._cigar_string
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def query_sequence(self):
  *         """Query sequence
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_sequence, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":291
+  /* "skbio/alignment/_ssw_wrapper.pyx":290
  * 
  *     @property
  *     def target_sequence(self):             # <<<<<<<<<<<<<<
  *         """Target sequence
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":290
+  /* "skbio/alignment/_ssw_wrapper.pyx":289
  *         return self.read_sequence
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def target_sequence(self):
  *         """Target sequence
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_sequence, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":303
+  /* "skbio/alignment/_ssw_wrapper.pyx":302
  * 
  *     @property
  *     def aligned_query_sequence(self):             # <<<<<<<<<<<<<<
  *         """Returns the query sequence aligned by the cigar
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 303; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":302
+  /* "skbio/alignment/_ssw_wrapper.pyx":301
  *         return self.reference_sequence
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def aligned_query_sequence(self):
  *         """Returns the query sequence aligned by the cigar
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 301; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 301; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_aligned_query_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 303; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_aligned_query_sequence, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":325
+  /* "skbio/alignment/_ssw_wrapper.pyx":324
  * 
  *     @property
  *     def aligned_target_sequence(self):             # <<<<<<<<<<<<<<
  *         """Returns the target sequence aligned by the cigar
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":324
+  /* "skbio/alignment/_ssw_wrapper.pyx":323
  *         return None
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def aligned_target_sequence(self):
  *         """Returns the target sequence aligned by the cigar
  */
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_aligned_target_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_aligned_target_sequence, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
   /* "skbio/alignment/_ssw_wrapper.pyx":1
@@ -11027,12 +11385,12 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
  * #  Copyright (c) 2013--, scikit-bio development team.
  * #
  */
-  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -11511,8 +11869,12 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObjec
 }
 #else
 static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
-    PyObject* args = PyTuple_Pack(1, arg);
-    return (likely(args)) ? __Pyx_PyObject_Call(func, args, NULL) : NULL;
+    PyObject *result;
+    PyObject *args = PyTuple_Pack(1, arg);
+    if (unlikely(!args)) return NULL;
+    result = __Pyx_PyObject_Call(func, args, NULL);
+    Py_DECREF(args);
+    return result;
 }
 #endif
 
@@ -11531,6 +11893,107 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
 }
 #endif
 
+#if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+#endif
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) {
+    #if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_CheckExact(op1))) {
+        const long b = intval;
+        long x;
+        long a = PyInt_AS_LONG(op1);
+            x = (long)((unsigned long)a + b);
+            if (likely((x^a) >= 0 || (x^b) >= 0))
+                return PyInt_FromLong(x);
+            return PyLong_Type.tp_as_number->nb_add(op1, op2);
+    }
+    #endif
+    #if CYTHON_USE_PYLONG_INTERNALS && PY_MAJOR_VERSION >= 3
+    if (likely(PyLong_CheckExact(op1))) {
+        const long b = intval;
+        long a, x;
+        const PY_LONG_LONG llb = intval;
+        PY_LONG_LONG lla, llx;
+        const digit* digits = ((PyLongObject*)op1)->ob_digit;
+        const Py_ssize_t size = Py_SIZE(op1);
+        if (likely(__Pyx_sst_abs(size) <= 1)) {
+            a = likely(size) ? digits[0] : 0;
+            if (size == -1) a = -a;
+        } else {
+            switch (size) {
+                case -2:
+                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                        a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
+                        break;
+                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
+                        lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
+                        goto long_long;
+                    }
+                case 2:
+                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                        a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
+                        break;
+                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
+                        lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
+                        goto long_long;
+                    }
+                case -3:
+                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                        a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
+                        break;
+                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
+                        lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
+                        goto long_long;
+                    }
+                case 3:
+                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                        a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
+                        break;
+                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
+                        lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
+                        goto long_long;
+                    }
+                case -4:
+                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+                        a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
+                        break;
+                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
+                        lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
+                        goto long_long;
+                    }
+                case 4:
+                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+                        a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
+                        break;
+                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
+                        lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
+                        goto long_long;
+                    }
+                default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
+            }
+        }
+                x = a + b;
+            return PyLong_FromLong(x);
+        long_long:
+                llx = lla + llb;
+            return PyLong_FromLongLong(llx);
+    }
+    #endif
+    if (PyFloat_CheckExact(op1)) {
+        const long b = intval;
+        double a = PyFloat_AS_DOUBLE(op1);
+            double result;
+            PyFPE_START_PROTECT("add", return NULL)
+            result = ((double)a) + (double)b;
+            PyFPE_END_PROTECT(result)
+            return PyFloat_FromDouble(result);
+    }
+    return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
+}
+#endif
+
 static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
     PyErr_Format(PyExc_ValueError,
                  "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
@@ -12471,6 +12934,64 @@ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b)
     return q;
 }
 
+static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) {
+   Py_ssize_t length;
+   #if CYTHON_PEP393_ENABLED
+   length = PyUnicode_GET_LENGTH(x);
+   if (likely(length == 1)) {
+       return PyUnicode_READ_CHAR(x, 0);
+   }
+   #else
+   length = PyUnicode_GET_SIZE(x);
+   if (likely(length == 1)) {
+       return PyUnicode_AS_UNICODE(x)[0];
+   }
+   #if Py_UNICODE_SIZE == 2
+   else if (PyUnicode_GET_SIZE(x) == 2) {
+       Py_UCS4 high_val = PyUnicode_AS_UNICODE(x)[0];
+       if (high_val >= 0xD800 && high_val <= 0xDBFF) {
+           Py_UCS4 low_val = PyUnicode_AS_UNICODE(x)[1];
+           if (low_val >= 0xDC00 && low_val <= 0xDFFF) {
+               return 0x10000 + (((high_val & ((1<<10)-1)) << 10) | (low_val & ((1<<10)-1)));
+           }
+       }
+   }
+   #endif
+   #endif
+   PyErr_Format(PyExc_ValueError,
+                "only single character unicode strings can be converted to Py_UCS4, "
+                "got length %" CYTHON_FORMAT_SSIZE_T "d", length);
+   return (Py_UCS4)-1;
+}
+
+static long __Pyx__PyObject_Ord(PyObject* c) {
+    Py_ssize_t size;
+    if (PyBytes_Check(c)) {
+        size = PyBytes_GET_SIZE(c);
+        if (likely(size == 1)) {
+            return (unsigned char) PyBytes_AS_STRING(c)[0];
+        }
+#if PY_MAJOR_VERSION < 3
+    } else if (PyUnicode_Check(c)) {
+        return (long)__Pyx_PyUnicode_AsPy_UCS4(c);
+#endif
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+    } else if (PyByteArray_Check(c)) {
+        size = PyByteArray_GET_SIZE(c);
+        if (likely(size == 1)) {
+            return (unsigned char) PyByteArray_AS_STRING(c)[0];
+        }
+#endif
+    } else {
+        PyErr_Format(PyExc_TypeError,
+            "ord() expected string of length 1, but %.200s found", c->ob_type->tp_name);
+        return (long)(Py_UCS4)-1;
+    }
+    PyErr_Format(PyExc_TypeError,
+        "ord() expected a character, but string of length %zd found", size);
+    return (long)(Py_UCS4)-1;
+}
+
 static void __Pyx_RaiseBufferIndexError(int axis) {
   PyErr_Format(PyExc_IndexError,
      "Out of bounds on buffer access (axis %d)", axis);
@@ -12497,39 +13018,112 @@ bad:
     return -1;
 }
 
-static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
-    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
-    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
-        PyErr_Format(PyExc_ImportError,
-        #if PY_MAJOR_VERSION < 3
-            "cannot import name %.230s", PyString_AS_STRING(name));
-        #else
-            "cannot import name %S", name);
-        #endif
-    }
-    return value;
-}
-
-static PyObject *__Pyx_GetNameInClass(PyObject *nmspace, PyObject *name) {
-    PyObject *result;
-    result = __Pyx_PyObject_GetAttrStr(nmspace, name);
-    if (!result)
-        result = __Pyx_GetModuleGlobalName(name);
-    return result;
-}
-
-static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
-    int start = 0, mid = 0, end = count - 1;
-    if (end >= 0 && code_line > entries[end].code_line) {
-        return count;
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+    PyObject *empty_list = 0;
+    PyObject *module = 0;
+    PyObject *global_dict = 0;
+    PyObject *empty_dict = 0;
+    PyObject *list;
+    #if PY_VERSION_HEX < 0x03030000
+    PyObject *py_import;
+    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
+    if (!py_import)
+        goto bad;
+    #endif
+    if (from_list)
+        list = from_list;
+    else {
+        empty_list = PyList_New(0);
+        if (!empty_list)
+            goto bad;
+        list = empty_list;
     }
-    while (start < end) {
-        mid = (start + end) / 2;
-        if (code_line < entries[mid].code_line) {
-            end = mid;
-        } else if (code_line > entries[mid].code_line) {
-             start = mid + 1;
-        } else {
+    global_dict = PyModule_GetDict(__pyx_m);
+    if (!global_dict)
+        goto bad;
+    empty_dict = PyDict_New();
+    if (!empty_dict)
+        goto bad;
+    {
+        #if PY_MAJOR_VERSION >= 3
+        if (level == -1) {
+            if (strchr(__Pyx_MODULE_NAME, '.')) {
+                #if PY_VERSION_HEX < 0x03030000
+                PyObject *py_level = PyInt_FromLong(1);
+                if (!py_level)
+                    goto bad;
+                module = PyObject_CallFunctionObjArgs(py_import,
+                    name, global_dict, empty_dict, list, py_level, NULL);
+                Py_DECREF(py_level);
+                #else
+                module = PyImport_ImportModuleLevelObject(
+                    name, global_dict, empty_dict, list, 1);
+                #endif
+                if (!module) {
+                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
+                        goto bad;
+                    PyErr_Clear();
+                }
+            }
+            level = 0;
+        }
+        #endif
+        if (!module) {
+            #if PY_VERSION_HEX < 0x03030000
+            PyObject *py_level = PyInt_FromLong(level);
+            if (!py_level)
+                goto bad;
+            module = PyObject_CallFunctionObjArgs(py_import,
+                name, global_dict, empty_dict, list, py_level, NULL);
+            Py_DECREF(py_level);
+            #else
+            module = PyImport_ImportModuleLevelObject(
+                name, global_dict, empty_dict, list, level);
+            #endif
+        }
+    }
+bad:
+    #if PY_VERSION_HEX < 0x03030000
+    Py_XDECREF(py_import);
+    #endif
+    Py_XDECREF(empty_list);
+    Py_XDECREF(empty_dict);
+    return module;
+}
+
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
+    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
+    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
+        PyErr_Format(PyExc_ImportError,
+        #if PY_MAJOR_VERSION < 3
+            "cannot import name %.230s", PyString_AS_STRING(name));
+        #else
+            "cannot import name %S", name);
+        #endif
+    }
+    return value;
+}
+
+static PyObject *__Pyx_GetNameInClass(PyObject *nmspace, PyObject *name) {
+    PyObject *result;
+    result = __Pyx_PyObject_GetAttrStr(nmspace, name);
+    if (!result)
+        result = __Pyx_GetModuleGlobalName(name);
+    return result;
+}
+
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+    int start = 0, mid = 0, end = count - 1;
+    if (end >= 0 && code_line > entries[end].code_line) {
+        return count;
+    }
+    while (start < end) {
+        mid = start + (end - start) / 2;
+        if (code_line < entries[mid].code_line) {
+            end = mid;
+        } else if (code_line > entries[mid].code_line) {
+             start = mid + 1;
+        } else {
             return mid;
         }
     }
@@ -12698,102 +13292,29 @@ static void __Pyx_ReleaseBuffer(Py_buffer *view) {
 #endif
 
 
-          static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
-    PyObject *empty_list = 0;
-    PyObject *module = 0;
-    PyObject *global_dict = 0;
-    PyObject *empty_dict = 0;
-    PyObject *list;
-    #if PY_VERSION_HEX < 0x03030000
-    PyObject *py_import;
-    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
-    if (!py_import)
-        goto bad;
-    #endif
-    if (from_list)
-        list = from_list;
-    else {
-        empty_list = PyList_New(0);
-        if (!empty_list)
-            goto bad;
-        list = empty_list;
-    }
-    global_dict = PyModule_GetDict(__pyx_m);
-    if (!global_dict)
-        goto bad;
-    empty_dict = PyDict_New();
-    if (!empty_dict)
-        goto bad;
-    {
-        #if PY_MAJOR_VERSION >= 3
-        if (level == -1) {
-            if (strchr(__Pyx_MODULE_NAME, '.')) {
-                #if PY_VERSION_HEX < 0x03030000
-                PyObject *py_level = PyInt_FromLong(1);
-                if (!py_level)
-                    goto bad;
-                module = PyObject_CallFunctionObjArgs(py_import,
-                    name, global_dict, empty_dict, list, py_level, NULL);
-                Py_DECREF(py_level);
-                #else
-                module = PyImport_ImportModuleLevelObject(
-                    name, global_dict, empty_dict, list, 1);
-                #endif
-                if (!module) {
-                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
-                        goto bad;
-                    PyErr_Clear();
-                }
-            }
-            level = 0;
-        }
-        #endif
-        if (!module) {
-            #if PY_VERSION_HEX < 0x03030000
-            PyObject *py_level = PyInt_FromLong(level);
-            if (!py_level)
-                goto bad;
-            module = PyObject_CallFunctionObjArgs(py_import,
-                name, global_dict, empty_dict, list, py_level, NULL);
-            Py_DECREF(py_level);
-            #else
-            module = PyImport_ImportModuleLevelObject(
-                name, global_dict, empty_dict, list, level);
-            #endif
-        }
-    }
-bad:
-    #if PY_VERSION_HEX < 0x03030000
-    Py_XDECREF(py_import);
-    #endif
-    Py_XDECREF(empty_list);
-    Py_XDECREF(empty_dict);
-    return module;
-}
-
-#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)       \
-    {                                                                     \
-        func_type value = func_value;                                     \
-        if (sizeof(target_type) < sizeof(func_type)) {                    \
-            if (unlikely(value != (func_type) (target_type) value)) {     \
-                func_type zero = 0;                                       \
-                if (is_unsigned && unlikely(value < zero))                \
-                    goto raise_neg_overflow;                              \
-                else                                                      \
-                    goto raise_overflow;                                  \
-            }                                                             \
-        }                                                                 \
-        return (target_type) value;                                       \
+          #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
+    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
+#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
+    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
+#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
+    {\
+        func_type value = func_value;\
+        if (sizeof(target_type) < sizeof(func_type)) {\
+            if (unlikely(value != (func_type) (target_type) value)) {\
+                func_type zero = 0;\
+                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
+                    return (target_type) -1;\
+                if (is_unsigned && unlikely(value < zero))\
+                    goto raise_neg_overflow;\
+                else\
+                    goto raise_overflow;\
+            }\
+        }\
+        return (target_type) value;\
     }
 
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
- #endif
-#endif
-
 static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
-    const int neg_one = (int) -1, const_zero = 0;
+    const int neg_one = (int) -1, const_zero = (int) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -12810,13 +13331,39 @@ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(int, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (int) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
+                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
+                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
+                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
 #if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
@@ -12832,24 +13379,77 @@ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
             }
 #endif
             if (sizeof(int) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
             } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (int) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(int) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
             } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(int, PY_LONG_LONG, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -12898,7 +13498,7 @@ raise_neg_overflow:
 }
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_uint16(npy_uint16 value) {
-    const npy_uint16 neg_one = (npy_uint16) -1, const_zero = 0;
+    const npy_uint16 neg_one = (npy_uint16) -1, const_zero = (npy_uint16) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
         if (sizeof(npy_uint16) < sizeof(long)) {
@@ -12923,34 +13523,8 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_uint16(npy_uint16 value) {
     }
 }
 
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
-    const int neg_one = (int) -1, const_zero = 0;
-    const int is_unsigned = neg_one > const_zero;
-    if (is_unsigned) {
-        if (sizeof(int) < sizeof(long)) {
-            return PyInt_FromLong((long) value);
-        } else if (sizeof(int) <= sizeof(unsigned long)) {
-            return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
-            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
-        }
-    } else {
-        if (sizeof(int) <= sizeof(long)) {
-            return PyInt_FromLong((long) value);
-        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
-            return PyLong_FromLongLong((PY_LONG_LONG) value);
-        }
-    }
-    {
-        int one = 1; int little = (int)*(unsigned char *)&one;
-        unsigned char *bytes = (unsigned char *)&value;
-        return _PyLong_FromByteArray(bytes, sizeof(int),
-                                     little, !is_unsigned);
-    }
-}
-
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int32(npy_int32 value) {
-    const npy_int32 neg_one = (npy_int32) -1, const_zero = 0;
+    const npy_int32 neg_one = (npy_int32) -1, const_zero = (npy_int32) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
         if (sizeof(npy_int32) < sizeof(long)) {
@@ -12976,7 +13550,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int32(npy_int32 value) {
 }
 
 static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) {
-    const npy_int32 neg_one = (npy_int32) -1, const_zero = 0;
+    const npy_int32 neg_one = (npy_int32) -1, const_zero = (npy_int32) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -12993,13 +13567,39 @@ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (npy_int32) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int32) >= 2 * PyLong_SHIFT) {
+                            return (npy_int32) (((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int32) >= 3 * PyLong_SHIFT) {
+                            return (npy_int32) (((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int32) >= 4 * PyLong_SHIFT) {
+                            return (npy_int32) (((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
 #if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
@@ -13015,24 +13615,77 @@ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) {
             }
 #endif
             if (sizeof(npy_int32) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, PyLong_AsUnsignedLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned long, PyLong_AsUnsignedLong(x))
             } else if (sizeof(npy_int32) <= sizeof(unsigned PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(npy_int32, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(npy_int32,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(npy_int32, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (npy_int32) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(npy_int32, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(npy_int32,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(npy_int32) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) {
+                            return (npy_int32) (((npy_int32)-1)*(((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) {
+                            return (npy_int32) ((((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) {
+                            return (npy_int32) (((npy_int32)-1)*(((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) {
+                            return (npy_int32) ((((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) {
+                            return (npy_int32) (((npy_int32)-1)*(((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) {
+                            return (npy_int32) ((((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(npy_int32) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int32, long, PyLong_AsLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int32, long, PyLong_AsLong(x))
             } else if (sizeof(npy_int32) <= sizeof(PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(npy_int32, PY_LONG_LONG, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int32, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13081,7 +13734,7 @@ raise_neg_overflow:
 }
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
-    const long neg_one = (long) -1, const_zero = 0;
+    const long neg_one = (long) -1, const_zero = (long) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
         if (sizeof(long) < sizeof(long)) {
@@ -13107,7 +13760,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
 }
 
 static CYTHON_INLINE npy_uint8 __Pyx_PyInt_As_npy_uint8(PyObject *x) {
-    const npy_uint8 neg_one = (npy_uint8) -1, const_zero = 0;
+    const npy_uint8 neg_one = (npy_uint8) -1, const_zero = (npy_uint8) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -13124,13 +13777,39 @@ static CYTHON_INLINE npy_uint8 __Pyx_PyInt_As_npy_uint8(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(npy_uint8, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (npy_uint8) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_uint8, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(npy_uint8) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint8) >= 2 * PyLong_SHIFT) {
+                            return (npy_uint8) (((((npy_uint8)digits[1]) << PyLong_SHIFT) | (npy_uint8)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(npy_uint8) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint8) >= 3 * PyLong_SHIFT) {
+                            return (npy_uint8) (((((((npy_uint8)digits[2]) << PyLong_SHIFT) | (npy_uint8)digits[1]) << PyLong_SHIFT) | (npy_uint8)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(npy_uint8) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint8) >= 4 * PyLong_SHIFT) {
+                            return (npy_uint8) (((((((((npy_uint8)digits[3]) << PyLong_SHIFT) | (npy_uint8)digits[2]) << PyLong_SHIFT) | (npy_uint8)digits[1]) << PyLong_SHIFT) | (npy_uint8)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
 #if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
@@ -13146,24 +13825,77 @@ static CYTHON_INLINE npy_uint8 __Pyx_PyInt_As_npy_uint8(PyObject *x) {
             }
 #endif
             if (sizeof(npy_uint8) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long, PyLong_AsUnsignedLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_uint8, unsigned long, PyLong_AsUnsignedLong(x))
             } else if (sizeof(npy_uint8) <= sizeof(unsigned PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_uint8, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(npy_uint8,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(npy_uint8, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (npy_uint8) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(npy_uint8, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(npy_uint8,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(npy_uint8) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint8, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint8) - 1 > 2 * PyLong_SHIFT) {
+                            return (npy_uint8) (((npy_uint8)-1)*(((((npy_uint8)digits[1]) << PyLong_SHIFT) | (npy_uint8)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(npy_uint8) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint8) - 1 > 2 * PyLong_SHIFT) {
+                            return (npy_uint8) ((((((npy_uint8)digits[1]) << PyLong_SHIFT) | (npy_uint8)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(npy_uint8) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint8, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint8) - 1 > 3 * PyLong_SHIFT) {
+                            return (npy_uint8) (((npy_uint8)-1)*(((((((npy_uint8)digits[2]) << PyLong_SHIFT) | (npy_uint8)digits[1]) << PyLong_SHIFT) | (npy_uint8)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(npy_uint8) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint8) - 1 > 3 * PyLong_SHIFT) {
+                            return (npy_uint8) ((((((((npy_uint8)digits[2]) << PyLong_SHIFT) | (npy_uint8)digits[1]) << PyLong_SHIFT) | (npy_uint8)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(npy_uint8) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint8, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint8) - 1 > 4 * PyLong_SHIFT) {
+                            return (npy_uint8) (((npy_uint8)-1)*(((((((((npy_uint8)digits[3]) << PyLong_SHIFT) | (npy_uint8)digits[2]) << PyLong_SHIFT) | (npy_uint8)digits[1]) << PyLong_SHIFT) | (npy_uint8)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(npy_uint8) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint8) - 1 > 4 * PyLong_SHIFT) {
+                            return (npy_uint8) ((((((((((npy_uint8)digits[3]) << PyLong_SHIFT) | (npy_uint8)digits[2]) << PyLong_SHIFT) | (npy_uint8)digits[1]) << PyLong_SHIFT) | (npy_uint8)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(npy_uint8) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint8, long, PyLong_AsLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_uint8, long, PyLong_AsLong(x))
             } else if (sizeof(npy_uint8) <= sizeof(PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint8, PY_LONG_LONG, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_uint8, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13212,7 +13944,7 @@ raise_neg_overflow:
 }
 
 static CYTHON_INLINE npy_uint16 __Pyx_PyInt_As_npy_uint16(PyObject *x) {
-    const npy_uint16 neg_one = (npy_uint16) -1, const_zero = 0;
+    const npy_uint16 neg_one = (npy_uint16) -1, const_zero = (npy_uint16) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -13229,13 +13961,39 @@ static CYTHON_INLINE npy_uint16 __Pyx_PyInt_As_npy_uint16(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(npy_uint16, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (npy_uint16) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_uint16, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(npy_uint16) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint16) >= 2 * PyLong_SHIFT) {
+                            return (npy_uint16) (((((npy_uint16)digits[1]) << PyLong_SHIFT) | (npy_uint16)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(npy_uint16) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint16) >= 3 * PyLong_SHIFT) {
+                            return (npy_uint16) (((((((npy_uint16)digits[2]) << PyLong_SHIFT) | (npy_uint16)digits[1]) << PyLong_SHIFT) | (npy_uint16)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(npy_uint16) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint16) >= 4 * PyLong_SHIFT) {
+                            return (npy_uint16) (((((((((npy_uint16)digits[3]) << PyLong_SHIFT) | (npy_uint16)digits[2]) << PyLong_SHIFT) | (npy_uint16)digits[1]) << PyLong_SHIFT) | (npy_uint16)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
 #if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
@@ -13251,24 +14009,77 @@ static CYTHON_INLINE npy_uint16 __Pyx_PyInt_As_npy_uint16(PyObject *x) {
             }
 #endif
             if (sizeof(npy_uint16) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long, PyLong_AsUnsignedLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_uint16, unsigned long, PyLong_AsUnsignedLong(x))
             } else if (sizeof(npy_uint16) <= sizeof(unsigned PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_uint16, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(npy_uint16,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(npy_uint16, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (npy_uint16) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(npy_uint16, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(npy_uint16,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(npy_uint16) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint16, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint16) - 1 > 2 * PyLong_SHIFT) {
+                            return (npy_uint16) (((npy_uint16)-1)*(((((npy_uint16)digits[1]) << PyLong_SHIFT) | (npy_uint16)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(npy_uint16) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint16) - 1 > 2 * PyLong_SHIFT) {
+                            return (npy_uint16) ((((((npy_uint16)digits[1]) << PyLong_SHIFT) | (npy_uint16)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(npy_uint16) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint16, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint16) - 1 > 3 * PyLong_SHIFT) {
+                            return (npy_uint16) (((npy_uint16)-1)*(((((((npy_uint16)digits[2]) << PyLong_SHIFT) | (npy_uint16)digits[1]) << PyLong_SHIFT) | (npy_uint16)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(npy_uint16) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint16) - 1 > 3 * PyLong_SHIFT) {
+                            return (npy_uint16) ((((((((npy_uint16)digits[2]) << PyLong_SHIFT) | (npy_uint16)digits[1]) << PyLong_SHIFT) | (npy_uint16)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(npy_uint16) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint16, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint16) - 1 > 4 * PyLong_SHIFT) {
+                            return (npy_uint16) (((npy_uint16)-1)*(((((((((npy_uint16)digits[3]) << PyLong_SHIFT) | (npy_uint16)digits[2]) << PyLong_SHIFT) | (npy_uint16)digits[1]) << PyLong_SHIFT) | (npy_uint16)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(npy_uint16) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_uint16) - 1 > 4 * PyLong_SHIFT) {
+                            return (npy_uint16) ((((((((((npy_uint16)digits[3]) << PyLong_SHIFT) | (npy_uint16)digits[2]) << PyLong_SHIFT) | (npy_uint16)digits[1]) << PyLong_SHIFT) | (npy_uint16)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(npy_uint16) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint16, long, PyLong_AsLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_uint16, long, PyLong_AsLong(x))
             } else if (sizeof(npy_uint16) <= sizeof(PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint16, PY_LONG_LONG, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_uint16, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13317,7 +14128,7 @@ raise_neg_overflow:
 }
 
 static CYTHON_INLINE npy_int8 __Pyx_PyInt_As_npy_int8(PyObject *x) {
-    const npy_int8 neg_one = (npy_int8) -1, const_zero = 0;
+    const npy_int8 neg_one = (npy_int8) -1, const_zero = (npy_int8) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -13334,13 +14145,39 @@ static CYTHON_INLINE npy_int8 __Pyx_PyInt_As_npy_int8(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(npy_int8, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (npy_int8) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_int8, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(npy_int8) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int8) >= 2 * PyLong_SHIFT) {
+                            return (npy_int8) (((((npy_int8)digits[1]) << PyLong_SHIFT) | (npy_int8)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(npy_int8) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int8) >= 3 * PyLong_SHIFT) {
+                            return (npy_int8) (((((((npy_int8)digits[2]) << PyLong_SHIFT) | (npy_int8)digits[1]) << PyLong_SHIFT) | (npy_int8)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(npy_int8) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int8) >= 4 * PyLong_SHIFT) {
+                            return (npy_int8) (((((((((npy_int8)digits[3]) << PyLong_SHIFT) | (npy_int8)digits[2]) << PyLong_SHIFT) | (npy_int8)digits[1]) << PyLong_SHIFT) | (npy_int8)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
 #if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
@@ -13356,24 +14193,77 @@ static CYTHON_INLINE npy_int8 __Pyx_PyInt_As_npy_int8(PyObject *x) {
             }
 #endif
             if (sizeof(npy_int8) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long, PyLong_AsUnsignedLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int8, unsigned long, PyLong_AsUnsignedLong(x))
             } else if (sizeof(npy_int8) <= sizeof(unsigned PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(npy_int8, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int8, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(npy_int8,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(npy_int8, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (npy_int8) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(npy_int8, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(npy_int8,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(npy_int8) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int8, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int8) - 1 > 2 * PyLong_SHIFT) {
+                            return (npy_int8) (((npy_int8)-1)*(((((npy_int8)digits[1]) << PyLong_SHIFT) | (npy_int8)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(npy_int8) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int8) - 1 > 2 * PyLong_SHIFT) {
+                            return (npy_int8) ((((((npy_int8)digits[1]) << PyLong_SHIFT) | (npy_int8)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(npy_int8) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int8, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int8) - 1 > 3 * PyLong_SHIFT) {
+                            return (npy_int8) (((npy_int8)-1)*(((((((npy_int8)digits[2]) << PyLong_SHIFT) | (npy_int8)digits[1]) << PyLong_SHIFT) | (npy_int8)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(npy_int8) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int8) - 1 > 3 * PyLong_SHIFT) {
+                            return (npy_int8) ((((((((npy_int8)digits[2]) << PyLong_SHIFT) | (npy_int8)digits[1]) << PyLong_SHIFT) | (npy_int8)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(npy_int8) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int8, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int8) - 1 > 4 * PyLong_SHIFT) {
+                            return (npy_int8) (((npy_int8)-1)*(((((((((npy_int8)digits[3]) << PyLong_SHIFT) | (npy_int8)digits[2]) << PyLong_SHIFT) | (npy_int8)digits[1]) << PyLong_SHIFT) | (npy_int8)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(npy_int8) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int8) - 1 > 4 * PyLong_SHIFT) {
+                            return (npy_int8) ((((((((((npy_int8)digits[3]) << PyLong_SHIFT) | (npy_int8)digits[2]) << PyLong_SHIFT) | (npy_int8)digits[1]) << PyLong_SHIFT) | (npy_int8)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(npy_int8) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int8, long, PyLong_AsLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int8, long, PyLong_AsLong(x))
             } else if (sizeof(npy_int8) <= sizeof(PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(npy_int8, PY_LONG_LONG, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int8, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13421,6 +14311,32 @@ raise_neg_overflow:
     return (npy_int8) -1;
 }
 
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
+    const int neg_one = (int) -1, const_zero = (int) 0;
+    const int is_unsigned = neg_one > const_zero;
+    if (is_unsigned) {
+        if (sizeof(int) < sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(int) <= sizeof(unsigned long)) {
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+        }
+    } else {
+        if (sizeof(int) <= sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
+        }
+    }
+    {
+        int one = 1; int little = (int)*(unsigned char *)&one;
+        unsigned char *bytes = (unsigned char *)&value;
+        return _PyLong_FromByteArray(bytes, sizeof(int),
+                                     little, !is_unsigned);
+    }
+}
+
 #if CYTHON_CCOMPLEX
   #ifdef __cplusplus
     static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
@@ -13661,8 +14577,34 @@ raise_neg_overflow:
     #endif
 #endif
 
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
+    const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0;
+    const int is_unsigned = neg_one > const_zero;
+    if (is_unsigned) {
+        if (sizeof(enum NPY_TYPES) < sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+        }
+    } else {
+        if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
+        }
+    }
+    {
+        int one = 1; int little = (int)*(unsigned char *)&one;
+        unsigned char *bytes = (unsigned char *)&value;
+        return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
+                                     little, !is_unsigned);
+    }
+}
+
 static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
-    const long neg_one = (long) -1, const_zero = 0;
+    const long neg_one = (long) -1, const_zero = (long) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -13679,13 +14621,39 @@ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(long, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (long) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
+                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
+                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
+                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
 #if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
@@ -13701,24 +14669,77 @@ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
             }
 #endif
             if (sizeof(long) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
             } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (long) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(long) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
             } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT(long, PY_LONG_LONG, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13900,7 +14921,7 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
     return __Pyx_PyObject_AsStringAndSize(o, &ignore);
 }
 static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
-#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
     if (
 #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
             __Pyx_sys_getdefaultencoding_not_ascii &&
@@ -13941,7 +14962,7 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_
 #endif
     } else
 #endif
-#if !CYTHON_COMPILING_IN_PYPY
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
     if (PyByteArray_Check(o)) {
         *length = PyByteArray_GET_SIZE(o);
         return PyByteArray_AS_STRING(o);
@@ -13971,7 +14992,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
 #else
   if (PyLong_Check(x))
 #endif
-    return Py_INCREF(x), x;
+    return __Pyx_NewRef(x);
   m = Py_TYPE(x)->tp_as_number;
 #if PY_MAJOR_VERSION < 3
   if (m && m->nb_int) {
@@ -14011,18 +15032,55 @@ static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
   Py_ssize_t ival;
   PyObject *x;
 #if PY_MAJOR_VERSION < 3
-  if (likely(PyInt_CheckExact(b)))
-      return PyInt_AS_LONG(b);
+  if (likely(PyInt_CheckExact(b))) {
+    if (sizeof(Py_ssize_t) >= sizeof(long))
+        return PyInt_AS_LONG(b);
+    else
+        return PyInt_AsSsize_t(b);
+  }
 #endif
   if (likely(PyLong_CheckExact(b))) {
-    #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
-     #if CYTHON_USE_PYLONG_INTERNALS
-       switch (Py_SIZE(b)) {
-       case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0];
-       case  0: return 0;
-       case  1: return ((PyLongObject*)b)->ob_digit[0];
-       }
-     #endif
+    #if CYTHON_USE_PYLONG_INTERNALS
+    const digit* digits = ((PyLongObject*)b)->ob_digit;
+    const Py_ssize_t size = Py_SIZE(b);
+    if (likely(__Pyx_sst_abs(size) <= 1)) {
+        ival = likely(size) ? digits[0] : 0;
+        if (size == -1) ival = -ival;
+        return ival;
+    } else {
+      switch (size) {
+         case 2:
+           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+             return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case -2:
+           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+             return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case 3:
+           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+             return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case -3:
+           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+             return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case 4:
+           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+             return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case -4:
+           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+             return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+      }
+    }
     #endif
     return PyLong_AsSsize_t(b);
   }
diff --git a/skbio/alignment/_ssw_wrapper.pyx b/skbio/alignment/_ssw_wrapper.pyx
index 5a65d2e..272decc 100644
--- a/skbio/alignment/_ssw_wrapper.pyx
+++ b/skbio/alignment/_ssw_wrapper.pyx
@@ -9,7 +9,6 @@
 from cpython cimport bool
 import numpy as np
 cimport numpy as cnp
-from skbio.alignment import Alignment
 from skbio.sequence import Protein, Sequence
 
 cdef extern from "_lib/ssw.h":
diff --git a/skbio/alignment/_tabular_msa.py b/skbio/alignment/_tabular_msa.py
new file mode 100644
index 0000000..69003df
--- /dev/null
+++ b/skbio/alignment/_tabular_msa.py
@@ -0,0 +1,2342 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import collections
+import copy
+
+from future.builtins import range
+from future.utils import viewkeys, viewvalues
+import numpy as np
+import pandas as pd
+import scipy.stats
+
+from skbio._base import SkbioObject, MetadataMixin, PositionalMetadataMixin
+from skbio.sequence import Sequence
+from skbio.sequence._iupac_sequence import IUPACSequence
+from skbio.util._decorator import experimental, classonlymethod, overrides
+from skbio.util._misc import resolve_key
+from skbio.alignment._indexing import TabularMSAILoc, TabularMSALoc
+
+from skbio.alignment._repr import _TabularMSAReprBuilder
+
+
+_Shape = collections.namedtuple('Shape', ['sequence', 'position'])
+
+
+class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
+    """Store a multiple sequence alignment in tabular (row/column) form.
+
+    Parameters
+    ----------
+    sequences : iterable of IUPACSequence, TabularMSA
+        Aligned sequences in the MSA. Sequences must all be the same type and
+        length. For example, `sequences` could be an iterable of ``DNA``,
+        ``RNA``, or ``Protein`` sequences. If `sequences` is a ``TabularMSA``,
+        its `metadata`, `positional_metadata`, and `index` will be used unless
+        overridden by parameters `metadata`, `positional_metadata`, and
+        `minter`/`index`, respectively.
+    metadata : dict, optional
+        Arbitrary metadata which applies to the entire MSA. A shallow copy of
+        the ``dict`` will be made.
+    positional_metadata : pd.DataFrame consumable, optional
+        Arbitrary metadata which applies to each position in the MSA. Must be
+        able to be passed directly to ``pd.DataFrame`` constructor. Each column
+        of metadata must be the same length as the number of positions in the
+        MSA. A shallow copy of the positional metadata will be made.
+    minter : callable or metadata key, optional
+        If provided, defines an index label for each sequence in `sequences`.
+        Can either be a callable accepting a single argument (each sequence) or
+        a key into each sequence's ``metadata`` attribute.
+    index : pd.Index consumable, optional
+        Index containing labels for `sequences`. Must be the same length as
+        `sequences`. Must be able to be passed directly to ``pd.Index``
+        constructor.
+
+    Raises
+    ------
+    ValueError
+        If `minter` and `index` are both provided.
+    ValueError
+        If `index` is not the same length as `sequences`.
+
+    See Also
+    --------
+    skbio.sequence.DNA
+    skbio.sequence.RNA
+    skbio.sequence.Protein
+    pandas.DataFrame
+    pandas.Index
+
+    Notes
+    -----
+    If `minter` or `index` are not provided, default pandas labels will be
+    used: integer labels ``0..(N-1)``, where ``N`` is the number of sequences.
+
+    Examples
+    --------
+    Create a ``TabularMSA`` object with three DNA sequences and four positions:
+
+    >>> from skbio import DNA, TabularMSA
+    >>> seqs = [
+    ...     DNA('ACGT'),
+    ...     DNA('AG-T'),
+    ...     DNA('-C-T')
+    ... ]
+    >>> msa = TabularMSA(seqs)
+    >>> msa
+    TabularMSA[DNA]
+    ---------------------
+    Stats:
+        sequence count: 3
+        position count: 4
+    ---------------------
+    ACGT
+    AG-T
+    -C-T
+
+    The MSA has default index labels:
+
+    >>> msa.index
+    Int64Index([0, 1, 2], dtype='int64')
+
+    Create an MSA with metadata, positional metadata, and non-default index
+    labels:
+
+    >>> msa = TabularMSA(seqs, index=['seq1', 'seq2', 'seq3'],
+    ...                  metadata={'id': 'msa-id'},
+    ...                  positional_metadata={'prob': [3, 4, 2, 2]})
+    >>> msa
+    TabularMSA[DNA]
+    --------------------------
+    Metadata:
+        'id': 'msa-id'
+    Positional metadata:
+        'prob': <dtype: int64>
+    Stats:
+        sequence count: 3
+        position count: 4
+    --------------------------
+    ACGT
+    AG-T
+    -C-T
+    >>> msa.index
+    Index(['seq1', 'seq2', 'seq3'], dtype='object')
+
+    """
+    default_write_format = 'fasta'
+
+    @property
+    @experimental(as_of='0.4.1')
+    def dtype(self):
+        """Data type of the stored sequences.
+
+        Notes
+        -----
+        This property is not writeable.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACG'), DNA('AC-')])
+        >>> msa.dtype
+        <class 'skbio.sequence._dna.DNA'>
+        >>> msa.dtype is DNA
+        True
+
+        """
+        return type(self._get_sequence_iloc_(0)) if len(self) > 0 else None
+
+    @property
+    @experimental(as_of='0.4.1')
+    def shape(self):
+        """Number of sequences (rows) and positions (columns).
+
+        Notes
+        -----
+        This property is not writeable.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+
+        Create a ``TabularMSA`` object with 2 sequences and 3 positions:
+
+        >>> msa = TabularMSA([DNA('ACG'), DNA('AC-')])
+        >>> msa.shape
+        Shape(sequence=2, position=3)
+        >>> msa.shape == (2, 3)
+        True
+
+        Dimensions can be accessed by index or by name:
+
+        >>> msa.shape[0]
+        2
+        >>> msa.shape.sequence
+        2
+        >>> msa.shape[1]
+        3
+        >>> msa.shape.position
+        3
+
+        """
+        sequence_count = len(self)
+
+        if sequence_count > 0:
+            position_count = len(self._get_sequence_iloc_(0))
+        else:
+            position_count = 0
+
+        return _Shape(sequence=sequence_count, position=position_count)
+
+    @property
+    @experimental(as_of='0.4.1')
+    def index(self):
+        """Index containing labels along the sequence axis.
+
+        Returns
+        -------
+        pd.Index
+            Index containing sequence labels.
+
+        See Also
+        --------
+        reassign_index
+
+        Notes
+        -----
+        This property can be set and deleted. Deleting the index will reset the
+        index to the ``TabularMSA`` constructor's default.
+
+        Examples
+        --------
+        Create a ``TabularMSA`` object with sequences labeled by sequence
+        identifier:
+
+        >>> from skbio import DNA, TabularMSA
+        >>> seqs = [DNA('ACG', metadata={'id': 'a'}),
+        ...         DNA('AC-', metadata={'id': 'b'})]
+        >>> msa = TabularMSA(seqs, minter='id')
+
+        Retrieve index:
+
+        >>> msa.index
+        Index(['a', 'b'], dtype='object')
+
+        Set index:
+
+        >>> msa.index = ['seq1', 'seq2']
+        >>> msa.index
+        Index(['seq1', 'seq2'], dtype='object')
+
+        Delete index:
+
+        >>> del msa.index
+        >>> msa.index
+        Int64Index([0, 1], dtype='int64')
+
+        """
+        return self._seqs.index
+
+    @index.setter
+    def index(self, index):
+        # Cast to Index to identify tuples as a MultiIndex to match
+        # pandas constructor. Just setting would make an index of tuples.
+        if not isinstance(index, pd.Index):
+            self._seqs.index = pd.Index(index)
+        else:
+            self._seqs.index = index
+
+    @index.deleter
+    def index(self):
+        self.reassign_index()
+
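A minimal sketch (editor's illustration, not part of the upstream file) of why the setter above routes plain values through ``pd.Index``: a list of tuples is then recognized as a ``pd.MultiIndex``, matching the pandas constructor, instead of becoming a flat index whose labels happen to be tuples.

import pandas as pd
from skbio import DNA, TabularMSA

msa = TabularMSA([DNA('ACGT'), DNA('A-GT'), DNA('AC-T'), DNA('ACGA')])

# Assignment goes through pd.Index, so the tuples build a two-level
# MultiIndex rather than an index of tuple objects.
msa.index = [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
assert isinstance(msa.index, pd.MultiIndex)
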
+    @property
+    @experimental(as_of="0.4.1")
+    def loc(self):
+        """Slice the MSA on first axis by index label, second axis by position.
+
+        This will return an object with the following interface:
+
+        .. code-block:: python
+
+           msa.loc[seq_idx]
+           msa.loc[seq_idx, pos_idx]
+           msa.loc(axis='sequence')[seq_idx]
+           msa.loc(axis='position')[pos_idx]
+
+        Parameters
+        ----------
+        seq_idx : label, slice, 1D array_like (bool or label)
+            Slice the first axis of the MSA. When this value is a scalar, a
+            sequence of ``msa.dtype`` will be returned. This may be further
+            sliced by `pos_idx`.
+        pos_idx : (same as seq_idx), optional
+            Slice the second axis of the MSA. When this value is a scalar, a
+            sequence of type :class:`skbio.sequence.Sequence` will be returned.
+            This represents a column of the MSA and may have been additionally
+            sliced by `seq_idx`.
+        axis : {'sequence', 'position', 0, 1, None}, optional
+            Limit the axis to slice on. When set, a tuple as the argument will
+            no longer be split into `seq_idx` and `pos_idx`.
+
+        Returns
+        -------
+        TabularMSA, IUPACSequence, Sequence
+            A ``TabularMSA`` is returned when `seq_idx` and `pos_idx` are
+            non-scalars. An ``IUPACSequence`` of type ``msa.dtype`` is
+            returned when `seq_idx` is a scalar (this object will match the
+            dtype of the MSA). A ``Sequence`` is returned when `seq_idx` is
+            non-scalar and `pos_idx` is scalar.
+
+        See Also
+        --------
+        iloc
+        __getitem__
+
+        Notes
+        -----
+        If the slice operation results in a ``TabularMSA`` without any
+        sequences, the MSA's ``positional_metadata`` will be unset.
+
+        When the MSA's index is a ``pd.MultiIndex`` a tuple may be given to
+        `seq_idx` to indicate the slicing operations to perform on each
+        component index.
+
+        Examples
+        --------
+        First we need to set up an MSA to slice:
+
+        >>> from skbio import TabularMSA, DNA
+        >>> msa = TabularMSA([DNA("ACGT"), DNA("A-GT"), DNA("AC-T"),
+        ...                   DNA("ACGA")], index=['a', 'b', 'c', 'd'])
+        >>> msa
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 4
+            position count: 4
+        ---------------------
+        ACGT
+        A-GT
+        AC-T
+        ACGA
+        >>> msa.index
+        Index(['a', 'b', 'c', 'd'], dtype='object')
+
+
+        When we slice by a scalar we get the original sequence back out of the
+        MSA:
+
+        >>> msa.loc['b']
+        DNA
+        -----------------------------
+        Stats:
+            length: 4
+            has gaps: True
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 33.33%
+        -----------------------------
+        0 A-GT
+
+        Similarly when we slice the second axis by a scalar we get a column of
+        the MSA:
+
+        >>> msa.loc[..., 1]
+        Sequence
+        -------------
+        Stats:
+            length: 4
+        -------------
+        0 C-CC
+
+        Note: we return an ``skbio.Sequence`` object because the column of an
+        alignment has no biological meaning and many operations defined for the
+        MSA's sequence `dtype` would be meaningless.
+
+        When we slice both axes by a scalar, operations are applied left to
+        right:
+
+        >>> msa.loc['a', 0]
+        DNA
+        -----------------------------
+        Stats:
+            length: 1
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 0.00%
+        -----------------------------
+        0 A
+
+        In other words, it exactly matches slicing the resulting sequence
+        object directly:
+
+        >>> msa.loc['a'][0]
+        DNA
+        -----------------------------
+        Stats:
+            length: 1
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 0.00%
+        -----------------------------
+        0 A
+
+        When our slice is non-scalar we get back an MSA of the same `dtype`:
+
+        >>> msa.loc[['a', 'c']]
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 2
+            position count: 4
+        ---------------------
+        ACGT
+        AC-T
+
+        We can similarly slice out a column of that:
+
+        >>> msa.loc[['a', 'c'], 2]
+        Sequence
+        -------------
+        Stats:
+            length: 2
+        -------------
+        0 G-
+
+        Slice syntax works as well:
+
+        >>> msa.loc[:'c']
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 3
+            position count: 4
+        ---------------------
+        ACGT
+        A-GT
+        AC-T
+
+        Notice how the end label is included in the results. This is different
+        from how positional slices behave:
+
+        >>> msa.loc[[True, False, False, True], 2:3]
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 2
+            position count: 1
+        ---------------------
+        G
+        G
+
+        Here we sliced the first axis by a boolean vector, but then restricted
+        the columns to a single column. Because the second axis was given a
+        non-scalar we still receive an MSA even though only one column is
+        present.
+
+        Duplicate labels can be an unfortunate reality in the real world,
+        however `loc` is capable of handling this:
+
+        >>> msa.index = ['a', 'a', 'b', 'c']
+
+        Notice how the label 'a' happens twice. If we were to access 'a' we get
+        back an MSA with both sequences:
+
+        >>> msa.loc['a']
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 2
+            position count: 4
+        ---------------------
+        ACGT
+        A-GT
+
+        Remember that `iloc` can always be used to differentiate sequences with
+        duplicate labels.
+
+        More advanced slicing patterns are possible with different index types.
+
+        Let's use a `pd.MultiIndex`:
+
+        >>> msa.index = [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
+
+        Here we will explicitly set the axis that we are slicing by to make
+        things easier to read:
+
+        >>> msa.loc(axis='sequence')['a', 0]
+        DNA
+        -----------------------------
+        Stats:
+            length: 4
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 50.00%
+        -----------------------------
+        0 ACGT
+
+        This selected the first sequence because the complete label was
+        provided. In other words `('a', 0)` was treated as a scalar for this
+        index.
+
+        We can also slice along the component indices of the multi-index:
+
+        >>> msa.loc(axis='sequence')[:, 1]
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 2
+            position count: 4
+        ---------------------
+        A-GT
+        ACGA
+
+        If we were to do that again without the `axis` argument, it would look
+        like this:
+
+        >>> msa.loc[(slice(None), 1), ...]
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 2
+            position count: 4
+        ---------------------
+        A-GT
+        ACGA
+
+        Notice how we needed to specify the second axis. If we had left that
+        out we would have simply gotten the 2nd column back instead. We also
+        lost the syntactic sugar for slice objects. These are a few of the
+        reasons specifying the `axis` preemptively can be useful.
+
+        """
+        return self._loc
+
+    @property
+    @experimental(as_of="0.4.1")
+    def iloc(self):
+        """Slice the MSA on either axis by index position.
+
+        This will return an object with the following interface:
+
+        .. code-block:: python
+
+           msa.iloc[seq_idx]
+           msa.iloc[seq_idx, pos_idx]
+           msa.iloc(axis='sequence')[seq_idx]
+           msa.iloc(axis='position')[pos_idx]
+
+        Parameters
+        ----------
+        seq_idx : int, slice, iterable (int and slice), 1D array_like (bool)
+            Slice the first axis of the MSA. When this value is a scalar, a
+            sequence of ``msa.dtype`` will be returned. This may be further
+            sliced by `pos_idx`.
+        pos_idx : (same as seq_idx), optional
+            Slice the second axis of the MSA. When this value is a scalar, a
+            sequence of type :class:`skbio.sequence.Sequence` will be returned.
+            This represents a column of the MSA and may have been additionally
+            sliced by `seq_idx`.
+        axis : {'sequence', 'position', 0, 1, None}, optional
+            Limit the axis to slice on. When set, a tuple as the argument will
+            no longer be split into `seq_idx` and `pos_idx`.
+
+        Returns
+        -------
+        TabularMSA, IUPACSequence, Sequence
+            A ``TabularMSA`` is returned when `seq_idx` and `pos_idx` are
+            non-scalars. An ``IUPACSequence`` of type ``msa.dtype`` is
+            returned when `seq_idx` is a scalar (this object will match the
+            dtype of the MSA). A ``Sequence`` is returned when `seq_idx` is
+            non-scalar and `pos_idx` is scalar.
+
+        See Also
+        --------
+        __getitem__
+        loc
+
+        Notes
+        -----
+        If the slice operation results in a ``TabularMSA`` without any
+        sequences, the MSA's ``positional_metadata`` will be unset.
+
+        Examples
+        --------
+        First we need to set up an MSA to slice:
+
+        >>> from skbio import TabularMSA, DNA
+        >>> msa = TabularMSA([DNA("ACGT"), DNA("A-GT"), DNA("AC-T"),
+        ...                   DNA("ACGA")])
+        >>> msa
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 4
+            position count: 4
+        ---------------------
+        ACGT
+        A-GT
+        AC-T
+        ACGA
+
+        When we slice by a scalar we get the original sequence back out of the
+        MSA:
+
+        >>> msa.iloc[1]
+        DNA
+        -----------------------------
+        Stats:
+            length: 4
+            has gaps: True
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 33.33%
+        -----------------------------
+        0 A-GT
+
+        Similarly when we slice the second axis by a scalar we get a column of
+        the MSA:
+
+        >>> msa.iloc[..., 1]
+        Sequence
+        -------------
+        Stats:
+            length: 4
+        -------------
+        0 C-CC
+
+        Note: we return an ``skbio.Sequence`` object because the column of an
+        alignment has no biological meaning and many operations defined for the
+        MSA's sequence `dtype` would be meaningless.
+
+        When we slice both axes by a scalar, operations are applied left to
+        right:
+
+        >>> msa.iloc[0, 0]
+        DNA
+        -----------------------------
+        Stats:
+            length: 1
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 0.00%
+        -----------------------------
+        0 A
+
+        In other words, it exactly matches slicing the resulting sequence
+        object directly:
+
+        >>> msa.iloc[0][0]
+        DNA
+        -----------------------------
+        Stats:
+            length: 1
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 0.00%
+        -----------------------------
+        0 A
+
+        When our slice is non-scalar we get back an MSA of the same `dtype`:
+
+        >>> msa.iloc[[0, 2]]
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 2
+            position count: 4
+        ---------------------
+        ACGT
+        AC-T
+
+        We can similarly slice out a column of that:
+
+        >>> msa.iloc[[0, 2], 2]
+        Sequence
+        -------------
+        Stats:
+            length: 2
+        -------------
+        0 G-
+
+        Slice syntax works as well:
+
+        >>> msa.iloc[:3]
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 3
+            position count: 4
+        ---------------------
+        ACGT
+        A-GT
+        AC-T
+
+        We can also use boolean vectors:
+
+        >>> msa.iloc[[True, False, False, True], 2:3]
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 2
+            position count: 1
+        ---------------------
+        G
+        G
+
+        Here we sliced the first axis by a boolean vector, but then restricted
+        the columns to a single column. Because the second axis was given a
+        non-scalar we still receive an MSA even though only one column is
+        present.
+
+        """
+        return self._iloc
+
+    @classonlymethod
+    @experimental(as_of="0.4.1")
+    def from_dict(cls, dictionary):
+        """Create a ``TabularMSA`` from a ``dict``.
+
+        Parameters
+        ----------
+        dictionary : dict
+            Dictionary mapping keys to ``IUPACSequence`` sequence objects. The
+            ``TabularMSA`` object will have its index labels set
+            to the keys in the dictionary.
+
+        Returns
+        -------
+        TabularMSA
+            ``TabularMSA`` object constructed from the keys and sequences in
+            `dictionary`.
+
+        See Also
+        --------
+        to_dict
+        sort
+
+        Notes
+        -----
+        The order of sequences and index labels in the resulting ``TabularMSA``
+        object is arbitrary. Use ``TabularMSA.sort`` to set a different order.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+        >>> seqs = {'a': DNA('ACGT'), 'b': DNA('A--T')}
+        >>> msa = TabularMSA.from_dict(seqs)
+        >>> msa.shape
+        Shape(sequence=2, position=4)
+        >>> 'a' in msa
+        True
+        >>> 'b' in msa
+        True
+
+        """
+        # Python 2 and 3 guarantee same order of iteration as long as no
+        # modifications are made to the dictionary between calls:
+        #     https://docs.python.org/2/library/stdtypes.html#dict.items
+        #     https://docs.python.org/3/library/stdtypes.html#
+        #         dictionary-view-objects
+        return cls(viewvalues(dictionary), index=viewkeys(dictionary))
+
+    @experimental(as_of='0.4.1')
+    def __init__(self, sequences, metadata=None, positional_metadata=None,
+                 minter=None, index=None):
+        if isinstance(sequences, TabularMSA):
+            if metadata is None and sequences.has_metadata():
+                metadata = sequences.metadata
+            if (positional_metadata is None and
+                    sequences.has_positional_metadata()):
+                positional_metadata = sequences.positional_metadata
+            if minter is None and index is None:
+                index = sequences.index
+
+        self._seqs = pd.Series([])
+        self.extend(sequences, minter=minter, index=index)
+
+        MetadataMixin._init_(self, metadata=metadata)
+        PositionalMetadataMixin._init_(
+            self, positional_metadata=positional_metadata)
+
+        # Set up our indexers
+        self._loc = TabularMSALoc(self)
+        self._iloc = TabularMSAILoc(self)
+
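As a sketch of the branch at the top of ``__init__`` (an editorial example, not upstream code): when another ``TabularMSA`` is passed as ``sequences``, its metadata and index carry over unless the caller overrides them explicitly.

from skbio import DNA, TabularMSA

original = TabularMSA([DNA('ACGT'), DNA('A--T')],
                      metadata={'id': 'msa-id'},
                      index=['s1', 's2'])

derived = TabularMSA(original)
assert derived.metadata == {'id': 'msa-id'}   # inherited
assert list(derived.index) == ['s1', 's2']    # inherited

relabelled = TabularMSA(original, index=['x', 'y'])
assert list(relabelled.index) == ['x', 'y']   # index explicitly overridden
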
+    def _constructor_(self, sequences=NotImplemented, metadata=NotImplemented,
+                      positional_metadata=NotImplemented,
+                      index=NotImplemented):
+        """Return new copy of the MSA with overridden properties.
+
+        NotImplemented is used as a sentinel so that None may be used to
+        override values.
+        """
+        if metadata is NotImplemented:
+            if self.has_metadata():
+                metadata = self.metadata
+            else:
+                metadata = None
+        if positional_metadata is NotImplemented:
+            if self.has_positional_metadata():
+                positional_metadata = self.positional_metadata
+            else:
+                positional_metadata = None
+
+        if index is NotImplemented:
+            if isinstance(sequences, pd.Series):
+                index = sequences.index
+            else:
+                index = self.index
+
+        if sequences is NotImplemented:
+            sequences = self._seqs
+
+        sequences = [copy.copy(s) for s in sequences]
+
+        return self.__class__(sequences, metadata=metadata,
+                              positional_metadata=positional_metadata,
+                              index=index)
+
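The ``NotImplemented`` sentinel above deserves a quick illustration (hypothetical helper, not part of the file): ``None`` is a meaningful override value here, e.g. to clear positional metadata, so a different sentinel is needed to mean "argument not supplied".

_MISSING = NotImplemented

def pick(value=_MISSING, fallback='current value'):
    # NotImplemented means "caller did not pass anything"; None is a real,
    # deliberate override (such as clearing positional_metadata).
    if value is _MISSING:
        return fallback
    return value

assert pick() == 'current value'
assert pick(None) is None
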
+    @experimental(as_of='0.4.1')
+    def __repr__(self):
+        """String summary of this MSA."""
+        pep8_line_length_limit = 79
+        length_taken_by_docstring_indent = 8
+        width = pep8_line_length_limit - length_taken_by_docstring_indent
+        return _TabularMSAReprBuilder(
+            msa=self,
+            width=width,
+            indent=4).build()
+
+    def _repr_stats(self):
+        return [("sequence count", str(self.shape.sequence)),
+                ("position count", str(self.shape.position))]
+
+    @experimental(as_of='0.4.1')
+    def __bool__(self):
+        """Boolean indicating whether the MSA is empty or not.
+
+        Returns
+        -------
+        bool
+            ``False`` if there are no sequences, OR if there are no positions
+            (i.e., all sequences are empty). ``True`` otherwise.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+
+        MSA with sequences and positions:
+
+        >>> msa = TabularMSA([DNA('ACG'), DNA('AC-')])
+        >>> bool(msa)
+        True
+
+        No sequences:
+
+        >>> msa = TabularMSA([])
+        >>> bool(msa)
+        False
+
+        No positions:
+
+        >>> msa = TabularMSA([DNA(''), DNA('')])
+        >>> bool(msa)
+        False
+
+        """
+        # It is impossible to have 0 sequences and >0 positions.
+        return self.shape.position > 0
+
+    # Python 2 compatibility.
+    __nonzero__ = __bool__
+
+    @experimental(as_of='0.4.1')
+    def __contains__(self, label):
+        """Determine if an index label is in this MSA.
+
+        Parameters
+        ----------
+        label : hashable
+            Label to search for in this MSA.
+
+        Returns
+        -------
+        bool
+            Indicates whether `label` is in this MSA.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACG'), DNA('AC-')], index=['l1', 'l2'])
+        >>> 'l1' in msa
+        True
+        >>> 'l2' in msa
+        True
+        >>> 'l3' in msa
+        False
+
+        """
+        return label in self.index
+
+    @experimental(as_of='0.4.1')
+    def __len__(self):
+        """Number of sequences in the MSA.
+
+        Returns
+        -------
+        int
+            Number of sequences in the MSA (i.e., size of the 1st dimension).
+
+        Notes
+        -----
+        This is equivalent to ``msa.shape[0]``.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACG'), DNA('AC-')])
+        >>> len(msa)
+        2
+        >>> msa = TabularMSA([])
+        >>> len(msa)
+        0
+
+        """
+        return len(self._seqs)
+
+    @experimental(as_of='0.4.1')
+    def __iter__(self):
+        """Iterate over sequences in the MSA.
+
+        Yields
+        ------
+        IUPACSequence
+            Each sequence in the order they are stored in the MSA.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACG'), DNA('AC-')])
+        >>> for seq in msa:
+        ...     str(seq)
+        'ACG'
+        'AC-'
+
+        """
+        return iter(self._seqs)
+
+    @experimental(as_of='0.4.1')
+    def __reversed__(self):
+        """Iterate in reverse order over sequences in the MSA.
+
+        Yields
+        ------
+        IUPACSequence
+            Each sequence in reverse order from how they are stored in the MSA.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACG'), DNA('AC-')])
+        >>> for seq in reversed(msa):
+        ...     str(seq)
+        'AC-'
+        'ACG'
+
+        """
+        return reversed(self._seqs)
+
+    @experimental(as_of='0.4.1')
+    def __str__(self):
+        """String summary of this MSA."""
+        return self.__repr__()
+
+    @experimental(as_of='0.4.1')
+    def __eq__(self, other):
+        """Determine if this MSA is equal to another.
+
+        ``TabularMSA`` objects are equal if their sequences, index, metadata,
+        and positional metadata are equal.
+
+        Parameters
+        ----------
+        other : TabularMSA
+            MSA to test for equality against.
+
+        Returns
+        -------
+        bool
+            Indicates whether this MSA is equal to `other`.
+
+        Examples
+        --------
+        >>> from skbio import DNA, RNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACG'), DNA('AC-')])
+        >>> msa == msa
+        True
+
+        MSAs with different sequence characters are not equal:
+
+        >>> msa == TabularMSA([DNA('ACG'), DNA('--G')])
+        False
+
+        MSAs with different types of sequences (different ``dtype``) are not
+        equal:
+
+        >>> msa == TabularMSA([RNA('ACG'), RNA('AC-')])
+        False
+
+        MSAs with different sequence metadata are not equal:
+
+        >>> msa == TabularMSA([DNA('ACG', metadata={'id': 'a'}), DNA('AC-')])
+        False
+
+        MSAs with different index labels are not equal:
+
+        >>> msa == TabularMSA([DNA('ACG'), DNA('AC-')], minter=str)
+        False
+
+        MSAs with different metadata are not equal:
+
+        >>> msa == TabularMSA([DNA('ACG'), DNA('AC-')],
+        ...                   metadata={'id': 'msa-id'})
+        False
+
+        MSAs with different positional metadata are not equal:
+
+        >>> msa == TabularMSA([DNA('ACG'), DNA('AC-')],
+        ...                   positional_metadata={'prob': [3, 2, 1]})
+        False
+
+        """
+        if not isinstance(other, TabularMSA):
+            return False
+
+        if not MetadataMixin._eq_(self, other):
+            return False
+
+        if not PositionalMetadataMixin._eq_(self, other):
+            return False
+
+        return self._seqs.equals(other._seqs)
+
+    @experimental(as_of='0.4.1')
+    def __ne__(self, other):
+        """Determine if this MSA is not equal to another.
+
+        ``TabularMSA`` objects are not equal if their sequences, index,
+        metadata, or positional metadata are not equal.
+
+        Parameters
+        ----------
+        other : TabularMSA
+            MSA to test for inequality against.
+
+        Returns
+        -------
+        bool
+            Indicates whether this MSA is not equal to `other`.
+
+        See Also
+        --------
+        __eq__
+
+        """
+        return not (self == other)
+
+    @experimental(as_of='0.4.1')
+    def __copy__(self):
+        """Return a shallow copy of this MSA.
+
+        Returns
+        -------
+        TabularMSA
+            Shallow copy of this MSA. Sequence objects will be shallow-copied.
+
+        See Also
+        --------
+        __deepcopy__
+
+        Examples
+        --------
+        >>> import copy
+        >>> from skbio import DNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACG'), DNA('AC-')])
+        >>> msa_copy = copy.copy(msa)
+        >>> msa_copy == msa
+        True
+        >>> msa_copy is msa
+        False
+
+        """
+        msa_copy = self._constructor_()
+
+        msa_copy._metadata = MetadataMixin._copy_(self)
+        msa_copy._positional_metadata = PositionalMetadataMixin._copy_(self)
+
+        return msa_copy
+
+    @experimental(as_of='0.4.1')
+    def __deepcopy__(self, memo):
+        """Return a deep copy of this MSA.
+
+        Returns
+        -------
+        TabularMSA
+            Deep copy of this MSA. Sequence objects will be deep-copied.
+
+        See Also
+        --------
+        __copy__
+
+        Examples
+        --------
+        >>> import copy
+        >>> from skbio import DNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACG'), DNA('AC-')])
+        >>> msa_copy = copy.deepcopy(msa)
+        >>> msa_copy == msa
+        True
+        >>> msa_copy is msa
+        False
+
+        """
+        seqs = (copy.deepcopy(seq, memo) for seq in self._seqs)
+        msa_copy = self._constructor_(sequences=seqs)
+
+        msa_copy._metadata = MetadataMixin._deepcopy_(self, memo)
+        msa_copy._positional_metadata = \
+            PositionalMetadataMixin._deepcopy_(self, memo)
+
+        return msa_copy
+
+    @experimental(as_of="0.4.1")
+    def __getitem__(self, indexable):
+        """Slice the MSA on either axis.
+
+        This is a pass-through for :func:`skbio.alignment.TabularMSA.iloc`.
+        Please refer to the associated documentation.
+
+        See Also
+        --------
+        iloc
+        loc
+
+        Notes
+        -----
+        Axis restriction is not possible for this method.
+
+        To slice by labels, use ``loc``.
+
+        """
+        return self.iloc[indexable]
+
+    # Helpers for TabularMSAILoc and TabularMSALoc
+    def _get_sequence_iloc_(self, i):
+        return self._seqs.iloc[i]
+
+    def _slice_sequences_iloc_(self, i):
+        new_seqs = self._seqs.iloc[i]
+        # TODO: change for #1198
+        if len(new_seqs) == 0:
+            return self._constructor_(new_seqs, positional_metadata=None)
+        return self._constructor_(new_seqs)
+
+    def _get_sequence_loc_(self, l):
+        new_seqs = self._seqs.loc[l]
+        if type(new_seqs) is self.dtype:
+            return new_seqs
+        else:
+            # CategoricalIndex never yields a scalar here; unwrap it below.
+            if len(new_seqs) == 1:
+                return new_seqs.iloc[0]
+            else:
+                # This was a common failure mode; shouldn't happen anymore, but
+                # it could strike again.
+                raise AssertionError(
+                    "Something went wrong with the index %r provided to"
+                    " `_get_sequence_loc_`, please report this stack trace to"
+                    "\nhttps://github.com/biocore/scikit-bio/issues" % l)
+
+    def _slice_sequences_loc_(self, l):
+        new_seqs = self._seqs.loc[l]
+        try:
+            # TODO: change for #1198
+            if len(new_seqs) == 0:
+                return self._constructor_(new_seqs, positional_metadata=None)
+            return self._constructor_(new_seqs)
+        except TypeError:  # NaN hit the constructor, key was bad... probably
+            raise KeyError("Part of `%r` was not in the index." % (l,))
+
+    def _get_position_(self, i):
+        seq = Sequence.concat([s[i] for s in self._seqs], how='outer')
+        if self.has_positional_metadata():
+            seq.metadata = dict(self.positional_metadata.iloc[i])
+        return seq
+
+    def _slice_positions_(self, i):
+        seqs = self._seqs.apply(lambda seq: seq[i])
+        pm = None
+        if self.has_positional_metadata():
+            pm = self.positional_metadata.iloc[i]
+        return self._constructor_(seqs, positional_metadata=pm)
+    # end of helpers
+
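For orientation, here is a sketch (editor's example, not upstream code) of what ``_get_position_`` assembles: a column is the i-th character of every row, concatenated into a plain ``Sequence``, which is also what the public ``iloc`` column slice returns.

from skbio import DNA, TabularMSA
from skbio.sequence import Sequence

msa = TabularMSA([DNA('ACG'), DNA('A-T')])

# Rebuild column 1 by hand, mirroring _get_position_.
column = Sequence.concat([seq[1] for seq in msa], how='outer')
assert str(column) == 'C-'

# The public route through iloc yields the same column.
assert str(msa.iloc[..., 1]) == 'C-'
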
+    @experimental(as_of='0.4.1')
+    def iter_positions(self, reverse=False):
+        """Iterate over positions (columns) in the MSA.
+
+        Parameters
+        ----------
+        reverse : bool, optional
+            If ``True``, iterate over positions in reverse order.
+
+        Yields
+        ------
+        Sequence
+            Each position in the order they are stored in the MSA.
+
+        See Also
+        --------
+        __iter__
+        __reversed__
+        skbio.sequence.Sequence.concat
+
+        Notes
+        -----
+        Each position will be yielded as *exactly* a ``Sequence`` object,
+        regardless of this MSA's ``dtype``. ``Sequence`` is used because a
+        position is an artifact of multiple sequence alignment and is not a
+        real biological sequence.
+
+        Each ``Sequence`` object will have its corresponding MSA positional
+        metadata stored as ``metadata``.
+
+        Sequences will have their positional metadata concatenated using an
+        outer join. See ``Sequence.concat(how='outer')`` for details.
+
+        Examples
+        --------
+        Create an MSA with positional metadata:
+
+        >>> from skbio import DNA, TabularMSA
+        >>> sequences = [DNA('ACG'),
+        ...              DNA('A-T')]
+        >>> msa = TabularMSA(sequences,
+        ...                  positional_metadata={'prob': [3, 1, 2]})
+
+        Iterate over positions:
+
+        >>> for position in msa.iter_positions():
+        ...     position
+        ...     print()
+        Sequence
+        -------------
+        Metadata:
+            'prob': 3
+        Stats:
+            length: 2
+        -------------
+        0 AA
+        <BLANKLINE>
+        Sequence
+        -------------
+        Metadata:
+            'prob': 1
+        Stats:
+            length: 2
+        -------------
+        0 C-
+        <BLANKLINE>
+        Sequence
+        -------------
+        Metadata:
+            'prob': 2
+        Stats:
+            length: 2
+        -------------
+        0 GT
+        <BLANKLINE>
+
+        Note that MSA positional metadata is stored as ``metadata`` on each
+        ``Sequence`` object.
+
+        Iterate over positions in reverse order:
+
+        >>> for position in msa.iter_positions(reverse=True):
+        ...     position
+        ...     print('')
+        Sequence
+        -------------
+        Metadata:
+            'prob': 2
+        Stats:
+            length: 2
+        -------------
+        0 GT
+        <BLANKLINE>
+        Sequence
+        -------------
+        Metadata:
+            'prob': 1
+        Stats:
+            length: 2
+        -------------
+        0 C-
+        <BLANKLINE>
+        Sequence
+        -------------
+        Metadata:
+            'prob': 3
+        Stats:
+            length: 2
+        -------------
+        0 AA
+        <BLANKLINE>
+
+        """
+        indices = range(self.shape.position)
+        if reverse:
+            indices = reversed(indices)
+
+        return (self._get_position_(index) for index in indices)
+
+    @experimental(as_of='0.4.1')
+    def consensus(self):
+        """Compute the majority consensus sequence for this MSA.
+
+        The majority consensus sequence contains the most common character at
+        each position in this MSA. Ties will be broken in an arbitrary manner.
+
+        Returns
+        -------
+        Sequence
+            The majority consensus sequence for this MSA. The type of sequence
+            returned will be the same as this MSA's ``dtype`` or ``Sequence``
+            if this MSA does not contain any sequences. The majority consensus
+            sequence will have its positional metadata set to this MSA's
+            positional metadata if present.
+
+        Notes
+        -----
+        The majority consensus sequence will use this MSA's default gap
+        character (``dtype.default_gap_char``) to represent gap majority at a
+        position, regardless of the gap characters present at that position.
+
+        Different gap characters at a position are **not** treated as distinct
+        characters. All gap characters at a position contribute to that
+        position's gap consensus.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+        >>> sequences = [DNA('AC---'),
+        ...              DNA('AT-C.'),
+        ...              DNA('TT-CG')]
+        >>> msa = TabularMSA(sequences,
+        ...                  positional_metadata={'prob': [2, 1, 2, 3, 5]})
+        >>> msa.consensus()
+        DNA
+        -----------------------------
+        Positional metadata:
+            'prob': <dtype: int64>
+        Stats:
+            length: 5
+            has gaps: True
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 33.33%
+        -----------------------------
+        0 AT-C-
+
+        Note that the last position in the MSA has more than one type of gap
+        character. These are not treated as distinct characters; both types of
+        gap characters contribute to the position's consensus. Also note that
+        ``DNA.default_gap_char`` is used to represent gap majority at a
+        position (``'-'``).
+
+        """
+        dtype = self.dtype
+        if dtype is None:
+            dtype = Sequence
+
+        positional_metadata = None
+        if self.has_positional_metadata():
+            positional_metadata = self.positional_metadata
+
+        consensus = []
+        for position in self.iter_positions():
+            freqs = position.frequencies()
+
+            gap_freq = 0
+            for gap_char in dtype.gap_chars:
+                if gap_char in freqs:
+                    gap_freq += freqs.pop(gap_char)
+            assert dtype.default_gap_char not in freqs
+            freqs[dtype.default_gap_char] = gap_freq
+
+            consensus.append(collections.Counter(freqs).most_common(1)[0][0])
+
+        return dtype(''.join(consensus),
+                     positional_metadata=positional_metadata)
+
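The loop above pools every gap character's count under the default gap character before picking the most common entry with ``collections.Counter.most_common``; ties are then broken arbitrarily. A small sketch of the pooling step for a single column, assuming the DNA gap characters ``'-'`` and ``'.'``:

    import collections

    # Character counts observed in one column across four sequences,
    # e.g. the characters 'A', 'C', '-', '.'.
    freqs = {'A': 1, 'C': 1, '-': 1, '.': 1}

    # Pool all gap characters under the default gap character ('-' for DNA).
    gap_total = freqs.pop('-') + freqs.pop('.')
    freqs['-'] = gap_total

    # '-' now has count 2 and wins the majority vote for this column.
    consensus_char = collections.Counter(freqs).most_common(1)[0][0]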
+    def _build_inverse_shannon_uncertainty_f(self, include_gaps):
+        base = len(self.dtype.nondegenerate_chars)
+        if include_gaps:
+            # Increment the base by one to reflect the possible inclusion of
+            # the default gap character.
+            base += 1
+
+        def f(p):
+            freqs = list(p.kmer_frequencies(k=1).values())
+            return 1. - scipy.stats.entropy(freqs, base=base)
+        return f
+
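The closure built above scores a column as one minus Shannon's uncertainty of its character frequencies, with the logarithm base equal to the alphabet size (incremented by one when gaps are included). A standalone sketch of the same arithmetic, assuming the DNA nondegenerate alphabet size of 4:

    import scipy.stats

    # Fully conserved column (e.g. 'AAAA'): uncertainty is 0, score is 1.0.
    conserved = 1. - scipy.stats.entropy([4], base=4)

    # Evenly split column (e.g. 'AACC'): uncertainty is 0.5 in base 4, so the
    # score is approximately 0.5.
    mixed = 1. - scipy.stats.entropy([2, 2], base=4)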
+    @experimental(as_of='0.4.1')
+    def conservation(self, metric='inverse_shannon_uncertainty',
+                     degenerate_mode='error', gap_mode='nan'):
+        """Apply metric to compute conservation for all alignment positions
+
+        Parameters
+        ----------
+        metric : {'inverse_shannon_uncertainty'}, optional
+            Metric that should be applied for computing conservation. Resulting
+            values should be larger when a position is more conserved.
+        degenerate_mode : {'nan', 'error'}, optional
+            Mode for handling positions with degenerate characters. If
+            ``"nan"``, positions with degenerate characters will be assigned a
+            conservation score of ``np.nan``. If ``"error"``, an
+            error will be raised if one or more degenerate characters are
+            present.
+        gap_mode : {'nan', 'ignore', 'error', 'include'}, optional
+            Mode for handling positions with gap characters. If ``"nan"``,
+            positions with gaps will be assigned a conservation score of
+            ``np.nan``. If ``"ignore"``, positions with gaps will be filtered
+            to remove gaps before ``metric`` is applied. If ``"error"``, an
+            error will be raised if one or more gap characters are present. If
+            ``"include"``, conservation will be computed on alignment positions
+            with gaps included. In this case, it is up to the metric to ensure
+            that gaps are handled as they should be or to raise an error if
+            gaps are not supported by that metric.
+
+        Returns
+        -------
+        np.array of floats
+            Values resulting from the application of ``metric`` to each
+            position in the alignment.
+
+        Raises
+        ------
+        ValueError
+            If an unknown ``metric``, ``degenerate_mode`` or ``gap_mode`` is
+            provided.
+        ValueError
+            If any degenerate characters are present in the alignment when
+            ``degenerate_mode`` is ``"error"``.
+        ValueError
+            If any gaps are present in the alignment when ``gap_mode`` is
+            ``"error"``.
+
+        Notes
+        -----
+        Users should be careful interpreting results when
+        ``gap_mode = "include"`` as the results may be misleading. For example,
+        as pointed out in [1]_, a protein alignment position composed of 90%
+        gaps and 10% tryptophans would score as more highly conserved than a
+        position composed of alanine and glycine in equal frequencies with the
+        ``"inverse_shannon_uncertainty"`` metric.
+
+        ``gap_mode = "include"`` will result in all gap characters being
+        recoded to ``Alignment.dtype.default_gap_char``. Because no
+        conservation metrics that we are aware of consider different gap
+        characters differently (e.g., none of the metrics described in [1]_),
+        they are all treated the same within this method.
+
+        The ``inverse_shannon_uncertainty`` metric is simply one minus
+        Shannon's uncertainty metric. This method uses the inverse of Shannon's
+        uncertainty so that larger values imply higher conservation. Shannon's
+        uncertainty is also referred to as Shannon's entropy, but when making
+        computations from symbols, as is done here, "uncertainty" is the
+        preferred term ([2]_).
+
+        References
+        ----------
+        .. [1] Valdar WS. Scoring residue conservation. Proteins. (2002)
+        .. [2] Schneider T. Pitfalls in information theory (website, ca. 2015).
+           https://schneider.ncifcrf.gov/glossary.html#Shannon_entropy
+
+        """
+
+        if gap_mode not in {'nan', 'error', 'include', 'ignore'}:
+            raise ValueError("Unknown gap_mode provided: %s" % gap_mode)
+
+        if degenerate_mode not in {'nan', 'error'}:
+            raise ValueError("Unknown degenerate_mode provided: %s" %
+                             degenerate_mode)
+
+        if metric not in {'inverse_shannon_uncertainty'}:
+            raise ValueError("Unknown metric provided: %s" %
+                             metric)
+
+        if self.shape[0] == 0:
+            # handle empty alignment to avoid error on lookup of character sets
+            return np.array([])
+
+        # Since the only currently allowed metric is
+        # inverse_shannon_uncertainty, and we already know that a valid metric
+        # was provided, we just define metric_f here. When additional metrics
+        # are supported, this will be handled differently (e.g., via a lookup
+        # or if/elif/else).
+        metric_f = self._build_inverse_shannon_uncertainty_f(
+                        gap_mode == 'include')
+
+        result = []
+        for p in self.iter_positions():
+            cons = None
+            # cast p to self.dtype for access to gap/degenerate related
+            # functionality
+            pos_seq = self.dtype(p)
+
+            # handle degenerate characters if present
+            if pos_seq.has_degenerates():
+                if degenerate_mode == 'nan':
+                    cons = np.nan
+                else:  # degenerate_mode == 'error' is the only choice left
+                    degenerate_chars = pos_seq[pos_seq.degenerates()]
+                    raise ValueError("Conservation is undefined for positions "
+                                     "with degenerate characters. The "
+                                     "following degenerate characters were "
+                                     "observed: %s." % degenerate_chars)
+
+            # handle gap characters if present
+            if pos_seq.has_gaps():
+                if gap_mode == 'nan':
+                    cons = np.nan
+                elif gap_mode == 'error':
+                    raise ValueError("Gap characters present in alignment.")
+                elif gap_mode == 'ignore':
+                    pos_seq = pos_seq.degap()
+                else:  # gap_mode == 'include' is the only choice left
+                    # Recode all gap characters with pos_seq.default_gap_char.
+                    # This logic should be replaced with a call to
+                    # pos_seq.replace when it exists.
+                    # https://github.com/biocore/scikit-bio/issues/1222
+                    with pos_seq._byte_ownership():
+                        pos_seq._bytes[pos_seq.gaps()] = \
+                            ord(pos_seq.default_gap_char)
+
+            if cons is None:
+                cons = metric_f(pos_seq)
+
+            result.append(cons)
+
+        return np.array(result)
+
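As a quick illustration of the method above, here is a minimal sketch on a tiny gap-free DNA alignment; the expected scores follow directly from the inverse Shannon uncertainty definition (a fully conserved column scores 1.0, an evenly split two-character column scores roughly 0.5):

    from skbio import DNA, TabularMSA

    # Two sequences, two columns, no gaps or degenerate characters.
    msa = TabularMSA([DNA('AA'), DNA('AC')])

    # First column is fully conserved; second column is split between A and C.
    scores = msa.conservation()  # approximately array([1.0, 0.5])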
+    @experimental(as_of='0.4.1')
+    def gap_frequencies(self, axis='sequence', relative=False):
+        """Compute frequency of gap characters across an axis.
+
+        Parameters
+        ----------
+        axis : {'sequence', 'position'}, optional
+            Axis to compute gap character frequencies across. If 'sequence' or
+            0, frequencies are computed for each position in the MSA. If
+            'position' or 1, frequencies are computed for each sequence.
+        relative : bool, optional
+            If ``True``, return the relative frequency of gap characters
+            instead of the count.
+
+        Returns
+        -------
+        1D np.ndarray (int or float)
+            Vector of gap character frequencies across the specified axis. Will
+            have ``int`` dtype if ``relative=False`` and ``float`` dtype if
+            ``relative=True``.
+
+        Raises
+        ------
+        ValueError
+            If `axis` is invalid.
+
+        Notes
+        -----
+        If there are no positions in the MSA, ``axis='position'``, **and**
+        ``relative=True``, the relative frequency of gap characters in each
+        sequence will be ``np.nan``.
+
+        Examples
+        --------
+        Compute frequency of gap characters for each position in the MSA (i.e.,
+        *across* the sequence axis):
+
+        >>> from skbio import DNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACG'),
+        ...                   DNA('A--'),
+        ...                   DNA('AC.'),
+        ...                   DNA('AG.')])
+        >>> msa.gap_frequencies()
+        array([0, 1, 3])
+
+        Compute relative frequencies across the same axis:
+
+        >>> msa.gap_frequencies(relative=True)
+        array([ 0.  ,  0.25,  0.75])
+
+        Compute frequency of gap characters for each sequence (i.e., *across*
+        the position axis):
+
+        >>> msa.gap_frequencies(axis='position')
+        array([0, 2, 1, 1])
+
+        """
+        if self._is_sequence_axis(axis):
+            seq_iterator = self.iter_positions()
+            length = self.shape.sequence
+        else:
+            seq_iterator = self
+            length = self.shape.position
+
+        gap_freqs = []
+        for seq in seq_iterator:
+            # Not using Sequence.frequencies(relative=relative) because each
+            # gap character's relative frequency is computed separately and
+            # must be summed. This is less precise than summing the absolute
+            # frequencies of gap characters and dividing by the length. Likely
+            # not a big deal for typical gap characters ('-', '.') but can be
+            # problematic as the number of gap characters grows (we aren't
+            # guaranteed to always have two gap characters). See unit tests for
+            # an example.
+            freqs = seq.frequencies(chars=self.dtype.gap_chars)
+            gap_freqs.append(sum(viewvalues(freqs)))
+
+        gap_freqs = np.asarray(gap_freqs, dtype=float if relative else int)
+
+        if relative:
+            gap_freqs /= length
+
+        return gap_freqs
+
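As a cross-check of the counting above, per-sequence gap counts can also be derived directly from the characters, assuming the DNA gap alphabet of ``'-'`` and ``'.'``; this reproduces the ``axis='position'`` example in the docstring:

    from skbio import DNA, TabularMSA

    msa = TabularMSA([DNA('ACG'), DNA('A--'), DNA('AC.'), DNA('AG.')])

    # Count gap characters in each sequence by hand; this yields [0, 2, 1, 1],
    # matching msa.gap_frequencies(axis='position').
    manual = [sum(str(seq).count(c) for c in DNA.gap_chars) for seq in msa]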
+    @experimental(as_of='0.4.1')
+    def reassign_index(self, mapping=None, minter=None):
+        """Reassign index labels to sequences in this MSA.
+
+        Parameters
+        ----------
+        mapping : dict-like or callable, optional
+            Dictionary or callable that maps existing labels to new labels. Any
+            label without a mapping will remain the same.
+        minter : callable or metadata key, optional
+            If provided, defines an index label for each sequence. Can either
+            be a callable accepting a single argument (each sequence) or a key
+            into each sequence's ``metadata`` attribute.
+
+        Raises
+        ------
+        ValueError
+            If `mapping` and `minter` are both provided.
+
+        See Also
+        --------
+        index
+
+        Notes
+        -----
+        If neither `mapping` nor `minter` are provided, default pandas labels
+        will be used: integer labels ``0..(N-1)``, where ``N`` is the number of
+        sequences.
+
+        Examples
+        --------
+        Create a ``TabularMSA`` object with default index labels:
+
+        >>> from skbio import DNA, TabularMSA
+        >>> seqs = [DNA('ACG', metadata={'id': 'a'}),
+        ...         DNA('AC-', metadata={'id': 'b'})]
+        >>> msa = TabularMSA(seqs)
+        >>> msa.index
+        Int64Index([0, 1], dtype='int64')
+
+        Assign new index to the MSA using each sequence's ID as a label:
+
+        >>> msa.reassign_index(minter='id')
+        >>> msa.index
+        Index(['a', 'b'], dtype='object')
+
+        Assign default index:
+
+        >>> msa.reassign_index()
+        >>> msa.index
+        Int64Index([0, 1], dtype='int64')
+
+        Alternatively, a mapping of existing labels to new labels may be passed
+        via `mapping`:
+
+        >>> msa.reassign_index(mapping={0: 'seq1', 1: 'seq2'})
+        >>> msa.index
+        Index(['seq1', 'seq2'], dtype='object')
+
+        """
+        if mapping is not None and minter is not None:
+            raise ValueError(
+                "Cannot use both `mapping` and `minter` at the same time.")
+        if mapping is not None:
+            self._seqs.rename(mapping, inplace=True)
+        elif minter is not None:
+            index = [resolve_key(seq, minter) for seq in self._seqs]
+
+            # Cast to Index to identify tuples as a MultiIndex to match
+            # pandas constructor. Just setting would make an index of tuples.
+            self.index = pd.Index(index)
+        else:
+            self._seqs.reset_index(drop=True, inplace=True)
+
+    @experimental(as_of='0.4.1')
+    def append(self, sequence, minter=None, index=None):
+        """Append a sequence to the MSA without recomputing alignment.
+
+        Parameters
+        ----------
+        sequence : IUPACSequence
+            Sequence to be appended. Must match the dtype of the MSA and the
+            number of positions in the MSA.
+        minter : callable or metadata key, optional
+            Used to create an index label for the sequence being appended. If
+            callable, it generates a label directly. Otherwise it's treated as
+            a key into the sequence metadata. Note that `minter` cannot be
+            combined with `index`.
+        index : object, optional
+            Index label to use for the appended sequence. Note that `index`
+            cannot be combined with `minter`.
+
+        Raises
+        ------
+        ValueError
+            If both `minter` and `index` are provided.
+        ValueError
+            If neither `minter` nor `index` are provided and the MSA has a
+            non-default index.
+        TypeError
+            If the sequence object isn't an ``IUPACSequence``.
+        TypeError
+            If the type of the sequence does not match the dtype of the MSA.
+        ValueError
+            If the length of the sequence does not match the number of
+            positions in the MSA.
+
+        See Also
+        --------
+        extend
+        reassign_index
+
+        Notes
+        -----
+        If neither `minter` nor `index` are provided and this MSA has default
+        index labels, the new index label will be auto-incremented.
+
+        The MSA is not automatically re-aligned when a sequence is appended.
+        Therefore, this operation is not necessarily meaningful on its own.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACGT')])
+        >>> msa
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 1
+            position count: 4
+        ---------------------
+        ACGT
+        >>> msa.append(DNA('AG-T'))
+        >>> msa
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 2
+            position count: 4
+        ---------------------
+        ACGT
+        AG-T
+
+        Auto-incrementing index labels:
+
+        >>> msa.index
+        Int64Index([0, 1], dtype='int64')
+        >>> msa.append(DNA('ACGA'))
+        >>> msa.index
+        Int64Index([0, 1, 2], dtype='int64')
+
+        """
+        if index is not None:
+            index = [index]
+        self.extend([sequence], minter=minter, index=index)
+
+    @experimental(as_of='0.4.1')
+    def extend(self, sequences, minter=None, index=None):
+        """Extend this MSA with sequences without recomputing alignment.
+
+        Parameters
+        ----------
+        sequences : iterable of IUPACSequence
+            Sequences to be appended. Must match the dtype of the MSA and the
+            number of positions in the MSA.
+        minter : callable or metadata key, optional
+            Used to create index labels for the sequences being appended. If
+            callable, it generates a label directly. Otherwise it's treated as
+            a key into the sequence metadata. Note that `minter` cannot be
+            combined with `index`.
+        index : pd.Index consumable, optional
+            Index labels to use for the appended sequences. Must be the same
+            length as `sequences`. Must be able to be passed directly to
+            ``pd.Index`` constructor. Note that `index` cannot be combined
+            with `minter`.
+
+        Raises
+        ------
+        ValueError
+            If both `minter` and `index` are provided.
+        ValueError
+            If neither `minter` nor `index` are provided and the MSA has a
+            non-default index.
+        ValueError
+            If `index` is not the same length as `sequences`.
+        TypeError
+            If `sequences` contains an object that isn't an ``IUPACSequence``.
+        TypeError
+            If `sequences` contains a type that does not match the dtype of
+            the MSA.
+        ValueError
+            If the length of a sequence does not match the number of positions
+            in the MSA.
+
+        See Also
+        --------
+        append
+        reassign_index
+
+        Notes
+        -----
+        If neither `minter` nor `index` are provided and this MSA has default
+        index labels, the new index labels will be auto-incremented.
+
+        The MSA is not automatically re-aligned when appending sequences.
+        Therefore, this operation is not necessarily meaningful on its own.
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+        >>> msa = TabularMSA([DNA('ACGT')])
+        >>> msa
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 1
+            position count: 4
+        ---------------------
+        ACGT
+        >>> msa.extend([DNA('AG-T'), DNA('-G-T')])
+        >>> msa
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 3
+            position count: 4
+        ---------------------
+        ACGT
+        AG-T
+        -G-T
+
+        Auto-incrementing index labels:
+
+        >>> msa.index
+        Int64Index([0, 1, 2], dtype='int64')
+        >>> msa.extend([DNA('ACGA'), DNA('AC-T'), DNA('----')])
+        >>> msa.index
+        Int64Index([0, 1, 2, 3, 4, 5], dtype='int64')
+
+        """
+        if minter is not None and index is not None:
+            raise ValueError(
+                "Cannot use both `minter` and `index` at the same time.")
+
+        sequences = list(sequences)
+
+        if minter is None and index is None:
+            if self.index.equals(pd.Index(np.arange(len(self)))):
+                index = range(len(self), len(self) + len(sequences))
+            else:
+                raise ValueError(
+                    "MSA does not have default index labels, must provide "
+                    "a `minter` or `index` for sequence(s).")
+        elif minter is not None:
+            index = [resolve_key(seq, minter) for seq in sequences]
+
+        # Cast to Index to identify tuples as a MultiIndex to match
+        # pandas constructor. Just setting would make an index of tuples.
+        if not isinstance(index, pd.Index):
+            index = pd.Index(index)
+
+        self._assert_valid_sequences(sequences)
+
+        # pandas does not raise a user-friendly error if the sequence count
+        # and index length differ, so validate here before appending.
+        if len(sequences) != len(index):
+            raise ValueError(
+                "Number of sequences (%d) must match index length (%d)" %
+                (len(sequences), len(index)))
+        self._seqs = self._seqs.append(pd.Series(sequences, index=index))
+
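The auto-increment branch above only fires when the existing index equals the default integer labels ``0..(N-1)``; a brief sketch of that check in isolation, using plain pandas (the example labels are arbitrary):

    import numpy as np
    import pandas as pd

    default_index = pd.Index(np.arange(3))
    custom_index = pd.Index(['a', 'b', 'c'])

    # Mirrors the check used above: only the default labels compare equal.
    default_index.equals(pd.Index(np.arange(len(default_index))))  # True
    custom_index.equals(pd.Index(np.arange(len(custom_index))))    # False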
+    def _assert_valid_sequences(self, sequences):
+        if not sequences:
+            return
+
+        if len(self):
+            expected_dtype = self.dtype
+            expected_length = self.shape.position
+        else:
+            sequence = sequences[0]
+            expected_dtype = type(sequence)
+            if not issubclass(expected_dtype, IUPACSequence):
+                raise TypeError(
+                    "Each sequence must be of type %r, not type %r"
+                    % (IUPACSequence.__name__, expected_dtype.__name__))
+            expected_length = len(sequence)
+
+        for sequence in sequences:
+            dtype = type(sequence)
+            if dtype is not expected_dtype:
+                raise TypeError(
+                    "Sequences in MSA must have matching type. Type %r does "
+                    "not match type %r" % (dtype.__name__,
+                                           expected_dtype.__name__))
+
+            length = len(sequence)
+            if length != expected_length:
+                raise ValueError(
+                    "Each sequence's length must match the number of "
+                    "positions in the MSA: %d != %d"
+                    % (length, expected_length))
+
+    def join(self, other, how='strict'):
+        """Join this MSA with another by sequence (horizontally).
+
+        Sequences will be joined by index labels. MSA ``positional_metadata``
+        will be joined by columns. Use `how` to control join behavior.
+
+        The alignment is **not** recomputed during the join operation (see
+        the *Notes* section for details).
+
+        Parameters
+        ----------
+        other : TabularMSA
+            MSA to join with. Must have same ``dtype`` as this MSA.
+        how : {'strict', 'inner', 'outer', 'left', 'right'}, optional
+            How to join the sequences and MSA ``positional_metadata``:
+
+            * ``'strict'``: MSA indexes and ``positional_metadata`` columns
+              must match
+
+            * ``'inner'``: an inner-join of the MSA indexes and
+              ``positional_metadata`` columns (only the shared set of index
+              labels and columns are used)
+
+            * ``'outer'``: an outer-join of the MSA indexes and
+              ``positional_metadata`` columns (all index labels and columns are
+              used). Unshared sequences will be padded with the MSA's default
+              gap character (``TabularMSA.dtype.default_gap_char``). Unshared
+              columns will be padded with NaN.
+
+            * ``'left'``: a left-outer-join of the MSA indexes and
+              ``positional_metadata`` columns (this MSA's index labels and
+              columns are used). Padding of unshared data is handled the same
+              as ``'outer'``.
+
+            * ``'right'``: a right-outer-join of the MSA indexes and
+              ``positional_metadata`` columns (`other` index labels and columns
+              are used). Padding of unshared data is handled the same as
+              ``'outer'``.
+
+        Returns
+        -------
+        TabularMSA
+            Joined MSA. There is no guaranteed ordering to its index (call
+            ``sort`` to define one).
+
+        Raises
+        ------
+        ValueError
+            If `how` is invalid.
+        ValueError
+            If either the index of this MSA or the index of `other` contains
+            duplicates.
+        ValueError
+            If ``how='strict'`` and this MSA's index doesn't match with
+            `other`.
+        ValueError
+            If ``how='strict'`` and this MSA's ``positional_metadata`` columns
+            don't match with `other`.
+        TypeError
+            If `other` is not a subclass of ``TabularMSA``.
+        TypeError
+            If the ``dtype`` of `other` does not match this MSA's ``dtype``.
+
+        See Also
+        --------
+        extend
+        sort
+        skbio.sequence.Sequence.concat
+
+        Notes
+        -----
+        The join operation does not automatically perform re-alignment;
+        sequences are simply joined together. Therefore, this operation is not
+        necessarily meaningful on its own.
+
+        The index labels of this MSA must be unique. Likewise, the index labels
+        of `other` must be unique.
+
+        The MSA-wide and per-sequence metadata (``TabularMSA.metadata`` and
+        ``Sequence.metadata``) are not retained on the joined ``TabularMSA``.
+
+        The positional metadata of the sequences will be outer-joined,
+        regardless of `how` (using ``Sequence.concat(how='outer')``).
+
+        If the join operation results in a ``TabularMSA`` without any
+        sequences, the MSA's ``positional_metadata`` will not be set.
+
+        Examples
+        --------
+        Join MSAs by sequence:
+
+        >>> from skbio import DNA, TabularMSA
+        >>> msa1 = TabularMSA([DNA('AC'),
+        ...                    DNA('A-')])
+        >>> msa2 = TabularMSA([DNA('G-T'),
+        ...                    DNA('T--')])
+        >>> joined = msa1.join(msa2)
+        >>> joined
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 2
+            position count: 5
+        ---------------------
+        ACG-T
+        A-T--
+
+        Sequences are joined based on MSA index labels:
+
+        >>> msa1 = TabularMSA([DNA('AC'),
+        ...                    DNA('A-')], index=['a', 'b'])
+        >>> msa2 = TabularMSA([DNA('G-T'),
+        ...                    DNA('T--')], index=['b', 'a'])
+        >>> joined = msa1.join(msa2)
+        >>> joined
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 2
+            position count: 5
+        ---------------------
+        ACT--
+        A-G-T
+        >>> joined.index
+        Index(['a', 'b'], dtype='object')
+
+        By default both MSA indexes must match. Use ``how`` to specify an inner
+        join:
+
+        >>> msa1 = TabularMSA([DNA('AC'),
+        ...                    DNA('A-'),
+        ...                    DNA('-C')], index=['a', 'b', 'c'],
+        ...                   positional_metadata={'col1': [42, 43],
+        ...                                        'col2': [1, 2]})
+        >>> msa2 = TabularMSA([DNA('G-T'),
+        ...                    DNA('T--'),
+        ...                    DNA('ACG')], index=['b', 'a', 'z'],
+        ...                   positional_metadata={'col2': [3, 4, 5],
+        ...                                        'col3': ['f', 'o', 'o']})
+        >>> joined = msa1.join(msa2, how='inner')
+        >>> joined
+        TabularMSA[DNA]
+        --------------------------
+        Positional metadata:
+            'col2': <dtype: int64>
+        Stats:
+            sequence count: 2
+            position count: 5
+        --------------------------
+        A-G-T
+        ACT--
+        >>> joined.index
+        Index(['b', 'a'], dtype='object')
+        >>> joined.positional_metadata
+           col2
+        0     1
+        1     2
+        2     3
+        3     4
+        4     5
+
+        When performing an outer join (``'outer'``, ``'left'``, or
+        ``'right'``), unshared sequences are padded with gaps and unshared
+        ``positional_metadata`` columns are padded with NaN:
+
+        >>> joined = msa1.join(msa2, how='outer')
+        >>> joined
+        TabularMSA[DNA]
+        ----------------------------
+        Positional metadata:
+            'col1': <dtype: float64>
+            'col2': <dtype: int64>
+            'col3': <dtype: object>
+        Stats:
+            sequence count: 4
+            position count: 5
+        ----------------------------
+        ACT--
+        A-G-T
+        -C---
+        --ACG
+        >>> joined.index
+        Index(['a', 'b', 'c', 'z'], dtype='object')
+        >>> joined.positional_metadata
+           col1  col2 col3
+        0    42     1  NaN
+        1    43     2  NaN
+        2   NaN     3    f
+        3   NaN     4    o
+        4   NaN     5    o
+
+        """
+        if how not in {'strict', 'inner', 'outer', 'left', 'right'}:
+            raise ValueError(
+                "`how` must be 'strict', 'inner', 'outer', 'left', or "
+                "'right'.")
+
+        self._assert_joinable(other)
+
+        join_index, concat_kwargs = self._get_join_index(other, how)
+
+        joined_seqs = []
+        for label in join_index:
+            left_seq = self._get_sequence_for_join(label)
+            right_seq = other._get_sequence_for_join(label)
+
+            joined_seqs.append(
+                self.dtype.concat([left_seq, right_seq], how='outer'))
+
+        # TODO: update when #1198 is implemented.
+        joined_positional_metadata = None
+        if joined_seqs:
+            joined_positional_metadata = pd.concat(
+                [self.positional_metadata, other.positional_metadata],
+                ignore_index=True, **concat_kwargs)
+
+            if not self.has_positional_metadata():
+                del self.positional_metadata
+            if not other.has_positional_metadata():
+                del other.positional_metadata
+
+        joined = self.__class__(joined_seqs, index=join_index,
+                                positional_metadata=joined_positional_metadata)
+
+        if not joined.has_positional_metadata():
+            del joined.positional_metadata
+
+        return joined
+
+    def _assert_joinable(self, other):
+        if not isinstance(other, TabularMSA):
+            raise TypeError(
+                "`other` must be a `TabularMSA` object, not type %r" %
+                type(other).__name__)
+
+        if self.dtype is not other.dtype:
+            raise TypeError(
+                "`other` dtype %r does not match this MSA's dtype %r" %
+                (other.dtype if other.dtype is None else other.dtype.__name__,
+                 self.dtype if self.dtype is None else self.dtype.__name__))
+
+        if not self.index.is_unique:
+            raise ValueError(
+                "This MSA's index labels must be unique.")
+        if not other.index.is_unique:
+            raise ValueError(
+                "`other`'s index labels must be unique.")
+
+    def _get_join_index(self, other, how):
+        if how == 'strict':
+            diff = self.index.sym_diff(other.index)
+            if len(diff) > 0:
+                raise ValueError(
+                    "Index labels must all match with `how='strict'`")
+
+            diff = self.positional_metadata.columns.sym_diff(
+                other.positional_metadata.columns)
+
+            if not self.has_positional_metadata():
+                del self.positional_metadata
+            if not other.has_positional_metadata():
+                del other.positional_metadata
+
+            if len(diff) > 0:
+                raise ValueError(
+                    "Positional metadata columns must all match with "
+                    "`how='strict'`")
+
+            join_index = self.index
+            concat_kwargs = {'join': 'inner'}
+        elif how == 'inner':
+            join_index = self.index.intersection(other.index)
+            concat_kwargs = {'join': 'inner'}
+        elif how == 'outer':
+            join_index = self.index.union(other.index)
+            concat_kwargs = {'join': 'outer'}
+        elif how == 'left':
+            join_index = self.index
+            concat_kwargs = {'join_axes': [self.positional_metadata.columns]}
+        else:  # how='right'
+            join_index = other.index
+            concat_kwargs = {'join_axes': [other.positional_metadata.columns]}
+
+        return join_index, concat_kwargs
+
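The helper above reduces each join mode to a pandas index operation plus keyword arguments for ``pd.concat``; a brief sketch of how the label sets differ between ``'inner'`` and ``'outer'``, using plain pandas indexes (the labels are arbitrary):

    import pandas as pd

    left = pd.Index(['a', 'b', 'c'])
    right = pd.Index(['b', 'a', 'z'])

    # 'inner' keeps only the labels shared by both MSAs; 'outer' keeps the
    # union of labels from both MSAs.
    inner_labels = left.intersection(right)  # the shared labels 'a' and 'b'
    outer_labels = left.union(right)         # every one of 'a', 'b', 'c', 'z'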
+    def _get_sequence_for_join(self, label):
+        if label in self.index:
+            return self.loc[label]
+        else:
+            return self.dtype(
+                self.dtype.default_gap_char * self.shape.position)
+
+    def sort(self, level=None, ascending=True):
+        """Sort sequences by index label in-place.
+
+        Parameters
+        ----------
+        level : int or object, optional
+            Index level to sort on when index is a ``pd.MultiIndex``. Does
+            nothing otherwise.
+        ascending : bool, optional
+            If ``False``, sort in descending (i.e., reverse) order.
+
+        See Also
+        --------
+        index
+        reassign_index
+        pandas.Series.sort_index
+
+        Notes
+        -----
+        This is a passthrough to ``pd.Series.sort_index`` internally.
+
+        Examples
+        --------
+        Create a ``TabularMSA`` object with sequence identifiers as index
+        labels:
+
+        >>> from skbio import DNA, TabularMSA
+        >>> seqs = [DNA('ACG', metadata={'id': 'c'}),
+        ...         DNA('AC-', metadata={'id': 'b'}),
+        ...         DNA('AC-', metadata={'id': 'a'})]
+        >>> msa = TabularMSA(seqs, minter='id')
+        >>> msa
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 3
+            position count: 3
+        ---------------------
+        ACG
+        AC-
+        AC-
+        >>> msa.index
+        Index(['c', 'b', 'a'], dtype='object')
+
+        Sort the sequences in alphabetical order by index label:
+
+        >>> msa.sort()
+        >>> msa
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 3
+            position count: 3
+        ---------------------
+        AC-
+        AC-
+        ACG
+        >>> msa.index
+        Index(['a', 'b', 'c'], dtype='object')
+
+        Note that since the sort is in-place, the ``TabularMSA`` object is
+        modified (a new object is *not* returned).
+
+        """
+        series = self._seqs.sort_index(ascending=ascending, level=level)
+        self._seqs = series
+
+    @experimental(as_of='0.4.1')
+    def to_dict(self):
+        """Create a ``dict`` from this ``TabularMSA``.
+
+        Returns
+        -------
+        dict
+            Dictionary constructed from the index labels and sequences in this
+            ``TabularMSA``.
+
+        Raises
+        ------
+        ValueError
+            If index labels are not unique.
+
+        See Also
+        --------
+        from_dict
+        index
+        reassign_index
+
+        Examples
+        --------
+        >>> from skbio import DNA, TabularMSA
+        >>> seqs = [DNA('ACGT'), DNA('A--T')]
+        >>> msa = TabularMSA(seqs, index=['a', 'b'])
+        >>> dictionary = msa.to_dict()
+        >>> dictionary == {'a': DNA('ACGT'), 'b': DNA('A--T')}
+        True
+
+        """
+        if self.index.is_unique:
+            return self._seqs.to_dict()
+        else:
+            raise ValueError("Cannot convert to dict. Index labels are not"
+                             " unique.")
+
+    def _is_sequence_axis(self, axis):
+        if axis == 'sequence' or axis == 0:
+            return True
+        elif axis == 'position' or axis == 1:
+            return False
+        else:
+            raise ValueError(
+                "`axis` must be 'sequence' (0) or 'position' (1), not %r"
+                % axis)
+
+    @overrides(PositionalMetadataMixin)
+    def _positional_metadata_axis_len_(self):
+        return self.shape.position
diff --git a/skbio/alignment/tests/test_alignment.py b/skbio/alignment/tests/test_alignment.py
deleted file mode 100644
index 80638d5..0000000
--- a/skbio/alignment/tests/test_alignment.py
+++ /dev/null
@@ -1,735 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-
-import six
-
-from unittest import TestCase, main
-from collections import Counter, defaultdict
-
-import numpy as np
-from scipy.spatial.distance import hamming
-
-from skbio import (Sequence, DNA, RNA,
-                   DistanceMatrix, Alignment, SequenceCollection)
-from skbio.alignment import (SequenceCollectionError, AlignmentError)
-
-
-class SequenceCollectionTests(TestCase):
-    def setUp(self):
-        self.d1 = DNA('GATTACA', metadata={'id': "d1"})
-        self.d2 = DNA('TTG', metadata={'id': "d2"})
-        self.d3 = DNA('GTATACA', metadata={'id': "d3"})
-        self.r1 = RNA('GAUUACA', metadata={'id': "r1"})
-        self.r2 = RNA('UUG', metadata={'id': "r2"})
-        self.r3 = RNA('U-----UGCC--', metadata={'id': "r3"})
-
-        self.seqs1 = [self.d1, self.d2]
-        self.seqs2 = [self.r1, self.r2, self.r3]
-        self.seqs3 = self.seqs1 + self.seqs2
-        self.seqs4 = [self.d1, self.d3]
-
-        self.s1 = SequenceCollection(self.seqs1)
-        self.s2 = SequenceCollection(self.seqs2)
-        self.s3 = SequenceCollection(self.seqs3)
-        self.s4 = SequenceCollection(self.seqs4)
-        self.empty = SequenceCollection([])
-
-    def test_init(self):
-        SequenceCollection(self.seqs1)
-        SequenceCollection(self.seqs2)
-        SequenceCollection(self.seqs3)
-        SequenceCollection([])
-
-    def test_init_fail(self):
-        # sequences with overlapping ids
-        s1 = [self.d1, self.d1]
-        self.assertRaises(SequenceCollectionError, SequenceCollection, s1)
-
-    def test_init_fail_no_id(self):
-        seq = Sequence('ACGTACGT')
-        with six.assertRaisesRegex(self, SequenceCollectionError,
-                                   "'id' must be included in the sequence "
-                                   "metadata"):
-            SequenceCollection([seq])
-
-    def test_contains(self):
-        self.assertTrue('d1' in self.s1)
-        self.assertTrue('r2' in self.s2)
-        self.assertFalse('r2' in self.s1)
-
-    def test_eq(self):
-        self.assertTrue(self.s1 == self.s1)
-        self.assertFalse(self.s1 == self.s2)
-
-        # different objects can be equal
-        self.assertTrue(self.s1 == SequenceCollection([self.d1, self.d2]))
-        self.assertTrue(SequenceCollection([self.d1, self.d2]) == self.s1)
-
-        # SequenceCollections with different number of sequences are not equal
-        self.assertFalse(self.s1 == SequenceCollection([self.d1]))
-
-        class FakeSequenceCollection(SequenceCollection):
-            pass
-        # SequenceCollections of different types are not equal
-        self.assertFalse(self.s4 == FakeSequenceCollection([self.d1, self.d3]))
-        self.assertFalse(self.s4 == Alignment([self.d1, self.d3]))
-
-        # SequenceCollections with different sequences are not equal
-        self.assertFalse(self.s1 == SequenceCollection([self.d1, self.r1]))
-
-    def test_getitem(self):
-        self.assertEqual(self.s1[0], self.d1)
-        self.assertEqual(self.s1[1], self.d2)
-        self.assertEqual(self.s2[0], self.r1)
-        self.assertEqual(self.s2[1], self.r2)
-
-        self.assertRaises(IndexError, self.empty.__getitem__, 0)
-        self.assertRaises(KeyError, self.empty.__getitem__, '0')
-
-    def test_iter(self):
-        s1_iter = iter(self.s1)
-        count = 0
-        for actual, expected in zip(s1_iter, self.seqs1):
-            count += 1
-            self.assertEqual(actual, expected)
-        self.assertEqual(count, len(self.seqs1))
-        self.assertRaises(StopIteration, lambda: next(s1_iter))
-
-    def test_len(self):
-        self.assertEqual(len(self.s1), 2)
-        self.assertEqual(len(self.s2), 3)
-        self.assertEqual(len(self.s3), 5)
-        self.assertEqual(len(self.empty), 0)
-
-    def test_ne(self):
-        self.assertFalse(self.s1 != self.s1)
-        self.assertTrue(self.s1 != self.s2)
-
-        # SequenceCollections with different number of sequences are not equal
-        self.assertTrue(self.s1 != SequenceCollection([self.d1]))
-
-        class FakeSequenceCollection(SequenceCollection):
-            pass
-        # SequenceCollections of different types are not equal
-        self.assertTrue(self.s4 != FakeSequenceCollection([self.d1, self.d3]))
-        self.assertTrue(self.s4 != Alignment([self.d1, self.d3]))
-
-        # SequenceCollections with different sequences are not equal
-        self.assertTrue(self.s1 !=
-                        SequenceCollection([self.d1, self.r1]))
-
-    def test_repr(self):
-        self.assertEqual(repr(self.s1),
-                         "<SequenceCollection: n=2; "
-                         "mean +/- std length=5.00 +/- 2.00>")
-        self.assertEqual(repr(self.s2),
-                         "<SequenceCollection: n=3; "
-                         "mean +/- std length=7.33 +/- 3.68>")
-        self.assertEqual(repr(self.s3),
-                         "<SequenceCollection: n=5; "
-                         "mean +/- std length=6.40 +/- 3.32>")
-        self.assertEqual(repr(self.empty),
-                         "<SequenceCollection: n=0; "
-                         "mean +/- std length=0.00 +/- 0.00>")
-
-    def test_reversed(self):
-        s1_iter = reversed(self.s1)
-        count = 0
-        for actual, expected in zip(s1_iter, self.seqs1[::-1]):
-            count += 1
-            self.assertEqual(actual, expected)
-        self.assertEqual(count, len(self.seqs1))
-        self.assertRaises(StopIteration, lambda: next(s1_iter))
-
-    def test_kmer_frequencies(self):
-        expected1 = Counter({'GAT': 1, 'TAC': 1})
-        expected2 = Counter({'TTG': 1})
-        self.assertEqual(
-            self.s1.kmer_frequencies(k=3, overlap=False, relative=False),
-            [expected1, expected2])
-
-        expected1 = defaultdict(float)
-        expected1['A'] = 3 / 7.
-        expected1['C'] = 1 / 7.
-        expected1['G'] = 1 / 7.
-        expected1['T'] = 2 / 7.
-        expected2 = defaultdict(float)
-        expected2['G'] = 1 / 3.
-        expected2['T'] = 2 / 3.
-        self.assertEqual(self.s1.kmer_frequencies(k=1, relative=True),
-                         [expected1, expected2])
-
-        expected1 = defaultdict(float)
-        expected1['GAT'] = 1 / 2.
-        expected1['TAC'] = 1 / 2.
-        expected2 = defaultdict(float)
-        expected2['TTG'] = 1 / 1.
-        self.assertEqual(
-            self.s1.kmer_frequencies(k=3, overlap=False, relative=True),
-            [expected1, expected2])
-
-        self.assertEqual(self.empty.kmer_frequencies(k=1, relative=True), [])
-
-        # Test to ensure floating point precision bug isn't present. See the
-        # tests for Sequence.kmer_frequencies for more details.
-        sc = SequenceCollection([RNA('C' * 10, metadata={'id': 's1'}),
-                                 RNA('G' * 10, metadata={'id': 's2'})])
-        self.assertEqual(sc.kmer_frequencies(1, relative=True),
-                         [defaultdict(float, {'C': 1.0}),
-                          defaultdict(float, {'G': 1.0})])
-
-    def test_str(self):
-        exp1 = ">d1\nGATTACA\n>d2\nTTG\n"
-        self.assertEqual(str(self.s1), exp1)
-        exp2 = ">r1\nGAUUACA\n>r2\nUUG\n>r3\nU-----UGCC--\n"
-        self.assertEqual(str(self.s2), exp2)
-        exp4 = ""
-        self.assertEqual(str(self.empty), exp4)
-
-    def test_distances(self):
-        s1 = SequenceCollection([DNA("ACGT", metadata={'id': "d1"}),
-                                 DNA("ACGG", metadata={'id': "d2"})])
-        expected = [[0, 0.25],
-                    [0.25, 0]]
-        expected = DistanceMatrix(expected, ['d1', 'd2'])
-
-        def h(s1, s2):
-            return hamming(s1.values, s2.values)
-        actual = s1.distances(h)
-        self.assertEqual(actual, expected)
-
-        # alt distance function provided
-        def dumb_distance(s1, s2):
-            return 42.
-        expected = [[0, 42.],
-                    [42., 0]]
-        expected = DistanceMatrix(expected, ['d1', 'd2'])
-        actual = s1.distances(dumb_distance)
-        self.assertEqual(actual, expected)
-
-    def test_distribution_stats(self):
-        actual1 = self.s1.distribution_stats()
-        self.assertEqual(actual1[0], 2)
-        self.assertAlmostEqual(actual1[1], 5.0, 3)
-        self.assertAlmostEqual(actual1[2], 2.0, 3)
-
-        actual2 = self.s2.distribution_stats()
-        self.assertEqual(actual2[0], 3)
-        self.assertAlmostEqual(actual2[1], 7.333, 3)
-        self.assertAlmostEqual(actual2[2], 3.682, 3)
-
-        actual3 = self.s3.distribution_stats()
-        self.assertEqual(actual3[0], 5)
-        self.assertAlmostEqual(actual3[1], 6.400, 3)
-        self.assertAlmostEqual(actual3[2], 3.323, 3)
-
-        actual4 = self.empty.distribution_stats()
-        self.assertEqual(actual4[0], 0)
-        self.assertEqual(actual4[1], 0.0)
-        self.assertEqual(actual4[2], 0.0)
-
-    def test_degap(self):
-        expected = SequenceCollection([
-            RNA('GAUUACA', metadata={'id': "r1"}),
-            RNA('UUG', metadata={'id': "r2"}),
-            RNA('UUGCC', metadata={'id': "r3"})])
-        actual = self.s2.degap()
-        self.assertEqual(actual, expected)
-
-    def test_get_seq(self):
-        self.assertEqual(self.s1.get_seq('d1'), self.d1)
-        self.assertEqual(self.s1.get_seq('d2'), self.d2)
-
-    def test_ids(self):
-        self.assertEqual(self.s1.ids(), ['d1', 'd2'])
-        self.assertEqual(self.s2.ids(), ['r1', 'r2', 'r3'])
-        self.assertEqual(self.s3.ids(),
-                         ['d1', 'd2', 'r1', 'r2', 'r3'])
-        self.assertEqual(self.empty.ids(), [])
-
-    def test_update_ids_default_behavior(self):
-        # 3 seqs
-        exp_sc = SequenceCollection([
-            RNA('GAUUACA', metadata={'id': "1"}),
-            RNA('UUG', metadata={'id': "2"}),
-            RNA('U-----UGCC--', metadata={'id': "3"})
-        ])
-        exp_id_map = {'1': 'r1', '2': 'r2', '3': 'r3'}
-        obs_sc, obs_id_map = self.s2.update_ids()
-        self.assertEqual(obs_sc, exp_sc)
-        self.assertEqual(obs_id_map, exp_id_map)
-
-        # empty
-        obs_sc, obs_id_map = self.empty.update_ids()
-        self.assertEqual(obs_sc, self.empty)
-        self.assertEqual(obs_id_map, {})
-
-    def test_update_ids_prefix(self):
-        # 3 seqs
-        exp_sc = SequenceCollection([
-            RNA('GAUUACA', metadata={'id': "abc1"}),
-            RNA('UUG', metadata={'id': "abc2"}),
-            RNA('U-----UGCC--', metadata={'id': "abc3"})
-        ])
-        exp_id_map = {'abc1': 'r1', 'abc2': 'r2', 'abc3': 'r3'}
-        obs_sc, obs_id_map = self.s2.update_ids(prefix='abc')
-        self.assertEqual(obs_sc, exp_sc)
-        self.assertEqual(obs_id_map, exp_id_map)
-
-        # empty
-        obs_sc, obs_id_map = self.empty.update_ids(prefix='abc')
-        self.assertEqual(obs_sc, self.empty)
-        self.assertEqual(obs_id_map, {})
-
-    def test_update_ids_func_parameter(self):
-        def append_42(ids):
-            return [id_ + '-42' for id_ in ids]
-
-        # 3 seqs
-        exp_sc = SequenceCollection([
-            RNA('GAUUACA', metadata={'id': "r1-42"}),
-            RNA('UUG', metadata={'id': "r2-42"}),
-            RNA('U-----UGCC--', metadata={'id': "r3-42"})
-        ])
-        exp_id_map = {'r1-42': 'r1', 'r2-42': 'r2', 'r3-42': 'r3'}
-        obs_sc, obs_id_map = self.s2.update_ids(func=append_42)
-        self.assertEqual(obs_sc, exp_sc)
-        self.assertEqual(obs_id_map, exp_id_map)
-
-        # empty
-        obs_sc, obs_id_map = self.empty.update_ids(func=append_42)
-        self.assertEqual(obs_sc, self.empty)
-        self.assertEqual(obs_id_map, {})
-
-    def test_update_ids_ids_parameter(self):
-        # 3 seqs
-        exp_sc = SequenceCollection([
-            RNA('GAUUACA', metadata={'id': "abc"}),
-            RNA('UUG', metadata={'id': "def"}),
-            RNA('U-----UGCC--', metadata={'id': "ghi"})
-        ])
-        exp_id_map = {'abc': 'r1', 'def': 'r2', 'ghi': 'r3'}
-        obs_sc, obs_id_map = self.s2.update_ids(ids=('abc', 'def', 'ghi'))
-        self.assertEqual(obs_sc, exp_sc)
-        self.assertEqual(obs_id_map, exp_id_map)
-
-        # empty
-        obs_sc, obs_id_map = self.empty.update_ids(ids=[])
-        self.assertEqual(obs_sc, self.empty)
-        self.assertEqual(obs_id_map, {})
-
-    def test_update_ids_sequence_attributes_propagated(self):
-        # 1 seq
-        exp_sc = Alignment([
-            DNA('ACGT', metadata={'id': "abc", 'description': 'desc'},
-                positional_metadata={'quality': range(4)})
-        ])
-        exp_id_map = {'abc': 'seq1'}
-
-        obj = Alignment([
-            DNA('ACGT', metadata={'id': "seq1", 'description': 'desc'},
-                positional_metadata={'quality': range(4)})
-        ])
-
-        obs_sc, obs_id_map = obj.update_ids(ids=('abc',))
-        self.assertEqual(obs_sc, exp_sc)
-        self.assertEqual(obs_id_map, exp_id_map)
-
-        # 2 seqs
-        exp_sc = Alignment([
-            DNA('ACGT', metadata={'id': "abc", 'description': 'desc1'},
-                positional_metadata={'quality': range(4)}),
-            DNA('TGCA', metadata={'id': "def", 'description': 'desc2'},
-                positional_metadata={'quality': range(4)[::-1]})
-        ])
-        exp_id_map = {'abc': 'seq1', 'def': 'seq2'}
-
-        obj = Alignment([
-            DNA('ACGT', metadata={'id': "seq1", 'description': 'desc1'},
-                positional_metadata={'quality': (0, 1, 2, 3)}),
-            DNA('TGCA', metadata={'id': "seq2", 'description': 'desc2'},
-                positional_metadata={'quality': (3, 2, 1, 0)})
-        ])
-
-        obs_sc, obs_id_map = obj.update_ids(ids=('abc', 'def'))
-        self.assertEqual(obs_sc, exp_sc)
-        self.assertEqual(obs_id_map, exp_id_map)
-
-    def test_update_ids_invalid_parameter_combos(self):
-        with six.assertRaisesRegex(self, SequenceCollectionError,
-                                   'ids and func'):
-            self.s1.update_ids(func=lambda e: e, ids=['foo', 'bar'])
-
-        with six.assertRaisesRegex(self, SequenceCollectionError, 'prefix'):
-            self.s1.update_ids(ids=['foo', 'bar'], prefix='abc')
-
-        with six.assertRaisesRegex(self, SequenceCollectionError, 'prefix'):
-            self.s1.update_ids(func=lambda e: e, prefix='abc')
-
-    def test_update_ids_invalid_ids(self):
-        # incorrect number of new ids
-        with six.assertRaisesRegex(self, SequenceCollectionError, '3 != 2'):
-            self.s1.update_ids(ids=['foo', 'bar', 'baz'])
-        with six.assertRaisesRegex(self, SequenceCollectionError, '4 != 2'):
-            self.s1.update_ids(func=lambda e: ['foo', 'bar', 'baz', 'abc'])
-
-        # duplicates
-        with six.assertRaisesRegex(self, SequenceCollectionError, 'foo'):
-            self.s2.update_ids(ids=['foo', 'bar', 'foo'])
-        with six.assertRaisesRegex(self, SequenceCollectionError, 'bar'):
-            self.s2.update_ids(func=lambda e: ['foo', 'bar', 'bar'])
-
-    def test_is_empty(self):
-        self.assertFalse(self.s1.is_empty())
-        self.assertFalse(self.s2.is_empty())
-        self.assertFalse(self.s3.is_empty())
-
-        self.assertTrue(self.empty.is_empty())
-
-    def test_iteritems(self):
-        self.assertEqual(list(self.s1.iteritems()),
-                         [(s.metadata['id'], s) for s in self.s1])
-
-    def test_sequence_count(self):
-        self.assertEqual(self.s1.sequence_count(), 2)
-        self.assertEqual(self.s2.sequence_count(), 3)
-        self.assertEqual(self.s3.sequence_count(), 5)
-        self.assertEqual(self.empty.sequence_count(), 0)
-
-    def test_sequence_lengths(self):
-        self.assertEqual(self.s1.sequence_lengths(), [7, 3])
-        self.assertEqual(self.s2.sequence_lengths(), [7, 3, 12])
-        self.assertEqual(self.s3.sequence_lengths(), [7, 3, 7, 3, 12])
-        self.assertEqual(self.empty.sequence_lengths(), [])
-
-
-class AlignmentTests(TestCase):
-
-    def setUp(self):
-        self.d1 = DNA('..ACC-GTTGG..', metadata={'id': "d1"})
-        self.d2 = DNA('TTACCGGT-GGCC', metadata={'id': "d2"})
-        self.d3 = DNA('.-ACC-GTTGC--', metadata={'id': "d3"})
-
-        self.r1 = RNA('UUAU-', metadata={'id': "r1"})
-        self.r2 = RNA('ACGUU', metadata={'id': "r2"})
-
-        self.seqs1 = [self.d1, self.d2, self.d3]
-        self.seqs2 = [self.r1, self.r2]
-
-        self.a1 = Alignment(self.seqs1)
-        self.a2 = Alignment(self.seqs2)
-        self.a3 = Alignment(self.seqs2, score=42.0,
-                            start_end_positions=[(0, 3), (5, 9)])
-        self.a4 = Alignment(self.seqs2, score=-42.0,
-                            start_end_positions=[(1, 4), (6, 10)])
-
-        # no sequences
-        self.empty = Alignment([])
-
-        # sequences, but no positions
-        self.no_positions = Alignment([RNA('', metadata={'id': 'a'}),
-                                       RNA('', metadata={'id': 'b'})])
-
-    def test_degap(self):
-        expected = SequenceCollection([
-            DNA('ACCGTTGG', metadata={'id': "d1"}),
-            DNA('TTACCGGTGGCC', metadata={'id': "d2"}),
-            DNA('ACCGTTGC', metadata={'id': "d3"})])
-        actual = self.a1.degap()
-        self.assertEqual(actual, expected)
-
-        expected = SequenceCollection([
-            RNA('UUAU', metadata={'id': "r1"}),
-            RNA('ACGUU', metadata={'id': "r2"})])
-        actual = self.a2.degap()
-        self.assertEqual(actual, expected)
-
-    def test_distances(self):
-        expected = [[0, 6. / 13, 4. / 13],
-                    [6. / 13, 0, 7. / 13],
-                    [4. / 13, 7. / 13, 0]]
-        expected = DistanceMatrix(expected, ['d1', 'd2', 'd3'])
-        actual = self.a1.distances()
-        self.assertEqual(actual, expected)
-
-        # alt distance function provided
-        def dumb_distance(s1, s2):
-            return 42.
-        expected = [[0, 42., 42.],
-                    [42., 0, 42.],
-                    [42., 42., 0]]
-        expected = DistanceMatrix(expected, ['d1', 'd2', 'd3'])
-        actual = self.a1.distances(dumb_distance)
-        self.assertEqual(actual, expected)
-
-    def test_score(self):
-        self.assertEqual(self.a3.score(), 42.0)
-        self.assertEqual(self.a4.score(), -42.0)
-
-    def test_start_end_positions(self):
-        self.assertEqual(self.a3.start_end_positions(), [(0, 3), (5, 9)])
-        self.assertEqual(self.a4.start_end_positions(), [(1, 4), (6, 10)])
-
-    def test_subalignment(self):
-        # keep seqs by ids
-        actual = self.a1.subalignment(seqs_to_keep=['d1', 'd3'])
-        expected = Alignment([self.d1, self.d3])
-        self.assertEqual(actual, expected)
-
-        # keep seqs by indices
-        actual = self.a1.subalignment(seqs_to_keep=[0, 2])
-        expected = Alignment([self.d1, self.d3])
-        self.assertEqual(actual, expected)
-
-        # keep seqs by ids (invert)
-        actual = self.a1.subalignment(seqs_to_keep=['d1', 'd3'],
-                                      invert_seqs_to_keep=True)
-        expected = Alignment([self.d2])
-        self.assertEqual(actual, expected)
-
-        # keep seqs by indices (invert)
-        actual = self.a1.subalignment(seqs_to_keep=[0, 2],
-                                      invert_seqs_to_keep=True)
-        expected = Alignment([self.d2])
-        self.assertEqual(actual, expected)
-
-        # keep positions
-        actual = self.a1.subalignment(positions_to_keep=[0, 2, 3])
-        d1 = DNA('.AC', metadata={'id': "d1"})
-        d2 = DNA('TAC', metadata={'id': "d2"})
-        d3 = DNA('.AC', metadata={'id': "d3"})
-        expected = Alignment([d1, d2, d3])
-        self.assertEqual(actual, expected)
-
-        # keep positions (invert)
-        actual = self.a1.subalignment(positions_to_keep=[0, 2, 3],
-                                      invert_positions_to_keep=True)
-        d1 = DNA('.C-GTTGG..', metadata={'id': "d1"})
-        d2 = DNA('TCGGT-GGCC', metadata={'id': "d2"})
-        d3 = DNA('-C-GTTGC--', metadata={'id': "d3"})
-        expected = Alignment([d1, d2, d3])
-        self.assertEqual(actual, expected)
-
-        # keep seqs and positions
-        actual = self.a1.subalignment(seqs_to_keep=[0, 2],
-                                      positions_to_keep=[0, 2, 3])
-        d1 = DNA('.AC', metadata={'id': "d1"})
-        d3 = DNA('.AC', metadata={'id': "d3"})
-        expected = Alignment([d1, d3])
-        self.assertEqual(actual, expected)
-
-        # keep seqs and positions (invert)
-        actual = self.a1.subalignment(seqs_to_keep=[0, 2],
-                                      positions_to_keep=[0, 2, 3],
-                                      invert_seqs_to_keep=True,
-                                      invert_positions_to_keep=True)
-        d2 = DNA('TCGGT-GGCC', metadata={'id': "d2"})
-        expected = Alignment([d2])
-        self.assertEqual(actual, expected)
-
-    def test_subalignment_filter_out_everything(self):
-        exp = Alignment([])
-
-        # no sequences
-        obs = self.a1.subalignment(seqs_to_keep=None, invert_seqs_to_keep=True)
-        self.assertEqual(obs, exp)
-
-        # no positions
-        obs = self.a1.subalignment(positions_to_keep=None,
-                                   invert_positions_to_keep=True)
-        self.assertEqual(obs, exp)
-
-    def test_init_not_equal_lengths(self):
-        invalid_seqs = [self.d1, self.d2, self.d3,
-                        DNA('.-ACC-GTGC--', metadata={'id': "i2"})]
-        self.assertRaises(AlignmentError, Alignment,
-                          invalid_seqs)
-
-    def test_init_equal_lengths(self):
-        seqs = [self.d1, self.d2, self.d3]
-        Alignment(seqs)
-
-    def test_iter_positions(self):
-        actual = list(self.a2.iter_positions())
-        expected = [
-            [RNA('U', metadata={'id': 'r1'}), RNA('A', metadata={'id': 'r2'})],
-            [RNA('U', metadata={'id': 'r1'}), RNA('C', metadata={'id': 'r2'})],
-            [RNA('A', metadata={'id': 'r1'}), RNA('G', metadata={'id': 'r2'})],
-            [RNA('U', metadata={'id': 'r1'}), RNA('U', metadata={'id': 'r2'})],
-            [RNA('-', metadata={'id': 'r1'}), RNA('U', metadata={'id': 'r2'})]
-        ]
-        self.assertEqual(actual, expected)
-
-        actual = list(self.a2.iter_positions(constructor=str))
-        expected = [list('UA'),
-                    list('UC'),
-                    list('AG'),
-                    list('UU'),
-                    list('-U')]
-        self.assertEqual(actual, expected)
-
-    def test_majority_consensus(self):
-        # empty cases
-        self.assertEqual(
-            self.empty.majority_consensus(), Sequence(''))
-        self.assertEqual(
-            self.no_positions.majority_consensus(), RNA(''))
-
-        # alignment where all sequences are the same
-        aln = Alignment([DNA('AG', metadata={'id': 'a'}),
-                         DNA('AG', metadata={'id': 'b'})])
-        self.assertEqual(aln.majority_consensus(), DNA('AG'))
-
-        # no ties
-        d1 = DNA('TTT', metadata={'id': "d1"})
-        d2 = DNA('TT-', metadata={'id': "d2"})
-        d3 = DNA('TC-', metadata={'id': "d3"})
-        a1 = Alignment([d1, d2, d3])
-        self.assertEqual(a1.majority_consensus(), DNA('TT-'))
-
-        # ties
-        d1 = DNA('T', metadata={'id': "d1"})
-        d2 = DNA('A', metadata={'id': "d2"})
-        a1 = Alignment([d1, d2])
-        self.assertTrue(a1.majority_consensus() in
-                        [DNA('T'), DNA('A')])
-
-    def test_omit_gap_positions(self):
-        expected = self.a2
-        self.assertEqual(self.a2.omit_gap_positions(1.0), expected)
-        self.assertEqual(self.a2.omit_gap_positions(0.51), expected)
-
-        r1 = RNA('UUAU', metadata={'id': "r1"})
-        r2 = RNA('ACGU', metadata={'id': "r2"})
-        expected = Alignment([r1, r2])
-        self.assertEqual(self.a2.omit_gap_positions(0.49), expected)
-
-        r1 = RNA('UUAU', metadata={'id': "r1"})
-        r2 = RNA('ACGU', metadata={'id': "r2"})
-        expected = Alignment([r1, r2])
-        self.assertEqual(self.a2.omit_gap_positions(0.0), expected)
-
-        self.assertEqual(self.empty.omit_gap_positions(0.0), self.empty)
-        self.assertEqual(self.empty.omit_gap_positions(0.49), self.empty)
-        self.assertEqual(self.empty.omit_gap_positions(1.0), self.empty)
-
-        # Test to ensure floating point precision bug isn't present. See the
-        # tests for Alignment.position_frequencies for more details.
-        seqs = []
-        for i in range(33):
-            seqs.append(DNA('-.', metadata={'id': str(i)}))
-        aln = Alignment(seqs)
-        self.assertEqual(aln.omit_gap_positions(1 - np.finfo(float).eps),
-                         Alignment([DNA('', metadata={'id': str(i)})
-                                    for i in range(33)]))
-
-    def test_omit_gap_sequences(self):
-        expected = self.a2
-        self.assertEqual(self.a2.omit_gap_sequences(1.0), expected)
-        self.assertEqual(self.a2.omit_gap_sequences(0.20), expected)
-
-        expected = Alignment([self.r2])
-        self.assertEqual(self.a2.omit_gap_sequences(0.19), expected)
-
-        self.assertEqual(self.empty.omit_gap_sequences(0.0), self.empty)
-        self.assertEqual(self.empty.omit_gap_sequences(0.2), self.empty)
-        self.assertEqual(self.empty.omit_gap_sequences(1.0), self.empty)
-
-        # Test to ensure floating point precision bug isn't present. See the
-        # tests for Alignment.position_frequencies for more details.
-        aln = Alignment([DNA('.' * 33, metadata={'id': 'abc'}),
-                         DNA('-' * 33, metadata={'id': 'def'})])
-        self.assertEqual(aln.omit_gap_sequences(1 - np.finfo(float).eps),
-                         Alignment([]))
-
-    def test_position_counters(self):
-        self.assertEqual(self.empty.position_counters(), [])
-
-        self.assertEqual(self.no_positions.position_counters(), [])
-
-        expected = [Counter({'U': 1, 'A': 1}),
-                    Counter({'U': 1, 'C': 1}),
-                    Counter({'A': 1, 'G': 1}),
-                    Counter({'U': 2}),
-                    Counter({'-': 1, 'U': 1})]
-        self.assertEqual(self.a2.position_counters(), expected)
-
-    def test_position_frequencies(self):
-        self.assertEqual(self.empty.position_frequencies(), [])
-
-        self.assertEqual(self.no_positions.position_frequencies(), [])
-
-        expected = [defaultdict(float, {'U': 0.5, 'A': 0.5}),
-                    defaultdict(float, {'U': 0.5, 'C': 0.5}),
-                    defaultdict(float, {'A': 0.5, 'G': 0.5}),
-                    defaultdict(float, {'U': 1.0}),
-                    defaultdict(float, {'-': 0.5, 'U': 0.5})]
-        self.assertEqual(self.a2.position_frequencies(), expected)
-
-    def test_position_frequencies_floating_point_precision(self):
-        # Test that a position with no variation yields a frequency of exactly
-        # 1.0. Note that it is important to use self.assertEqual here instead
-        # of self.assertAlmostEqual because we want to test for exactly 1.0. A
-        # previous implementation of Alignment.position_frequencies added
-        # (1 / sequence_count) for each occurrence of a character in a position
-        # to compute the frequencies (see
-        # https://github.com/biocore/scikit-bio/issues/801). In certain cases,
-        # this yielded a frequency slightly less than 1.0 due to roundoff
-        # error. The test case here uses an alignment of 10 sequences with no
-        # variation at a position. This test case exposes the roundoff error
-        # present in the previous implementation because 1/10 added 10 times
-        # yields a number slightly less than 1.0. This occurs because 1/10
-        # cannot be represented exactly as a floating point number.
-        seqs = []
-        for i in range(10):
-            seqs.append(DNA('A', metadata={'id': str(i)}))
-        aln = Alignment(seqs)
-        self.assertEqual(aln.position_frequencies(),
-                         [defaultdict(float, {'A': 1.0})])
-
-    def test_position_entropies(self):
-        # tested by calculating values as described in this post:
-        #  http://stackoverflow.com/a/15476958/3424666
-        expected = [0.69314, 0.69314, 0.69314, 0.0, np.nan]
-        np.testing.assert_almost_equal(self.a2.position_entropies(),
-                                       expected, 5)
-
-        expected = [1.0, 1.0, 1.0, 0.0, np.nan]
-        np.testing.assert_almost_equal(self.a2.position_entropies(base=2),
-                                       expected, 5)
-
-        np.testing.assert_almost_equal(self.empty.position_entropies(base=2),
-                                       [])
-
-    def test_kmer_frequencies(self):
-        expected = [defaultdict(float, {'U': 3 / 5, 'A': 1 / 5, '-': 1 / 5}),
-                    defaultdict(float, {'A': 1 / 5, 'C': 1 / 5, 'G': 1 / 5,
-                                        'U': 2 / 5})]
-        actual = self.a2.kmer_frequencies(k=1, relative=True)
-        for a, e in zip(actual, expected):
-            self.assertEqual(sorted(a), sorted(e), 5)
-            np.testing.assert_almost_equal(sorted(a.values()),
-                                           sorted(e.values()), 5)
-
-    def test_sequence_length(self):
-        self.assertEqual(self.a1.sequence_length(), 13)
-        self.assertEqual(self.a2.sequence_length(), 5)
-        self.assertEqual(self.empty.sequence_length(), 0)
-
-    def test_validate_lengths(self):
-        self.assertTrue(self.a1._validate_lengths())
-        self.assertTrue(self.a2._validate_lengths())
-        self.assertTrue(self.empty._validate_lengths())
-
-        self.assertTrue(Alignment([
-            DNA('TTT', metadata={'id': "d1"})])._validate_lengths())
-
-if __name__ == "__main__":
-    main()
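
The position_frequencies and omit_gap_* tests removed above guard against a floating-point roundoff pitfall described in their comments: adding 1/10 ten times does not give exactly 1.0, which is why thresholds like 1 - np.finfo(float).eps appear in the expectations. The following sketch (plain NumPy, not part of the upstream change) illustrates the effect:

    import numpy as np

    # 1/10 has no exact binary representation, so accumulating it ten times
    # undershoots 1.0 slightly -- the roundoff the removed tests guarded against.
    total = sum([1 / 10.0] * 10)
    print(total == 1.0)                      # False: total is 0.9999999999999999
    print(total >= 1 - np.finfo(float).eps)  # True: the threshold used in the tests above
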
diff --git a/skbio/alignment/tests/test_pairwise.py b/skbio/alignment/tests/test_pairwise.py
index 75497e4..b3d9fef 100644
--- a/skbio/alignment/tests/test_pairwise.py
+++ b/skbio/alignment/tests/test_pairwise.py
@@ -11,17 +11,43 @@ from __future__ import absolute_import, division, print_function
 from unittest import TestCase, main
 import warnings
 
+import six
 import numpy as np
 
-from skbio import Protein, DNA, Alignment
+from skbio import Sequence, Protein, DNA, RNA, TabularMSA
 from skbio.alignment import (
     global_pairwise_align_protein, local_pairwise_align_protein,
     global_pairwise_align_nucleotide, local_pairwise_align_nucleotide,
-    make_identity_substitution_matrix)
+    make_identity_substitution_matrix, local_pairwise_align,
+    global_pairwise_align)
 from skbio.alignment._pairwise import (
     _init_matrices_sw, _init_matrices_nw,
     _compute_score_and_traceback_matrices, _traceback, _first_largest,
-    _get_seq_id, _compute_substitution_score)
+    _compute_substitution_score)
+from skbio.sequence._iupac_sequence import IUPACSequence
+from skbio.util._decorator import classproperty, overrides
+
+
+class CustomSequence(IUPACSequence):
+    @classproperty
+    @overrides(IUPACSequence)
+    def gap_chars(cls):
+        return set('^$')
+
+    @classproperty
+    @overrides(IUPACSequence)
+    def default_gap_char(cls):
+        return '^'
+
+    @classproperty
+    @overrides(IUPACSequence)
+    def nondegenerate_chars(cls):
+        return set('WXYZ')
+
+    @classproperty
+    @overrides(IUPACSequence)
+    def degenerate_map(cls):
+        return {}
 
 
 class PairwiseAlignmentTests(TestCase):
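
The CustomSequence class added above gives the aligners a toy alphabet (nondegenerate characters 'WXYZ', gap characters '^' and '$') so the tests in the next hunk can exercise make_identity_substitution_matrix with an explicit alphabet argument. A rough usage sketch, assuming only the 0.4.1 call signature shown in those tests (the printed values follow from the identity-matrix semantics and are not taken verbatim from the diff):

    from skbio.alignment import make_identity_substitution_matrix

    # Identity matrix over a custom alphabet: match_score on the diagonal,
    # mismatch_score everywhere else, returned as a dict of dicts.
    subs = make_identity_substitution_matrix(1, -1, alphabet=set('WXYZ'))
    print(subs['W']['W'])  # expected: 1
    print(subs['W']['Z'])  # expected: -1
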
@@ -63,336 +89,384 @@ class PairwiseAlignmentTests(TestCase):
                     'U': {'A': -4, 'C': -4, 'G': -4, 'T': -4, 'U':  5}}
         self.assertEqual(make_identity_substitution_matrix(5, -4), expected)
 
+    def test_global_pairwise_align_custom_alphabet(self):
+        custom_substitution_matrix = make_identity_substitution_matrix(
+            1, -1, alphabet=CustomSequence.nondegenerate_chars)
+
+        custom_msa, custom_score, custom_start_end = global_pairwise_align(
+            CustomSequence("WXYZ"), CustomSequence("WXYYZZ"),
+            10.0, 5.0, custom_substitution_matrix)
+
+        # Expected values computed by running an equivalent alignment using the
+        # DNA alphabet with the following mapping:
+        #
+        #     W X Y Z
+        #     | | | |
+        #     A C G T
+        #
+        self.assertEqual(custom_msa, TabularMSA([CustomSequence('WXYZ^^'),
+                                                 CustomSequence('WXYYZZ')]))
+        self.assertEqual(custom_score, 2.0)
+        self.assertEqual(custom_start_end, [(0, 3), (0, 5)])
+
+    def test_local_pairwise_align_custom_alphabet(self):
+        custom_substitution_matrix = make_identity_substitution_matrix(
+            5, -4, alphabet=CustomSequence.nondegenerate_chars)
+
+        custom_msa, custom_score, custom_start_end = local_pairwise_align(
+            CustomSequence("YWXXZZYWXXWYYZWXX"),
+            CustomSequence("YWWXZZZYWXYZWWX"), 5.0, 0.5,
+            custom_substitution_matrix)
+
+        # Expected values computed by running an equivalent alignment using the
+        # DNA alphabet with the following mapping:
+        #
+        #     W X Y Z
+        #     | | | |
+        #     A C G T
+        #
+        self.assertEqual(
+            custom_msa,
+            TabularMSA([CustomSequence('WXXZZYWXXWYYZWXX'),
+                        CustomSequence('WXZZZYWX^^^YZWWX')]))
+        self.assertEqual(custom_score, 41.0)
+        self.assertEqual(custom_start_end, [(1, 16), (2, 14)])
+
+    def test_global_pairwise_align_invalid_type(self):
+        with six.assertRaisesRegex(self, TypeError,
+                                   "IUPACSequence.*TabularMSA.*'Sequence'"):
+            global_pairwise_align(DNA('ACGT'), Sequence('ACGT'), 1.0, 1.0, {})
+
+    def test_global_pairwise_align_dtype_mismatch(self):
+        with six.assertRaisesRegex(self, TypeError,
+                                   "same dtype: 'DNA' != 'RNA'"):
+            global_pairwise_align(DNA('ACGT'), TabularMSA([RNA('ACGU')]),
+                                  1.0, 1.0, {})
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   "same dtype: 'DNA' != 'RNA'"):
+            global_pairwise_align(TabularMSA([DNA('ACGT')]),
+                                  TabularMSA([RNA('ACGU')]),
+                                  1.0, 1.0, {})
+
     def test_global_pairwise_align_protein(self):
-        expected = ("HEAGAWGHEE-", "---PAW-HEAE", 23.0)
-        actual = global_pairwise_align_protein(
-            "HEAGAWGHEE", "PAWHEAE", gap_open_penalty=10.,
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
+            Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=10.,
             gap_extend_penalty=5.)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
-        self.assertEqual(actual.ids(), list('01'))
 
-        expected = ("HEAGAWGHE-E", "---PAW-HEAE", 30.0)
+        self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE-"),
+                                              Protein("---PAW-HEAE")]))
+        self.assertEqual(obs_score, 23.0)
+        self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
+
         # EMBOSS result: P---AW-HEAE
-        actual = global_pairwise_align_protein(
-            "HEAGAWGHEE", "PAWHEAE", gap_open_penalty=5.,
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
+            Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=5.,
             gap_extend_penalty=0.5)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
-        self.assertEqual(actual.ids(), list('01'))
-
-        # Protein (rather than str) as input
-        expected = ("HEAGAWGHEE-", "---PAW-HEAE", 23.0)
-        actual = global_pairwise_align_protein(
+
+        self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHE-E"),
+                                              Protein("---PAW-HEAE")]))
+        self.assertEqual(obs_score, 30.0)
+        self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
+
+        # Protein sequences with metadata
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
             Protein("HEAGAWGHEE", metadata={'id': "s1"}),
             Protein("PAWHEAE", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5.)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
-        self.assertEqual(actual.ids(), ["s1", "s2"])
-
-        # One Alignment and one Protein as input
-        expected = ("HEAGAWGHEE-", "---PAW-HEAE", 23.0)
-        actual = global_pairwise_align_protein(
-            Alignment([Protein("HEAGAWGHEE", metadata={'id': "s1"})]),
+
+        self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE-"),
+                                              Protein("---PAW-HEAE")]))
+        self.assertEqual(obs_score, 23.0)
+        self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
+
+        # One TabularMSA and one Protein as input
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
+            TabularMSA([Protein("HEAGAWGHEE", metadata={'id': "s1"})]),
             Protein("PAWHEAE", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5.)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
-        self.assertEqual(actual.ids(), ["s1", "s2"])
+
+        self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE-"),
+                                              Protein("---PAW-HEAE")]))
+        self.assertEqual(obs_score, 23.0)
+        self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
 
         # One single-sequence alignment as input and one double-sequence
         # alignment as input. Score confirmed manually.
-        expected = ("HEAGAWGHEE-", "HDAGAWGHDE-", "---PAW-HEAE", 21.0)
-        actual = global_pairwise_align_protein(
-            Alignment([Protein("HEAGAWGHEE", metadata={'id': "s1"}),
-                       Protein("HDAGAWGHDE", metadata={'id': "s2"})]),
-            Alignment([Protein("PAWHEAE", metadata={'id': "s3"})]),
-            gap_open_penalty=10., gap_extend_penalty=5.)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(str(actual[2]), expected[2])
-        self.assertEqual(actual.score(), expected[3])
-        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
-        self.assertEqual(actual.ids(), ["s1", "s2", "s3"])
-
-        # ids are provided if they're not passed in
-        actual = global_pairwise_align_protein(
-            Protein("HEAGAWGHEE"),
-            Protein("PAWHEAE"),
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
+            TabularMSA([Protein("HEAGAWGHEE", metadata={'id': "s1"}),
+                        Protein("HDAGAWGHDE", metadata={'id': "s2"})]),
+            TabularMSA([Protein("PAWHEAE", metadata={'id': "s3"})]),
             gap_open_penalty=10., gap_extend_penalty=5.)
-        self.assertEqual(actual.ids(), list('01'))
+
+        self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE-"),
+                                              Protein("HDAGAWGHDE-"),
+                                              Protein("---PAW-HEAE")]))
+        self.assertEqual(obs_score, 21.0)
+        self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
 
         # TypeError on invalid input
         self.assertRaises(TypeError, global_pairwise_align_protein,
-                          42, "HEAGAWGHEE")
+                          42, Protein("HEAGAWGHEE"))
         self.assertRaises(TypeError, global_pairwise_align_protein,
-                          "HEAGAWGHEE", 42)
+                          Protein("HEAGAWGHEE"), 42)
+
+    def test_global_pairwise_align_protein_invalid_dtype(self):
+        with six.assertRaisesRegex(self, TypeError,
+                                   "TabularMSA with Protein dtype.*dtype "
+                                   "'DNA'"):
+            global_pairwise_align_protein(TabularMSA([Protein('PAW')]),
+                                          TabularMSA([DNA('ACGT')]))
 
     def test_global_pairwise_align_protein_penalize_terminal_gaps(self):
-        expected = ("HEAGAWGHEE", "---PAWHEAE", 1.0)
-        actual = global_pairwise_align_protein(
-            "HEAGAWGHEE", "PAWHEAE", gap_open_penalty=10.,
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
+            Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=10.,
             gap_extend_penalty=5., penalize_terminal_gaps=True)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
-        self.assertEqual(actual.ids(), list('01'))
+
+        self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE"),
+                                              Protein("---PAWHEAE")]))
+        self.assertEqual(obs_score, 1.0)
+        self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
 
     def test_global_pairwise_align_nucleotide_penalize_terminal_gaps(self):
         # in these tests one sequence is about 3x the length of the other.
         # we toggle penalize_terminal_gaps to confirm that it results in
         # different alignments and alignment scores.
-        seq1 = "ACCGTGGACCGTTAGGATTGGACCCAAGGTTG"
-        seq2 = "T"*25 + "ACCGTGGACCGTAGGATTGGACCAAGGTTA" + "A"*25
-
-        aln1 = ("-------------------------ACCGTGGACCGTTAGGA"
-                "TTGGACCCAAGGTTG-------------------------")
-        aln2 = ("TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA"
-                "TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA")
-        expected = (aln1, aln2, 131.0)
-        actual = global_pairwise_align_nucleotide(
+        seq1 = DNA("ACCGTGGACCGTTAGGATTGGACCCAAGGTTG")
+        seq2 = DNA("T"*25 + "ACCGTGGACCGTAGGATTGGACCAAGGTTA" + "A"*25)
+
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
             seq1, seq2, gap_open_penalty=5., gap_extend_penalty=0.5,
             match_score=5, mismatch_score=-4, penalize_terminal_gaps=False)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-
-        aln1 = ("-------------------------ACCGTGGACCGTTAGGA"
-                "TTGGACCCAAGGTT-------------------------G")
-        aln2 = ("TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA"
-                "TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA")
-        expected = (aln1, aln2, 97.0)
-        actual = global_pairwise_align_nucleotide(
+
+        self.assertEqual(
+            obs_msa,
+            TabularMSA([DNA("-------------------------ACCGTGGACCGTTAGGA"
+                            "TTGGACCCAAGGTTG-------------------------"),
+                        DNA("TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA"
+                            "TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA")]))
+        self.assertEqual(obs_score, 131.0)
+
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
             seq1, seq2, gap_open_penalty=5., gap_extend_penalty=0.5,
             match_score=5, mismatch_score=-4, penalize_terminal_gaps=True)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
+
+        self.assertEqual(
+            obs_msa,
+            TabularMSA([DNA("-------------------------ACCGTGGACCGTTAGGA"
+                            "TTGGACCCAAGGTT-------------------------G"),
+                        DNA("TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA"
+                            "TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA")]))
+        self.assertEqual(obs_score, 97.0)
 
     def test_local_pairwise_align_protein(self):
-        expected = ("AWGHE", "AW-HE", 26.0, 4, 1)
-        actual = local_pairwise_align_protein(
-            "HEAGAWGHEE", "PAWHEAE", gap_open_penalty=10.,
+        obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(
+            Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=10.,
             gap_extend_penalty=5.)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(4, 8), (1, 4)])
-        self.assertEqual(actual.ids(), list('01'))
-
-        expected = ("AWGHE-E", "AW-HEAE", 32.0, 4, 1)
-        actual = local_pairwise_align_protein(
-            "HEAGAWGHEE", "PAWHEAE", gap_open_penalty=5.,
+
+        self.assertEqual(obs_msa, TabularMSA([Protein("AWGHE"),
+                                              Protein("AW-HE")]))
+        self.assertEqual(obs_score, 26.0)
+        self.assertEqual(obs_start_end, [(4, 8), (1, 4)])
+
+        obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(
+            Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=5.,
             gap_extend_penalty=0.5)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(4, 9), (1, 6)])
-        self.assertEqual(actual.ids(), list('01'))
-
-        expected = ("AWGHE", "AW-HE", 26.0, 4, 1)
-        # Protein (rather than str) as input
-        actual = local_pairwise_align_protein(
+
+        self.assertEqual(obs_msa, TabularMSA([Protein("AWGHE-E"),
+                                              Protein("AW-HEAE")]))
+        self.assertEqual(obs_score, 32.0)
+        self.assertEqual(obs_start_end, [(4, 9), (1, 6)])
+
+        # Protein sequences with metadata
+        obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(
             Protein("HEAGAWGHEE", metadata={'id': "s1"}),
             Protein("PAWHEAE", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5.)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(4, 8), (1, 4)])
-        self.assertEqual(actual.ids(), ["s1", "s2"])
 
-        # Fails when either input is passed as an Alignment
+        self.assertEqual(obs_msa, TabularMSA([Protein("AWGHE"),
+                                              Protein("AW-HE")]))
+        self.assertEqual(obs_score, 26.0)
+        self.assertEqual(obs_start_end, [(4, 8), (1, 4)])
+
+        # Fails when either input is passed as a TabularMSA
         self.assertRaises(TypeError, local_pairwise_align_protein,
-                          Alignment([Protein("HEAGAWGHEE",
-                                             metadata={'id': "s1"})]),
+                          TabularMSA([Protein("HEAGAWGHEE",
+                                      metadata={'id': "s1"})]),
                           Protein("PAWHEAE", metadata={'id': "s2"}),
                           gap_open_penalty=10.,
                           gap_extend_penalty=5.)
         self.assertRaises(TypeError, local_pairwise_align_protein,
                           Protein("HEAGAWGHEE", metadata={'id': "s1"}),
-                          Alignment([Protein("PAWHEAE",
-                                             metadata={'id': "s2"})]),
+                          TabularMSA([Protein("PAWHEAE",
+                                      metadata={'id': "s2"})]),
                           gap_open_penalty=10., gap_extend_penalty=5.)
 
-        # ids are provided if they're not passed in
-        actual = local_pairwise_align_protein(
-            Protein("HEAGAWGHEE"),
-            Protein("PAWHEAE"),
-            gap_open_penalty=10., gap_extend_penalty=5.)
-        self.assertEqual(actual.ids(), list('01'))
-
         # TypeError on invalid input
         self.assertRaises(TypeError, local_pairwise_align_protein,
-                          42, "HEAGAWGHEE")
+                          42, Protein("HEAGAWGHEE"))
         self.assertRaises(TypeError, local_pairwise_align_protein,
-                          "HEAGAWGHEE", 42)
+                          Protein("HEAGAWGHEE"), 42)
 
     def test_global_pairwise_align_nucleotide(self):
-        expected = ("G-ACCTTGACCAGGTACC", "GAACTTTGAC---GTAAC", 41.0, 0, 0)
-        actual = global_pairwise_align_nucleotide(
-            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=5.,
-            gap_extend_penalty=0.5, match_score=5, mismatch_score=-4)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(0, 16), (0, 14)])
-        self.assertEqual(actual.ids(), list('01'))
-
-        expected = ("-GACCTTGACCAGGTACC", "GAACTTTGAC---GTAAC", 32.0, 0, 0)
-        actual = global_pairwise_align_nucleotide(
-            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
-            gap_extend_penalty=0.5, match_score=5, mismatch_score=-4)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(0, 16), (0, 14)])
-        self.assertEqual(actual.ids(), list('01'))
-
-        # DNA (rather than str) as input
-        expected = ("-GACCTTGACCAGGTACC", "GAACTTTGAC---GTAAC", 32.0, 0, 0)
-        actual = global_pairwise_align_nucleotide(
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
+            DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+            gap_open_penalty=5., gap_extend_penalty=0.5, match_score=5,
+            mismatch_score=-4)
+
+        self.assertEqual(obs_msa, TabularMSA([DNA("G-ACCTTGACCAGGTACC"),
+                                              DNA("GAACTTTGAC---GTAAC")]))
+        self.assertEqual(obs_score, 41.0)
+        self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
+
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
+            DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+            gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
+            mismatch_score=-4)
+
+        self.assertEqual(obs_msa, TabularMSA([DNA("-GACCTTGACCAGGTACC"),
+                                              DNA("GAACTTTGAC---GTAAC")]))
+        self.assertEqual(obs_score, 32.0)
+        self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
+
+        # DNA sequences with metadata
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
             DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
             DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
             mismatch_score=-4)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(0, 16), (0, 14)])
-        self.assertEqual(actual.ids(), ["s1", "s2"])
-
-        # Align one DNA sequence and one Alignment, score computed manually
-        expected = ("-GACCTTGACCAGGTACC", "-GACCATGACCAGGTACC",
-                    "GAACTTTGAC---GTAAC", 27.5, 0, 0)
-        actual = global_pairwise_align_nucleotide(
-            Alignment([DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
-                       DNA("GACCATGACCAGGTACC", metadata={'id': "s2"})]),
+
+        self.assertEqual(obs_msa, TabularMSA([DNA("-GACCTTGACCAGGTACC"),
+                                              DNA("GAACTTTGAC---GTAAC")]))
+        self.assertEqual(obs_score, 32.0)
+        self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
+
+        # Align one DNA sequence and one TabularMSA, score computed manually
+        obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
+            TabularMSA([DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
+                        DNA("GACCATGACCAGGTACC", metadata={'id': "s2"})]),
             DNA("GAACTTTGACGTAAC", metadata={'id': "s3"}),
             gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
             mismatch_score=-4)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(str(actual[2]), expected[2])
-        self.assertEqual(actual.score(), expected[3])
-        self.assertEqual(actual.start_end_positions(), [(0, 16), (0, 14)])
-        self.assertEqual(actual.ids(), ["s1", "s2", "s3"])
-
-        # ids are provided if they're not passed in
-        actual = global_pairwise_align_nucleotide(
-            DNA("GACCTTGACCAGGTACC"),
-            DNA("GAACTTTGACGTAAC"),
-            gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
-            mismatch_score=-4)
-        self.assertEqual(actual.ids(), list('01'))
+
+        self.assertEqual(obs_msa, TabularMSA([DNA("-GACCTTGACCAGGTACC"),
+                                              DNA("-GACCATGACCAGGTACC"),
+                                              DNA("GAACTTTGAC---GTAAC")]))
+        self.assertEqual(obs_score, 27.5)
+        self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
 
         # TypeError on invalid input
         self.assertRaises(TypeError, global_pairwise_align_nucleotide,
-                          42, "HEAGAWGHEE")
+                          42, DNA("ACGT"))
         self.assertRaises(TypeError, global_pairwise_align_nucleotide,
-                          "HEAGAWGHEE", 42)
+                          DNA("ACGT"), 42)
+
+    def test_global_pairwise_align_nucleotide_invalid_dtype(self):
+        with six.assertRaisesRegex(self, TypeError,
+                                   "TabularMSA with DNA or RNA dtype.*dtype "
+                                   "'Protein'"):
+            global_pairwise_align_nucleotide(TabularMSA([DNA('ACGT')]),
+                                             TabularMSA([Protein('PAW')]))
 
     def test_local_pairwise_align_nucleotide(self):
-        expected = ("ACCTTGACCAGGTACC", "ACTTTGAC---GTAAC", 41.0, 1, 2)
-        actual = local_pairwise_align_nucleotide(
-            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=5.,
-            gap_extend_penalty=0.5, match_score=5, mismatch_score=-4)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(1, 16), (2, 14)])
-        self.assertEqual(actual.ids(), list('01'))
-
-        expected = ("ACCTTGAC", "ACTTTGAC", 31.0, 1, 2)
-        actual = local_pairwise_align_nucleotide(
-            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
-            gap_extend_penalty=5., match_score=5, mismatch_score=-4)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(1, 8), (2, 9)])
-        self.assertEqual(actual.ids(), list('01'))
-
-        # DNA (rather than str) as input
-        expected = ("ACCTTGAC", "ACTTTGAC", 31.0, 1, 2)
-        actual = local_pairwise_align_nucleotide(
+        obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(
+            DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+            gap_open_penalty=5., gap_extend_penalty=0.5, match_score=5,
+            mismatch_score=-4)
+
+        self.assertEqual(obs_msa, TabularMSA([DNA("ACCTTGACCAGGTACC"),
+                                              DNA("ACTTTGAC---GTAAC")]))
+        self.assertEqual(obs_score, 41.0)
+        self.assertEqual(obs_start_end, [(1, 16), (2, 14)])
+
+        obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(
+            DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+            gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
+            mismatch_score=-4)
+
+        self.assertEqual(obs_msa, TabularMSA([DNA("ACCTTGAC"),
+                                              DNA("ACTTTGAC")]))
+        self.assertEqual(obs_score, 31.0)
+        self.assertEqual(obs_start_end, [(1, 8), (2, 9)])
+
+        # DNA sequences with metadata
+        obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(
             DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
             DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
             mismatch_score=-4)
-        self.assertEqual(str(actual[0]), expected[0])
-        self.assertEqual(str(actual[1]), expected[1])
-        self.assertEqual(actual.score(), expected[2])
-        self.assertEqual(actual.start_end_positions(), [(1, 8), (2, 9)])
-        self.assertEqual(actual.ids(), ["s1", "s2"])
 
-        # Fails when either input is passed as an Alignment
+        self.assertEqual(obs_msa, TabularMSA([DNA("ACCTTGAC"),
+                                              DNA("ACTTTGAC")]))
+        self.assertEqual(obs_score, 31.0)
+        self.assertEqual(obs_start_end, [(1, 8), (2, 9)])
+
+        # Fails when either input is passed as a TabularMSA
         self.assertRaises(TypeError, local_pairwise_align_nucleotide,
-                          Alignment([DNA("GACCTTGACCAGGTACC",
-                                         metadata={'id': "s1"})]),
+                          TabularMSA([DNA("GACCTTGACCAGGTACC",
+                                          metadata={'id': "s1"})]),
                           DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
                           gap_open_penalty=10., gap_extend_penalty=5.,
                           match_score=5, mismatch_score=-4)
         self.assertRaises(TypeError, local_pairwise_align_nucleotide,
                           DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
-                          Alignment([DNA("GAACTTTGACGTAAC",
-                                         metadata={'id': "s2"})]),
+                          TabularMSA([DNA("GAACTTTGACGTAAC",
+                                      metadata={'id': "s2"})]),
                           gap_open_penalty=10., gap_extend_penalty=5.,
                           match_score=5, mismatch_score=-4)
 
-        # ids are provided if they're not passed in
-        actual = local_pairwise_align_nucleotide(
-            DNA("GACCTTGACCAGGTACC"),
-            DNA("GAACTTTGACGTAAC"),
-            gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
-            mismatch_score=-4)
-        self.assertEqual(actual.ids(), list('01'))
-
         # TypeError on invalid input
         self.assertRaises(TypeError, local_pairwise_align_nucleotide,
-                          42, "HEAGAWGHEE")
+                          42, DNA("ACGT"))
         self.assertRaises(TypeError, local_pairwise_align_nucleotide,
-                          "HEAGAWGHEE", 42)
+                          DNA("ACGT"), 42)
 
     def test_nucleotide_aligners_use_substitution_matrices(self):
         alt_sub = make_identity_substitution_matrix(10, -10)
         # alternate substitution matrix yields different alignment (the
         # aligned sequences and the scores are different) with local alignment
-        actual_no_sub = local_pairwise_align_nucleotide(
-            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
-            gap_extend_penalty=5., match_score=5, mismatch_score=-4)
-        actual_alt_sub = local_pairwise_align_nucleotide(
-            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
-            gap_extend_penalty=5., match_score=5, mismatch_score=-4,
-            substitution_matrix=alt_sub)
-        self.assertNotEqual(str(actual_no_sub[0]), str(actual_alt_sub[0]))
-        self.assertNotEqual(str(actual_no_sub[1]), str(actual_alt_sub[1]))
-        self.assertNotEqual(actual_no_sub.score(),
-                            actual_alt_sub.score())
+        msa_no_sub, score_no_sub, start_end_no_sub = \
+            local_pairwise_align_nucleotide(
+                DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+                gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
+                mismatch_score=-4)
+
+        msa_alt_sub, score_alt_sub, start_end_alt_sub = \
+            local_pairwise_align_nucleotide(
+                DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+                gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
+                mismatch_score=-4, substitution_matrix=alt_sub)
+
+        self.assertNotEqual(msa_no_sub, msa_alt_sub)
+        self.assertNotEqual(score_no_sub, score_alt_sub)
+        self.assertNotEqual(start_end_no_sub, start_end_alt_sub)
 
         # alternate substitution matrix yields different alignment (the
         # aligned sequences and the scores are different) with global alignment
-        actual_no_sub = local_pairwise_align_nucleotide(
-            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
-            gap_extend_penalty=5., match_score=5, mismatch_score=-4)
-        actual_alt_sub = global_pairwise_align_nucleotide(
-            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
-            gap_extend_penalty=5., match_score=5, mismatch_score=-4,
-            substitution_matrix=alt_sub)
-        self.assertNotEqual(str(actual_no_sub[0]), str(actual_alt_sub[0]))
-        self.assertNotEqual(str(actual_no_sub[1]), str(actual_alt_sub[1]))
-        self.assertNotEqual(actual_no_sub.score(),
-                            actual_alt_sub.score())
+        msa_no_sub, score_no_sub, start_end_no_sub = \
+            global_pairwise_align_nucleotide(
+                DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+                gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
+                mismatch_score=-4)
+
+        msa_alt_sub, score_alt_sub, start_end_alt_sub = \
+            global_pairwise_align_nucleotide(
+                DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+                gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
+                mismatch_score=-4, substitution_matrix=alt_sub)
+
+        self.assertNotEqual(msa_no_sub, msa_alt_sub)
+        self.assertNotEqual(score_no_sub, score_alt_sub)
+        self.assertEqual(start_end_no_sub, start_end_alt_sub)
+
+    def test_local_pairwise_align_invalid_type(self):
+        with six.assertRaisesRegex(self, TypeError, 'IUPACSequence.*Sequence'):
+            local_pairwise_align(DNA('ACGT'), Sequence('ACGT'), 1.0, 1.0, {})
+
+    def test_local_pairwise_align_type_mismatch(self):
+        with six.assertRaisesRegex(self, TypeError,
+                                   "same type: 'DNA' != 'RNA'"):
+            local_pairwise_align(DNA('ACGT'), RNA('ACGU'), 1.0, 1.0, {})
 
     def test_init_matrices_sw(self):
         expected_score_m = np.zeros((5, 4))
@@ -402,8 +476,8 @@ class PairwiseAlignmentTests(TestCase):
                             [0, -1, -1, -1],
                             [0, -1, -1, -1]]
         actual_score_m, actual_tback_m = _init_matrices_sw(
-            Alignment([DNA('AAA', metadata={'id': 'id'})]),
-            Alignment([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)
+            TabularMSA([DNA('AAA', metadata={'id': 'id'})]),
+            TabularMSA([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)
         np.testing.assert_array_equal(actual_score_m, expected_score_m)
         np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
 
@@ -419,33 +493,46 @@ class PairwiseAlignmentTests(TestCase):
                             [2, -1, -1, -1],
                             [2, -1, -1, -1]]
         actual_score_m, actual_tback_m = _init_matrices_nw(
-            Alignment([DNA('AAA', metadata={'id': 'id'})]),
-            Alignment([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)
+            TabularMSA([DNA('AAA', metadata={'id': 'id'})]),
+            TabularMSA([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)
         np.testing.assert_array_equal(actual_score_m, expected_score_m)
         np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
 
     def test_compute_substitution_score(self):
         # these results were computed manually
         subs_m = make_identity_substitution_matrix(5, -4)
+        gap_chars = set('-.')
+
         self.assertEqual(
-            _compute_substitution_score(['A'], ['A'], subs_m, 0), 5.0)
+            _compute_substitution_score(['A'], ['A'], subs_m, 0, gap_chars),
+            5.0)
         self.assertEqual(
-            _compute_substitution_score(['A', 'A'], ['A'], subs_m, 0), 5.0)
+            _compute_substitution_score(['A', 'A'], ['A'], subs_m, 0,
+                                        gap_chars),
+            5.0)
         self.assertEqual(
-            _compute_substitution_score(['A', 'C'], ['A'], subs_m, 0), 0.5)
+            _compute_substitution_score(['A', 'C'], ['A'], subs_m, 0,
+                                        gap_chars),
+            0.5)
         self.assertEqual(
-            _compute_substitution_score(['A', 'C'], ['A', 'C'], subs_m, 0),
+            _compute_substitution_score(['A', 'C'], ['A', 'C'], subs_m, 0,
+                                        gap_chars),
             0.5)
         self.assertEqual(
-            _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0),
+            _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0,
+                                        gap_chars),
             2.5)
         self.assertEqual(
-            _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 1), 3)
+            _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 1,
+                                        gap_chars),
+            3)
 
         # alt subs_m
         subs_m = make_identity_substitution_matrix(1, -2)
+
         self.assertEqual(
-            _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0),
+            _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0,
+                                        gap_chars),
             0.5)
 
     def test_compute_score_and_traceback_matrices(self):
@@ -462,8 +549,8 @@ class PairwiseAlignmentTests(TestCase):
                             [2, 2, 2, 2]]
         m = make_identity_substitution_matrix(2, -1)
         actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
-            Alignment([DNA('ACG', metadata={'id': 'id'})]),
-            Alignment([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)
+            TabularMSA([DNA('ACG', metadata={'id': 'id'})]),
+            TabularMSA([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)
         np.testing.assert_array_equal(actual_score_m, expected_score_m)
         np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
 
@@ -481,8 +568,8 @@ class PairwiseAlignmentTests(TestCase):
                             [2, 2, 2, 1]]
         m = make_identity_substitution_matrix(2, -1)
         actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
-            Alignment([DNA('ACC', metadata={'id': 'id'})]),
-            Alignment([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)
+            TabularMSA([DNA('ACC', metadata={'id': 'id'})]),
+            TabularMSA([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)
         np.testing.assert_array_equal(actual_score_m, expected_score_m)
         np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
 
@@ -500,10 +587,10 @@ class PairwiseAlignmentTests(TestCase):
                             [2, 2, 2, 1]]
         m = make_identity_substitution_matrix(2, -1)
         actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
-            Alignment([DNA('ACC', metadata={'id': 's1'}),
-                       DNA('ACC', metadata={'id': 's2'})]),
-            Alignment([DNA('ACGT', metadata={'id': 's3'}),
-                       DNA('ACGT', metadata={'id': 's4'})]), 5, 2, m)
+            TabularMSA([DNA('ACC', metadata={'id': 's1'}),
+                        DNA('ACC', metadata={'id': 's2'})]),
+            TabularMSA([DNA('ACGT', metadata={'id': 's3'}),
+                        DNA('ACGT', metadata={'id': 's4'})]), 5, 2, m)
         np.testing.assert_array_equal(actual_score_m, expected_score_m)
         np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
 
@@ -512,8 +599,8 @@ class PairwiseAlignmentTests(TestCase):
         # substitution matrix, an informative error should be raised
         m = make_identity_substitution_matrix(2, -1)
         self.assertRaises(ValueError, _compute_score_and_traceback_matrices,
-                          Alignment([DNA('AWG', metadata={'id': 'id'})]),
-                          Alignment([DNA('ACGT', metadata={'id': 'id'})]),
+                          TabularMSA([DNA('AWG', metadata={'id': 'id'})]),
+                          TabularMSA([DNA('ACGT', metadata={'id': 'id'})]),
                           5, 2, m)
 
     def test_traceback(self):
@@ -530,11 +617,10 @@ class PairwiseAlignmentTests(TestCase):
                    [2, 2, 2, 2]]
         tback_m = np.array(tback_m)
         # start at bottom-right
-        expected = ([DNA("ACG-", metadata={'id': '0'})],
-                    [DNA("ACGT", metadata={'id': '1'})], 1, 0, 0)
+        expected = ([DNA("ACG-")], [DNA("ACGT")], 1, 0, 0)
         actual = _traceback(tback_m, score_m,
-                            Alignment([DNA('ACG', metadata={'id': ''})]),
-                            Alignment([DNA('ACGT', metadata={'id': ''})]),
+                            TabularMSA([DNA('ACG', metadata={'id': ''})]),
+                            TabularMSA([DNA('ACGT', metadata={'id': ''})]),
                             4, 3)
         self.assertEqual(actual, expected)
 
@@ -552,25 +638,25 @@ class PairwiseAlignmentTests(TestCase):
                    [2, 2, 2, 2]]
         tback_m = np.array(tback_m)
         # start at bottom-right
-        expected = ([DNA("ACG-", metadata={'id': 's1'}),
-                     DNA("ACG-", metadata={'id': 's2'})],
-                    [DNA("ACGT", metadata={'id': 's3'}),
-                     DNA("ACGT", metadata={'id': 's4'})],
+        expected = ([DNA("ACG-"),
+                     DNA("ACG-")],
+                    [DNA("ACGT"),
+                     DNA("ACGT")],
                     1, 0, 0)
         actual = _traceback(tback_m, score_m,
-                            Alignment([DNA('ACG', metadata={'id': 's1'}),
-                                       DNA('ACG', metadata={'id': 's2'})]),
-                            Alignment([DNA('ACGT', metadata={'id': 's3'}),
-                                       DNA('ACGT', metadata={'id': 's4'})]),
+                            TabularMSA([DNA('ACG', metadata={'id': 's1'}),
+                                        DNA('ACG', metadata={'id': 's2'})]),
+                            TabularMSA([DNA('ACGT', metadata={'id': 's3'}),
+                                        DNA('ACGT', metadata={'id': 's4'})]),
                             4, 3)
         self.assertEqual(actual, expected)
 
         # start at highest-score
-        expected = ([DNA("ACG", metadata={'id': '0'})],
-                    [DNA("ACG", metadata={'id': '1'})], 6, 0, 0)
+        expected = ([DNA("ACG")],
+                    [DNA("ACG")], 6, 0, 0)
         actual = _traceback(tback_m, score_m,
-                            Alignment([DNA('ACG', metadata={'id': ''})]),
-                            Alignment([DNA('ACGT', metadata={'id': ''})]),
+                            TabularMSA([DNA('ACG', metadata={'id': ''})]),
+                            TabularMSA([DNA('ACGT', metadata={'id': ''})]),
                             3, 3)
         self.assertEqual(actual, expected)
 
@@ -581,24 +667,14 @@ class PairwiseAlignmentTests(TestCase):
                    [2, 2, 2, 1],
                    [2, 2, 2, 2]]
         tback_m = np.array(tback_m)
-        expected = ("G", "G", 6, 2, 2)
-        expected = ([DNA("G", metadata={'id': '0'})],
-                    [DNA("G", metadata={'id': '1'})], 6, 2, 2)
+        expected = ([DNA("G")],
+                    [DNA("G")], 6, 2, 2)
         actual = _traceback(tback_m, score_m,
-                            Alignment([DNA('ACG', metadata={'id': ''})]),
-                            Alignment([DNA('ACGT', metadata={'id': ''})]),
+                            TabularMSA([DNA('ACG', metadata={'id': ''})]),
+                            TabularMSA([DNA('ACGT', metadata={'id': ''})]),
                             3, 3)
         self.assertEqual(actual, expected)
 
-    def test_get_seq_id(self):
-        self.assertEqual(_get_seq_id(DNA("AAA"), "hello"), "hello")
-        self.assertEqual(_get_seq_id(DNA("AAA", metadata={'id': "s1"}),
-                                     "hello"), "s1")
-        self.assertEqual(_get_seq_id(DNA("AAA", metadata={'id': None}),
-                                     "hello"), "hello")
-        self.assertEqual(_get_seq_id(DNA("AAA", metadata={'id': '\t'}),
-                                     "hello"), "hello")
-
     def test_first_largest(self):
         l = [(5, 'a'), (5, 'b'), (5, 'c')]
         self.assertEqual(_first_largest(l), (5, 'a'))
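
The hunks above swap the removed Alignment class for TabularMSA as the input type of the
private pairwise helpers, and drop _get_seq_id now that sequence 'id' metadata no longer
drives the aligned output. A minimal sketch of the new calling convention, mirroring the
values used in the tests (the import path of the private helper and the meaning of the
two numeric arguments are assumptions, not shown in this diff):

    from skbio import DNA, TabularMSA
    from skbio.alignment import make_identity_substitution_matrix
    from skbio.alignment._pairwise import _compute_score_and_traceback_matrices

    # Inputs are TabularMSA objects now, not Alignment objects.
    seq1 = TabularMSA([DNA('ACC')])
    seq2 = TabularMSA([DNA('ACGT')])
    m = make_identity_substitution_matrix(2, -1)  # match=2, mismatch=-1

    # 5 and 2 are taken here to be the gap-open and gap-extend penalties used
    # throughout these tests.
    score_m, tback_m = _compute_score_and_traceback_matrices(seq1, seq2, 5, 2, m)
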
diff --git a/skbio/alignment/tests/test_ssw.py b/skbio/alignment/tests/test_ssw.py
index 34e37da..fe55113 100644
--- a/skbio/alignment/tests/test_ssw.py
+++ b/skbio/alignment/tests/test_ssw.py
@@ -20,7 +20,10 @@ from __future__ import absolute_import, division, print_function
 
 from unittest import TestCase, main
 
-from skbio import local_pairwise_align_ssw, Sequence, DNA
+import six
+
+from skbio import (local_pairwise_align_ssw, Sequence, DNA, RNA, Protein,
+                   TabularMSA)
 from skbio.alignment import StripedSmithWaterman, AlignmentStructure
 from skbio.alignment._pairwise import blosum50
 
@@ -558,15 +561,20 @@ class TestStripedSmithWaterman(TestSSW):
 
 
 class TestAlignStripedSmithWaterman(TestSSW):
-    def _check_Alignment_to_AlignmentStructure(self, alignment, structure):
-        self.assertEqual(alignment.score(), structure.optimal_alignment_score)
-        self.assertEqual(str(alignment[0]), structure.aligned_query_sequence)
-        self.assertEqual(str(alignment[1]), structure.aligned_target_sequence)
+    def _check_TabularMSA_to_AlignmentStructure(self, alignment, structure,
+                                                expected_dtype):
+        msa, score, start_end = alignment
+
+        self.assertEqual(score, structure.optimal_alignment_score)
+        self.assertEqual(
+            msa,
+            TabularMSA([expected_dtype(structure.aligned_query_sequence),
+                        expected_dtype(structure.aligned_target_sequence)]))
         if structure.query_begin == -1:
-            self.assertEqual(alignment.start_end_positions(), None)
+            self.assertEqual(start_end, None)
         else:
             for (start, end), (expected_start, expected_end) in \
-                zip(alignment.start_end_positions(),
+                zip(start_end,
                     [(structure.query_begin,
                       structure.query_end),
                      (structure.target_begin,
@@ -574,14 +582,26 @@ class TestAlignStripedSmithWaterman(TestSSW):
                 self.assertEqual(start, expected_start)
                 self.assertEqual(end, expected_end)
 
-    def test_same_as_using_StripedSmithWaterman_object(self):
+    def test_same_as_using_StripedSmithWaterman_object_DNA(self):
         query_sequence = 'ATGGAAGCTATAAGCGCGGGTGAG'
         target_sequence = 'AACTTATATAATAAAAATTATATATTCGTTGGGTTCTTTTGATATAAATC'
         query = StripedSmithWaterman(query_sequence)
         align1 = query(target_sequence)
-        align2 = local_pairwise_align_ssw(query_sequence,
-                                          target_sequence)
-        self._check_Alignment_to_AlignmentStructure(align2, align1)
+        align2 = local_pairwise_align_ssw(DNA(query_sequence),
+                                          DNA(target_sequence))
+        self._check_TabularMSA_to_AlignmentStructure(align2, align1, DNA)
+
+    def test_same_as_using_StripedSmithWaterman_object_Protein(self):
+        query_sequence = 'HEAGAWGHEE'
+        target_sequence = 'PAWHEAE'
+        query = StripedSmithWaterman(query_sequence,
+                                     protein=True,
+                                     substitution_matrix=blosum50)
+        align1 = query(target_sequence)
+        align2 = local_pairwise_align_ssw(Protein(query_sequence),
+                                          Protein(target_sequence),
+                                          substitution_matrix=blosum50)
+        self._check_TabularMSA_to_AlignmentStructure(align2, align1, Protein)
 
     def test_kwargs_are_usable(self):
         kwargs = {}
@@ -591,20 +611,21 @@ class TestAlignStripedSmithWaterman(TestSSW):
         target_sequence = 'TACTTATAAGATGTCTCAACGGCATGCGCAACTTGTGAAGTG'
         query = StripedSmithWaterman(query_sequence, **kwargs)
         align1 = query(target_sequence)
-        align2 = local_pairwise_align_ssw(query_sequence,
-                                          target_sequence, **kwargs)
-        self._check_Alignment_to_AlignmentStructure(align2, align1)
+        align2 = local_pairwise_align_ssw(DNA(query_sequence),
+                                          DNA(target_sequence), **kwargs)
+        self._check_TabularMSA_to_AlignmentStructure(align2, align1, DNA)
 
-    def test_constructor(self):
-        query_sequence = 'AGGGTAATTAGGCGTGTTCACCTA'
-        target_sequence = 'TACTTATAAGATGTCTCAACGGCATGCGCAACTTGTGAAGTG'
+    def test_invalid_type(self):
+        with six.assertRaisesRegex(self, TypeError, "not type 'Sequence'"):
+            local_pairwise_align_ssw(DNA('ACGT'), Sequence('ACGT'))
 
-        align1 = local_pairwise_align_ssw(query_sequence, target_sequence)
-        align2 = local_pairwise_align_ssw(query_sequence, target_sequence,
-                                          constructor=DNA)
+        with six.assertRaisesRegex(self, TypeError, "not type 'str'"):
+            local_pairwise_align_ssw('ACGU', RNA('ACGU'))
 
-        self.assertEqual(type(align1[0]), Sequence)
-        self.assertEqual(type(align2[0]), DNA)
+    def test_type_mismatch(self):
+        with six.assertRaisesRegex(self, TypeError,
+                                   "same type: 'DNA' != 'RNA'"):
+            local_pairwise_align_ssw(DNA('ACGT'), RNA('ACGU'))
 
 
 class TestAlignmentStructure(TestSSW):
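
The updated SSW tests above pin down the new public behaviour of local_pairwise_align_ssw:
both inputs must be concrete sequence objects of the same type (DNA, RNA, or Protein; a
plain str or a generic Sequence raises TypeError, as does mixing types), and the result is
a (TabularMSA, score, start_end_positions) tuple rather than an Alignment. A minimal usage
sketch consistent with those assertions (the descriptions of the return values are
inferred from the tests, not taken from library documentation):

    from skbio import DNA, local_pairwise_align_ssw

    msa, score, start_end = local_pairwise_align_ssw(
        DNA('ATGGAAGCTATAAGCGCGGGTGAG'),
        DNA('AACTTATATAATAAAAATTATATATTCGTTGGGTTCTTTTGATATAAATC'))

    # msa: TabularMSA holding the aligned (gapped) query and target sequences
    # score: optimal alignment score
    # start_end: (start, end) position pairs for query and target, or None
    #            when the structure reports query_begin == -1
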
diff --git a/skbio/alignment/tests/test_tabular_msa.py b/skbio/alignment/tests/test_tabular_msa.py
new file mode 100644
index 0000000..595994a
--- /dev/null
+++ b/skbio/alignment/tests/test_tabular_msa.py
@@ -0,0 +1,3676 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import copy
+import unittest
+import functools
+import itertools
+import types
+
+import six
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+import scipy.stats
+
+from skbio import Sequence, DNA, RNA, Protein, TabularMSA
+from skbio.sequence._iupac_sequence import IUPACSequence
+from skbio.util._decorator import classproperty, overrides
+from skbio.util._testing import (ReallyEqualMixin, MetadataMixinTests,
+                                 PositionalMetadataMixinTests,
+                                 assert_index_equal,
+                                 assert_data_frame_almost_equal)
+
+
+class TabularMSASubclass(TabularMSA):
+    """Used for testing purposes."""
+    pass
+
+
+class TestTabularMSAMetadata(unittest.TestCase, ReallyEqualMixin,
+                             MetadataMixinTests):
+    def setUp(self):
+        self._metadata_constructor_ = functools.partial(TabularMSA, [])
+
+
+class TestTabularMSAPositionalMetadata(unittest.TestCase, ReallyEqualMixin,
+                                       PositionalMetadataMixinTests):
+    def setUp(self):
+        def factory(axis_len, positional_metadata=None):
+            return TabularMSA([DNA('A' * axis_len)],
+                              positional_metadata=positional_metadata)
+        self._positional_metadata_constructor_ = factory
+
+
+class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
+    def test_from_dict_empty(self):
+        self.assertEqual(TabularMSA.from_dict({}), TabularMSA([], index=[]))
+
+    def test_from_dict_single_sequence(self):
+        self.assertEqual(TabularMSA.from_dict({'foo': DNA('ACGT')}),
+                         TabularMSA([DNA('ACGT')], index=['foo']))
+
+    def test_from_dict_multiple_sequences(self):
+        msa = TabularMSA.from_dict(
+            {1: DNA('ACG'), 2: DNA('GGG'), 3: DNA('TAG')})
+        # Sort because order is arbitrary.
+        msa.sort()
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACG'), DNA('GGG'), DNA('TAG')], index=[1, 2, 3]))
+
+    def test_from_dict_invalid_input(self):
+        # Basic test to make sure error-checking in the TabularMSA constructor
+        # is being invoked.
+        with six.assertRaisesRegex(
+                self, ValueError, 'must match the number of positions'):
+            TabularMSA.from_dict({'a': DNA('ACG'), 'b': DNA('ACGT')})
+
+    def test_constructor_invalid_dtype(self):
+        with six.assertRaisesRegex(self, TypeError,
+                                   'IUPACSequence.*Sequence'):
+            TabularMSA([Sequence('')])
+
+        with six.assertRaisesRegex(self, TypeError, 'IUPACSequence.*int'):
+            TabularMSA([42, DNA('')])
+
+    def test_constructor_not_monomorphic(self):
+        with six.assertRaisesRegex(self, TypeError,
+                                   'matching type.*RNA.*DNA'):
+            TabularMSA([DNA(''), RNA('')])
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   'matching type.*float.*Protein'):
+            TabularMSA([Protein(''), Protein(''), 42.0, Protein('')])
+
+    def test_constructor_unequal_length(self):
+        with six.assertRaisesRegex(
+                self, ValueError,
+                'must match the number of positions.*1 != 0'):
+            TabularMSA([Protein(''), Protein('P')])
+
+        with six.assertRaisesRegex(
+                self, ValueError,
+                'must match the number of positions.*1 != 3'):
+            TabularMSA([Protein('PAW'), Protein('ABC'), Protein('A')])
+
+    def test_constructor_non_iterable(self):
+        with self.assertRaises(TypeError):
+            TabularMSA(42)
+
+    def test_constructor_non_unique_labels(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('ACGT')], index=[1, 1])
+
+        assert_index_equal(msa.index, pd.Int64Index([1, 1]))
+
+    def test_constructor_minter_and_index_both_provided(self):
+        with six.assertRaisesRegex(self, ValueError, 'both.*minter.*index'):
+            TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str,
+                       index=['a', 'b'])
+
+    def test_constructor_index_length_mismatch_iterable(self):
+        with six.assertRaisesRegex(self, ValueError,
+                                   'sequences.*2.*index length.*0'):
+            TabularMSA([DNA('ACGT'), DNA('TGCA')], index=iter([]))
+
+    def test_constructor_index_length_mismatch_index_object(self):
+        with six.assertRaisesRegex(self, ValueError,
+                                   'sequences.*2.*index length.*0'):
+            TabularMSA([DNA('ACGT'), DNA('TGCA')], index=pd.Index([]))
+
+    def test_constructor_empty_no_index(self):
+        # sequence empty
+        msa = TabularMSA([])
+        self.assertIsNone(msa.dtype)
+        self.assertEqual(msa.shape, (0, 0))
+        assert_index_equal(msa.index, pd.Index([]))
+        with self.assertRaises(StopIteration):
+            next(iter(msa))
+
+        # position empty
+        seqs = [DNA(''), DNA('')]
+        msa = TabularMSA(seqs)
+        self.assertIs(msa.dtype, DNA)
+        self.assertEqual(msa.shape, (2, 0))
+        assert_index_equal(msa.index, pd.Int64Index([0, 1]))
+        self.assertEqual(list(msa), seqs)
+
+    def test_constructor_empty_with_labels(self):
+        # sequence empty
+        msa = TabularMSA([], minter=lambda x: x)
+        assert_index_equal(msa.index, pd.Index([]))
+
+        msa = TabularMSA([], index=iter([]))
+        assert_index_equal(msa.index, pd.Index([]))
+
+        # position empty
+        msa = TabularMSA([DNA('', metadata={'id': 42}),
+                          DNA('', metadata={'id': 43})], minter='id')
+        assert_index_equal(msa.index, pd.Index([42, 43]))
+
+        msa = TabularMSA([DNA(''), DNA('')], index=iter([42, 43]))
+        assert_index_equal(msa.index, pd.Index([42, 43]))
+
+    def test_constructor_non_empty_no_labels_provided(self):
+        # 1x3
+        seqs = [DNA('ACG')]
+        msa = TabularMSA(seqs)
+        self.assertIs(msa.dtype, DNA)
+        self.assertEqual(msa.shape, (1, 3))
+        assert_index_equal(msa.index, pd.Index([0]))
+        self.assertEqual(list(msa), seqs)
+
+        # 3x1
+        seqs = [DNA('A'), DNA('C'), DNA('G')]
+        msa = TabularMSA(seqs)
+        self.assertIs(msa.dtype, DNA)
+        self.assertEqual(msa.shape, (3, 1))
+        assert_index_equal(msa.index, pd.Index([0, 1, 2]))
+        self.assertEqual(list(msa), seqs)
+
+    def test_constructor_non_empty_with_labels_provided(self):
+        seqs = [DNA('ACG'), DNA('CGA'), DNA('GTT')]
+        msa = TabularMSA(seqs, minter=str)
+        self.assertIs(msa.dtype, DNA)
+        self.assertEqual(msa.shape, (3, 3))
+        assert_index_equal(msa.index, pd.Index(['ACG', 'CGA', 'GTT']))
+        self.assertEqual(list(msa), seqs)
+
+        msa = TabularMSA(seqs, index=iter([42, 43, 44]))
+        assert_index_equal(msa.index, pd.Index([42, 43, 44]))
+
+    def test_constructor_works_with_iterator(self):
+        seqs = [DNA('ACG'), DNA('CGA'), DNA('GTT')]
+        msa = TabularMSA(iter(seqs), minter=str)
+        self.assertIs(msa.dtype, DNA)
+        self.assertEqual(msa.shape, (3, 3))
+        assert_index_equal(msa.index, pd.Index(['ACG', 'CGA', 'GTT']))
+        self.assertEqual(list(msa), seqs)
+
+    def test_constructor_with_multiindex_index(self):
+        msa = TabularMSA([DNA('AA'), DNA('GG')],
+                         index=[('foo', 42), ('bar', 43)])
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
+
+    def test_constructor_with_multiindex_minter(self):
+        def multiindex_minter(seq):
+            if str(seq) == 'AC':
+                return ('foo', 42)
+            else:
+                return ('bar', 43)
+
+        msa = TabularMSA([DNA('AC'), DNA('GG')], minter=multiindex_minter)
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
+
+    def test_copy_constructor_handles_missing_metadata_efficiently(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('----')])
+
+        copy = TabularMSA(msa)
+
+        self.assertIsNone(msa._metadata)
+        self.assertIsNone(msa._positional_metadata)
+        self.assertIsNone(copy._metadata)
+        self.assertIsNone(copy._positional_metadata)
+
+    def test_copy_constructor_with_metadata(self):
+        msa = TabularMSA([DNA('ACGT'),
+                          DNA('----')],
+                         metadata={'foo': 42},
+                         positional_metadata={'bar': range(4)},
+                         index=['idx1', 'idx2'])
+
+        copy = TabularMSA(msa)
+
+        self.assertEqual(msa, copy)
+        self.assertIsNot(msa, copy)
+        self.assertIsNot(msa.metadata, copy.metadata)
+        self.assertIsNot(msa.positional_metadata, copy.positional_metadata)
+        self.assertIsNot(msa.index, copy.index)
+
+    def test_copy_constructor_state_override_with_minter(self):
+        msa = TabularMSA([DNA('ACGT'),
+                          DNA('----')],
+                         metadata={'foo': 42},
+                         positional_metadata={'bar': range(4)},
+                         index=['idx1', 'idx2'])
+
+        copy = TabularMSA(msa, metadata={'foo': 43},
+                          positional_metadata={'bar': range(4, 8)},
+                          minter=str)
+
+        self.assertNotEqual(msa, copy)
+
+        self.assertEqual(
+            copy,
+            TabularMSA([DNA('ACGT'),
+                        DNA('----')],
+                       metadata={'foo': 43},
+                       positional_metadata={'bar': range(4, 8)},
+                       minter=str))
+
+    def test_copy_constructor_state_override_with_index(self):
+        msa = TabularMSA([DNA('ACGT'),
+                          DNA('----')],
+                         metadata={'foo': 42},
+                         positional_metadata={'bar': range(4)},
+                         index=['idx1', 'idx2'])
+
+        copy = TabularMSA(msa, metadata={'foo': 43},
+                          positional_metadata={'bar': range(4, 8)},
+                          index=['a', 'b'])
+
+        self.assertNotEqual(msa, copy)
+
+        self.assertEqual(
+            copy,
+            TabularMSA([DNA('ACGT'),
+                        DNA('----')],
+                       metadata={'foo': 43},
+                       positional_metadata={'bar': range(4, 8)},
+                       index=['a', 'b']))
+
+    def test_dtype(self):
+        self.assertIsNone(TabularMSA([]).dtype)
+        self.assertIs(TabularMSA([Protein('')]).dtype, Protein)
+
+        with self.assertRaises(AttributeError):
+            TabularMSA([]).dtype = DNA
+
+        with self.assertRaises(AttributeError):
+            del TabularMSA([]).dtype
+
+    def test_shape(self):
+        shape = TabularMSA([DNA('ACG'), DNA('GCA')]).shape
+        self.assertEqual(shape, (2, 3))
+        self.assertEqual(shape.sequence, shape[0])
+        self.assertEqual(shape.position, shape[1])
+        with self.assertRaises(TypeError):
+            shape[0] = 3
+
+        with self.assertRaises(AttributeError):
+            TabularMSA([]).shape = (3, 3)
+
+        with self.assertRaises(AttributeError):
+            del TabularMSA([]).shape
+
+    def test_index_getter(self):
+        index = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')], minter=str).index
+        self.assertIsInstance(index, pd.Index)
+        assert_index_equal(index, pd.Index(['AC', 'AG', 'AT']))
+
+        # immutable
+        with self.assertRaises(TypeError):
+            index[1] = 'AA'
+        # original state is maintained
+        assert_index_equal(index, pd.Index(['AC', 'AG', 'AT']))
+
+    def test_index_mixed_type(self):
+        msa = TabularMSA([DNA('AC'), DNA('CA'), DNA('AA')],
+                         index=['abc', 'd', 42])
+
+        assert_index_equal(msa.index, pd.Index(['abc', 'd', 42]))
+
+    def test_index_setter_empty(self):
+        msa = TabularMSA([])
+        msa.index = iter([])
+        assert_index_equal(msa.index, pd.Index([]))
+
+    def test_index_setter_non_empty(self):
+        msa = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')])
+        msa.index = range(3)
+        assert_index_equal(msa.index, pd.Index([0, 1, 2]))
+        msa.index = range(3, 6)
+        assert_index_equal(msa.index, pd.Index([3, 4, 5]))
+
+    def test_index_setter_length_mismatch(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
+        index = pd.Index(['ACGT', 'TGCA'])
+        assert_index_equal(msa.index, index)
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   'Length mismatch.*2.*3'):
+            msa.index = iter(['ab', 'cd', 'ef'])
+
+        # original state is maintained
+        assert_index_equal(msa.index, index)
+
+    def test_index_setter_non_unique_index(self):
+        msa = TabularMSA([RNA('UUU'), RNA('AAA')], minter=str)
+
+        msa.index = ['1', '1']
+
+        self.assertEqual(msa, TabularMSA([RNA('UUU'), RNA('AAA')],
+                                         index=['1', '1']))
+
+    def test_index_setter_tuples(self):
+        msa = TabularMSA([RNA('UUU'), RNA('AAA')])
+
+        msa.index = [('foo', 42), ('bar', 43)]
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(
+            msa.index,
+            pd.Index([('foo', 42), ('bar', 43)], tupleize_cols=True))
+
+    def test_index_deleter(self):
+        msa = TabularMSA([RNA('UUU'), RNA('AAA')], minter=str)
+        assert_index_equal(msa.index, pd.Index(['UUU', 'AAA']))
+        del msa.index
+        assert_index_equal(msa.index, pd.Index([0, 1]))
+
+    def test_bool(self):
+        self.assertFalse(TabularMSA([]))
+        self.assertFalse(TabularMSA([RNA('')]))
+        self.assertFalse(
+            TabularMSA([RNA('', metadata={'id': 1}),
+                        RNA('', metadata={'id': 2})], minter='id'))
+
+        self.assertTrue(TabularMSA([RNA('U')]))
+        self.assertTrue(TabularMSA([RNA('--'), RNA('..')]))
+        self.assertTrue(TabularMSA([RNA('AUC'), RNA('GCA')]))
+
+    def test_len(self):
+        self.assertEqual(len(TabularMSA([])), 0)
+        self.assertEqual(len(TabularMSA([DNA('')])), 1)
+        self.assertEqual(len(TabularMSA([DNA('AT'), DNA('AG'), DNA('AT')])), 3)
+
+    def test_iter(self):
+        with self.assertRaises(StopIteration):
+            next(iter(TabularMSA([])))
+
+        seqs = [DNA(''), DNA('')]
+        self.assertEqual(list(iter(TabularMSA(seqs))), seqs)
+
+        seqs = [DNA('AAA'), DNA('GCT')]
+        self.assertEqual(list(iter(TabularMSA(seqs))), seqs)
+
+    def test_reversed(self):
+        with self.assertRaises(StopIteration):
+            next(reversed(TabularMSA([])))
+
+        seqs = [DNA(''), DNA('', metadata={'id': 42})]
+        self.assertEqual(list(reversed(TabularMSA(seqs))), seqs[::-1])
+
+        seqs = [DNA('AAA'), DNA('GCT')]
+        self.assertEqual(list(reversed(TabularMSA(seqs))), seqs[::-1])
+
+    def test_eq_and_ne(self):
+        # Each element contains the components necessary to construct a
+        # TabularMSA object: seqs and kwargs. None of these objects (once
+        # constructed) should compare equal to one another.
+        components = [
+            # empties
+            ([], {}),
+            ([RNA('')], {}),
+            ([RNA('')], {'minter': str}),
+
+            # 1x1
+            ([RNA('U')], {'minter': str}),
+
+            # 2x3
+            ([RNA('AUG'), RNA('GUA')], {'minter': str}),
+
+            ([RNA('AG'), RNA('GG')], {}),
+            # has labels
+            ([RNA('AG'), RNA('GG')], {'minter': str}),
+            # different dtype
+            ([DNA('AG'), DNA('GG')], {'minter': str}),
+            # different labels
+            ([RNA('AG'), RNA('GG')], {'minter': lambda x: str(x) + '42'}),
+            # different sequence metadata
+            ([RNA('AG', metadata={'id': 42}), RNA('GG')], {'minter': str}),
+            # different sequence data, same labels
+            ([RNA('AG'), RNA('GA')],
+             {'minter': lambda x: 'AG' if 'AG' in x else 'GG'}),
+            # different MSA metadata
+            ([RNA('AG'), RNA('GG')], {'metadata': {'foo': 42}}),
+            ([RNA('AG'), RNA('GG')], {'metadata': {'foo': 43}}),
+            ([RNA('AG'), RNA('GG')], {'metadata': {'foo': 42, 'bar': 43}}),
+            # different MSA positional metadata
+            ([RNA('AG'), RNA('GG')],
+             {'positional_metadata': {'foo': [42, 43]}}),
+            ([RNA('AG'), RNA('GG')],
+             {'positional_metadata': {'foo': [43, 44]}}),
+            ([RNA('AG'), RNA('GG')],
+             {'positional_metadata': {'foo': [42, 43], 'bar': [43, 44]}}),
+        ]
+
+        for seqs, kwargs in components:
+            obj = TabularMSA(seqs, **kwargs)
+            self.assertReallyEqual(obj, obj)
+            self.assertReallyEqual(obj, TabularMSA(seqs, **kwargs))
+            self.assertReallyEqual(obj, TabularMSASubclass(seqs, **kwargs))
+
+        for (seqs1, kwargs1), (seqs2, kwargs2) in \
+                itertools.combinations(components, 2):
+            obj1 = TabularMSA(seqs1, **kwargs1)
+            obj2 = TabularMSA(seqs2, **kwargs2)
+            self.assertReallyNotEqual(obj1, obj2)
+            self.assertReallyNotEqual(obj1,
+                                      TabularMSASubclass(seqs2, **kwargs2))
+
+        # completely different types
+        msa = TabularMSA([])
+        self.assertReallyNotEqual(msa, 42)
+        self.assertReallyNotEqual(msa, [])
+        self.assertReallyNotEqual(msa, {})
+        self.assertReallyNotEqual(msa, '')
+
+    def test_eq_constructed_from_different_iterables_compare_equal(self):
+        msa1 = TabularMSA([DNA('ACGT')])
+        msa2 = TabularMSA((DNA('ACGT'),))
+        self.assertReallyEqual(msa1, msa2)
+
+    def test_eq_ignores_minter_str_and_lambda(self):
+        # as long as the labels generated by the minters are the same, it
+        # doesn't matter whether the minters are equal.
+        msa1 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})], minter='id')
+        msa2 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})],
+                          minter=lambda x: x.metadata['id'])
+        self.assertReallyEqual(msa1, msa2)
+
+    def test_eq_minter_and_index(self):
+        # as long as the resulting labels are the same, it doesn't matter
+        # whether they come from a minter or an explicit index.
+        msa1 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})], index=['a'])
+        msa2 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})], minter='id')
+        self.assertReallyEqual(msa1, msa2)
+
+    def test_reassign_index_empty(self):
+        # sequence empty
+        msa = TabularMSA([])
+        msa.reassign_index()
+        self.assertEqual(msa, TabularMSA([]))
+        assert_index_equal(msa.index, pd.Int64Index([]))
+
+        msa.reassign_index(minter=str)
+        self.assertEqual(msa, TabularMSA([], minter=str))
+        assert_index_equal(msa.index, pd.Index([]))
+
+        # position empty
+        msa = TabularMSA([DNA('')])
+        msa.reassign_index()
+        self.assertEqual(msa, TabularMSA([DNA('')]))
+        assert_index_equal(msa.index, pd.Index([0]))
+
+        msa.reassign_index(minter=str)
+        self.assertEqual(msa, TabularMSA([DNA('')], minter=str))
+        assert_index_equal(msa.index, pd.Index(['']))
+
+    def test_reassign_index_non_empty(self):
+        msa = TabularMSA([DNA('ACG', metadata={'id': 1}),
+                          DNA('AAA', metadata={'id': 2})], minter=str)
+        assert_index_equal(msa.index, pd.Index(['ACG', 'AAA']))
+
+        msa.reassign_index(minter='id')
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACG', metadata={'id': 1}),
+                        DNA('AAA', metadata={'id': 2})], minter='id'))
+        assert_index_equal(msa.index, pd.Index([1, 2]))
+
+        msa.reassign_index(mapping={1: 5})
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACG', metadata={'id': 1}),
+                        DNA('AAA', metadata={'id': 2})], index=[5, 2]))
+        assert_index_equal(msa.index, pd.Index([5, 2]))
+
+        msa.reassign_index()
+        assert_index_equal(msa.index, pd.Index([0, 1]))
+
+    def test_reassign_index_minter_and_mapping_both_provided(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   'both.*mapping.*minter.*'):
+            msa.reassign_index(minter=str, mapping={"ACGT": "fleventy"})
+
+        # original state is maintained
+        assert_index_equal(msa.index, pd.Index(['ACGT', 'TGCA']))
+
+    def test_reassign_index_with_mapping_dict_empty(self):
+        seqs = [DNA("A"), DNA("C"), DNA("G")]
+        msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
+
+        msa.reassign_index(mapping={})
+        self.assertEqual(msa, TabularMSA(seqs, index=[0.5, 1.5, 2.5]))
+
+    def test_reassign_index_with_mapping_dict_subset(self):
+        seqs = [DNA("A"), DNA("C"), DNA("G")]
+        mapping = {0.5: "a", 2.5: "c"}
+
+        msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
+        msa.reassign_index(mapping=mapping)
+
+        self.assertEqual(msa, TabularMSA(seqs, index=['a', 1.5, 'c']))
+
+    def test_reassign_index_with_mapping_dict_superset(self):
+        seqs = [DNA("A"), DNA("C"), DNA("G")]
+        mapping = {0.5: "a", 1.5: "b", 2.5: "c", 3.5: "d"}
+
+        msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
+        msa.reassign_index(mapping=mapping)
+
+        self.assertEqual(msa, TabularMSA(seqs, index=['a', 'b', 'c']))
+
+    def test_reassign_index_with_mapping_callable(self):
+        seqs = [DNA("A"), DNA("C"), DNA("G")]
+
+        msa = TabularMSA(seqs, index=[0, 1, 2])
+        msa.reassign_index(mapping=str)
+
+        self.assertEqual(msa, TabularMSA(seqs, index=['0', '1', '2']))
+
+    def test_reassign_index_non_unique_existing_index(self):
+        seqs = [DNA("A"), DNA("C"), DNA("G")]
+        mapping = {0.5: "a", 1.5: "b", 2.5: "c", 3.5: "d"}
+
+        msa = TabularMSA(seqs, index=[0.5, 0.5, 0.5])
+        msa.reassign_index(mapping=mapping)
+
+        self.assertEqual(msa, TabularMSA(seqs, index=['a', 'a', 'a']))
+
+    def test_reassign_index_non_unique_new_index(self):
+        seqs = [DNA("A"), DNA("C"), DNA("G")]
+        mapping = {0.5: "a", 1.5: "a", 2.5: "a"}
+
+        msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
+        msa.reassign_index(mapping=mapping)
+
+        self.assertEqual(msa, TabularMSA(seqs, index=['a', 'a', 'a']))
+
+    def test_reassign_index_to_multiindex_with_minter(self):
+        msa = TabularMSA([DNA('AC'), DNA('.G')])
+
+        def multiindex_minter(seq):
+            if str(seq) == 'AC':
+                return ('foo', 42)
+            else:
+                return ('bar', 43)
+
+        msa.reassign_index(minter=multiindex_minter)
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('AC'), DNA('.G')],
+                       index=[('foo', 42), ('bar', 43)]))
+
+    def test_reassign_index_to_multiindex_with_mapping(self):
+        msa = TabularMSA([DNA('AC'), DNA('.G')])
+        mapping = {0: ('foo', 42), 1: ('bar', 43)}
+
+        msa.reassign_index(mapping=mapping)
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('AC'), DNA('.G')],
+                       index=[('foo', 42), ('bar', 43)]))
+
+    @unittest.skipIf(six.PY2, "Everything is orderable in Python 2.")
+    def test_sort_on_unorderable_msa_index(self):
+        msa = TabularMSA([DNA('AAA'), DNA('ACG'), DNA('---')],
+                         index=[42, 41, 'foo'])
+        with self.assertRaises(TypeError):
+            msa.sort()
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('AAA'), DNA('ACG'), DNA('---')],
+                       index=[42, 41, 'foo']))
+
+    def test_sort_empty_on_msa_index(self):
+        msa = TabularMSA([], index=[])
+        msa.sort()
+        self.assertEqual(msa, TabularMSA([], index=[]))
+
+        msa = TabularMSA([], index=[])
+        msa.sort(ascending=False)
+        self.assertEqual(msa, TabularMSA([], index=[]))
+
+    def test_sort_single_sequence_on_msa_index(self):
+        msa = TabularMSA([DNA('ACGT')], index=[42])
+        msa.sort()
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=[42]))
+
+        msa = TabularMSA([DNA('ACGT')], index=[42])
+        msa.sort(ascending=False)
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=[42]))
+
+    def test_sort_multiple_sequences_on_msa_index(self):
+        msa = TabularMSA([
+            DNA('TC'), DNA('GG'), DNA('CC')], index=['z', 'a', 'b'])
+        msa.sort(ascending=True)
+        self.assertEqual(
+            msa,
+            TabularMSA([
+                DNA('GG'), DNA('CC'), DNA('TC')], index=['a', 'b', 'z']))
+
+        msa = TabularMSA([
+            DNA('TC'), DNA('GG'), DNA('CC')], index=['z', 'a', 'b'])
+        msa.sort(ascending=False)
+        self.assertEqual(
+            msa,
+            TabularMSA([
+                DNA('TC'), DNA('CC'), DNA('GG')], index=['z', 'b', 'a']))
+
+    def test_sort_on_labels_with_some_repeats(self):
+        msa = TabularMSA([
+            DNA('TCCG', metadata={'id': 10}),
+            DNA('TAGG', metadata={'id': 10}),
+            DNA('GGGG', metadata={'id': 8}),
+            DNA('TGGG', metadata={'id': 10}),
+            DNA('ACGT', metadata={'id': 0}),
+            DNA('TAGA', metadata={'id': 10})], minter='id')
+        msa.sort()
+        self.assertEqual(
+            msa,
+            TabularMSA([
+                DNA('ACGT', metadata={'id': 0}),
+                DNA('GGGG', metadata={'id': 8}),
+                DNA('TCCG', metadata={'id': 10}),
+                DNA('TAGG', metadata={'id': 10}),
+                DNA('TGGG', metadata={'id': 10}),
+                DNA('TAGA', metadata={'id': 10})], minter='id'))
+
+    def test_sort_on_key_with_all_repeats(self):
+        msa = TabularMSA([
+            DNA('TTT', metadata={'id': 'a'}),
+            DNA('TTT', metadata={'id': 'b'}),
+            DNA('TTT', metadata={'id': 'c'})], minter=str)
+        msa.sort()
+        self.assertEqual(
+            msa,
+            TabularMSA([
+                DNA('TTT', metadata={'id': 'a'}),
+                DNA('TTT', metadata={'id': 'b'}),
+                DNA('TTT', metadata={'id': 'c'})], minter=str))
+
+    def test_sort_already_sorted(self):
+        msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[1, 2, 3])
+        msa.sort()
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[1, 2, 3]))
+
+        msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[3, 2, 1])
+        msa.sort(ascending=False)
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[3, 2, 1]))
+
+    def test_sort_reverse_sorted(self):
+        msa = TabularMSA([DNA('T'), DNA('G'), DNA('A')], index=[3, 2, 1])
+        msa.sort()
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('A'), DNA('G'), DNA('T')], index=[1, 2, 3]))
+
+        msa = TabularMSA([DNA('T'), DNA('G'), DNA('A')], index=[1, 2, 3])
+        msa.sort(ascending=False)
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('A'), DNA('G'), DNA('T')], index=[3, 2, 1]))
+
+    def test_sort_multiindex(self):
+        multiindex = [(2, 'a'), (1, 'c'), (3, 'b')]
+        sortedindex = [(1, 'c'), (2, 'a'), (3, 'b')]
+        msa = TabularMSA([DNA('A'), DNA('C'), DNA('G')], index=multiindex)
+        msa.sort()
+        self.assertEqual(msa, TabularMSA([DNA('C'), DNA('A'), DNA('G')],
+                                         index=sortedindex))
+
+    def test_sort_multiindex_with_level(self):
+        multiindex = [(2, 'a'), (1, 'c'), (3, 'b')]
+        first_sorted = [(1, 'c'), (2, 'a'), (3, 'b')]
+        second_sorted = [(2, 'a'), (3, 'b'), (1, 'c')]
+
+        msa = TabularMSA([DNA('A'), DNA('C'), DNA('G')], index=multiindex)
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+
+        msa.sort(level=0)
+        self.assertEqual(msa, TabularMSA([DNA('C'), DNA('A'), DNA('G')],
+                                         index=first_sorted))
+        msa.sort(level=1)
+        self.assertEqual(msa, TabularMSA([DNA('A'), DNA('G'), DNA('C')],
+                                         index=second_sorted))
+
+    def test_to_dict_falsey_msa(self):
+        self.assertEqual(TabularMSA([]).to_dict(), {})
+        self.assertEqual(TabularMSA([RNA('')], index=['foo']).to_dict(),
+                         {'foo': RNA('')})
+
+    def test_to_dict_non_empty(self):
+        seqs = [Protein('PAW', metadata={'id': 42}),
+                Protein('WAP', metadata={'id': -999})]
+        msa = TabularMSA(seqs, minter='id')
+        self.assertEqual(msa.to_dict(), {42: seqs[0], -999: seqs[1]})
+
+    def test_to_dict_duplicate_labels(self):
+        msa = TabularMSA([DNA("A"), DNA("G")], index=[0, 0])
+
+        with self.assertRaises(ValueError) as cm:
+            msa.to_dict()
+
+        self.assertIn("unique", str(cm.exception))
+
+    def test_from_dict_to_dict_roundtrip(self):
+        d = {}
+        self.assertEqual(TabularMSA.from_dict(d).to_dict(), d)
+
+        # can roundtrip even with mixed key types
+        d1 = {'a': DNA('CAT'), 42: DNA('TAG')}
+        d2 = TabularMSA.from_dict(d1).to_dict()
+        self.assertEqual(d2, d1)
+        self.assertIs(d1['a'], d2['a'])
+        self.assertIs(d1[42], d2[42])
+
+
+class TestContains(unittest.TestCase):
+    def test_no_sequences(self):
+        msa = TabularMSA([], index=[])
+
+        self.assertFalse('' in msa)
+        self.assertFalse('foo' in msa)
+
+    def test_with_str_labels(self):
+        msa = TabularMSA([RNA('AU'), RNA('A.')], index=['foo', 'bar'])
+
+        self.assertTrue('foo' in msa)
+        self.assertTrue('bar' in msa)
+        self.assertFalse('baz' in msa)
+        self.assertFalse(0 in msa)
+
+    def test_with_int_labels(self):
+        msa = TabularMSA([RNA('AU'), RNA('A.')], index=[42, -1])
+
+        self.assertTrue(42 in msa)
+        self.assertTrue(-1 in msa)
+        self.assertFalse(0 in msa)
+        self.assertFalse('foo' in msa)
+
+
+class TestCopy(unittest.TestCase):
+    # Note: tests for metadata/positional_metadata are in mixin tests above.
+
+    def test_no_sequences(self):
+        msa = TabularMSA([])
+        msa_copy = copy.copy(msa)
+
+        self.assertEqual(msa, msa_copy)
+        self.assertIsNot(msa, msa_copy)
+        self.assertIsNot(msa._seqs, msa_copy._seqs)
+
+    def test_with_sequences(self):
+        msa = TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')])
+        msa_copy = copy.copy(msa)
+
+        self.assertEqual(msa, msa_copy)
+        self.assertIsNot(msa, msa_copy)
+        self.assertIsNot(msa._seqs, msa_copy._seqs)
+        self.assertIsNot(msa[0], msa_copy[0])
+        self.assertIsNot(msa[1], msa_copy[1])
+
+        msa_copy.append(DNA('AAAA'))
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
+
+        msa_copy._seqs[0].metadata['bar'] = 42
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
+
+        msa_copy._seqs[0].metadata['foo'].append(2)
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACGT', metadata={'foo': [1, 2]}), DNA('TGCA')]))
+
+    def test_with_index(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], index=['foo', 'bar'])
+        msa_copy = copy.copy(msa)
+
+        self.assertEqual(msa, msa_copy)
+        self.assertIsNot(msa, msa_copy)
+        self.assertIsNot(msa.index, msa_copy.index)
+
+        msa_copy.index = [1, 2]
+        assert_index_equal(msa_copy.index, pd.Index([1, 2]))
+        assert_index_equal(msa.index, pd.Index(['foo', 'bar']))
+
+
+class TestDeepCopy(unittest.TestCase):
+    # Note: tests for metadata/positional_metadata are in mixin tests above.
+
+    def test_no_sequences(self):
+        msa = TabularMSA([])
+        msa_copy = copy.deepcopy(msa)
+
+        self.assertEqual(msa, msa_copy)
+        self.assertIsNot(msa, msa_copy)
+        self.assertIsNot(msa._seqs, msa_copy._seqs)
+
+    def test_with_sequences(self):
+        msa = TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')])
+        msa_copy = copy.deepcopy(msa)
+
+        self.assertEqual(msa, msa_copy)
+        self.assertIsNot(msa, msa_copy)
+        self.assertIsNot(msa._seqs, msa_copy._seqs)
+        self.assertIsNot(msa[0], msa_copy[0])
+        self.assertIsNot(msa[1], msa_copy[1])
+
+        msa_copy.append(DNA('AAAA'))
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
+
+        msa_copy._seqs[0].metadata['bar'] = 42
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
+
+        msa_copy._seqs[0].metadata['foo'].append(2)
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
+
+    def test_with_index(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], index=['foo', 'bar'])
+        msa_copy = copy.deepcopy(msa)
+
+        self.assertEqual(msa, msa_copy)
+        self.assertIsNot(msa, msa_copy)
+        self.assertIsNot(msa.index, msa_copy.index)
+
+        msa_copy.index = [1, 2]
+        assert_index_equal(msa_copy.index, pd.Index([1, 2]))
+        assert_index_equal(msa.index, pd.Index(['foo', 'bar']))
+
+
+class SharedIndexTests(object):
+    def get(self, obj, indexable):
+        raise NotImplementedError()
+
+    def test_tuple_too_big(self):
+        with self.assertRaises(ValueError):
+            self.get(TabularMSA([]), (None, None, None))
+
+    def test_empty_msa_slice(self):
+        msa = TabularMSA([])
+
+        new = self.get(msa, slice(None, None))
+
+        self.assertIsNot(msa, new)
+        self.assertEqual(msa, new)
+
+    def test_msa_slice_all_first_axis(self):
+        msa = TabularMSA([RNA("AAA", metadata={1: 1}),
+                          RNA("AAU", positional_metadata={0: [1, 2, 3]})],
+                         metadata={0: 0}, positional_metadata={1: [3, 2, 1]})
+
+        new_slice = self.get(msa, slice(None))
+        new_ellipsis = self.get(msa, Ellipsis)
+
+        self.assertIsNot(msa, new_slice)
+        for s1, s2 in zip(msa, new_slice):
+            self.assertIsNot(s1, s2)
+        self.assertEqual(msa, new_slice)
+
+        self.assertIsNot(msa, new_ellipsis)
+        for s1, s2 in zip(msa, new_ellipsis):
+            self.assertIsNot(s1, s2)
+        self.assertEqual(msa, new_ellipsis)
+
+    def test_msa_slice_all_both_axes(self):
+        msa = TabularMSA([RNA("AAA", metadata={1: 1}),
+                          RNA("AAU", positional_metadata={0: [1, 2, 3]})],
+                         metadata={0: 0}, positional_metadata={1: [3, 2, 1]})
+
+        new_slice = self.get(msa, (slice(None), slice(None)))
+        new_ellipsis = self.get(msa, (Ellipsis, Ellipsis))
+
+        self.assertIsNot(msa, new_slice)
+        for s1, s2 in zip(msa, new_slice):
+            self.assertIsNot(s1, s2)
+        self.assertEqual(msa, new_slice)
+
+        self.assertIsNot(msa, new_ellipsis)
+        for s1, s2 in zip(msa, new_ellipsis):
+            self.assertIsNot(s1, s2)
+        self.assertEqual(msa, new_ellipsis)
+
+    def test_bool_index_first_axis(self):
+        a = DNA("AAA", metadata={1: 1})
+        b = DNA("NNN", positional_metadata={1: ['x', 'y', 'z']})
+        c = DNA("AAC")
+        msa = TabularMSA([a, b, c], metadata={0: 'x'},
+                         positional_metadata={0: [1, 2, 3]},
+                         index=[True, False, True])
+
+        new = self.get(msa, [True, True, False])
+
+        self.assertEqual(new, TabularMSA([a, b], metadata={0: 'x'},
+                                         positional_metadata={0: [1, 2, 3]},
+                                         index=[True, False]))
+
+    def test_bool_index_second_axis(self):
+        a = DNA("AAA", metadata={1: 1})
+        b = DNA("NNN", positional_metadata={1: ['x', 'y', 'z']})
+        c = DNA("AAC")
+        msa = TabularMSA([a, b, c], metadata={0: 'x'},
+                         positional_metadata={0: [1, 2, 3]},
+                         index=[True, False, True])
+
+        new = self.get(msa, (Ellipsis, [True, True, False]))
+
+        self.assertEqual(new, TabularMSA([a[0, 1], b[0, 1], c[0, 1]],
+                                         metadata={0: 'x'},
+                                         positional_metadata={0: [1, 2]},
+                                         index=[True, False, True]))
+
+    def test_bool_index_both_axes(self):
+        a = DNA("AAA", metadata={1: 1})
+        b = DNA("NNN", positional_metadata={1: ['x', 'y', 'z']})
+        c = DNA("AAC")
+        msa = TabularMSA([a, b, c], metadata={0: 'x'},
+                         positional_metadata={0: [1, 2, 3]},
+                         index=[True, False, True])
+
+        new = self.get(msa, ([False, True, True], [True, True, False]))
+
+        self.assertEqual(new, TabularMSA([b[0, 1], c[0, 1]],
+                                         metadata={0: 'x'},
+                                         positional_metadata={0: [1, 2]},
+                                         index=[False, True]))
+
+    def test_bool_index_too_big(self):
+        msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")],
+                         index=[False, True, False])
+
+        with self.assertRaises(IndexError):
+            self.get(msa, [False, False, False, False])
+        with self.assertRaises(IndexError):
+            self.get(msa, [True, True, True, True])
+
+        with self.assertRaises(IndexError):
+            self.get(msa, (Ellipsis, [True, False, True, False, True]))
+
+        with self.assertRaises(IndexError):
+            self.get(msa, ([True, False, True, False],
+                           [True, False, True, False, False]))
+
+    def test_bool_index_too_small(self):
+        msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")],
+                         index=[False, True, False])
+
+        with self.assertRaises(IndexError):
+            self.get(msa, [False])
+        with self.assertRaises(IndexError):
+            self.get(msa, [True])
+
+        with self.assertRaises(IndexError):
+            self.get(msa, (Ellipsis, [True]))
+
+        with self.assertRaises(IndexError):
+            self.get(msa, ([True, False], [True, False, True, False]))
+
+    def test_bad_scalar(self):
+        msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")])
+
+        with self.assertRaises((KeyError, TypeError)):
+            self.get(msa, "foo")
+
+        with self.assertRaises(IndexError):
+            self.get(msa, (Ellipsis, "foo"))
+
+    def test_bad_fancy_index(self):
+        msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")])
+
+        with self.assertRaises((KeyError, TypeError)):
+            self.get(msa, [0, "foo"])
+
+        with self.assertRaises(IndexError):
+            self.get(msa, (Ellipsis, [0, "foo"]))
+
+    def test_absurd_slice(self):
+        msa = TabularMSA([DNA("ABCD"), DNA("GHKM"), DNA("NRST")])
+
+        with self.assertRaises(TypeError):
+            self.get(msa, {set(1): 0})
+
+
+class SharedPropertyIndexTests(SharedIndexTests):
+    def setUp(self):
+        self.combo_msa = TabularMSA([
+            DNA('ACGTA', metadata={0: 0},
+                positional_metadata={0: [1, 2, 3, 4, 5]}),
+            DNA('CGTAC', metadata={1: 1},
+                positional_metadata={1: [1, 2, 3, 4, 5]}),
+            DNA('GTACG', metadata={2: 2},
+                positional_metadata={2: [1, 2, 3, 4, 5]}),
+            DNA('TACGT', metadata={3: 3},
+                positional_metadata={3: [1, 2, 3, 4, 5]}),
+            DNA('ACGTT', metadata={4: 4},
+                positional_metadata={4: [1, 2, 3, 4, 5]})
+            ], index=list('ABCDE'), metadata={'x': 'x'},
+            positional_metadata={'y': [5, 4, 3, 2, 1]})
+
+        """First off, sorry to the next person who has to deal with this.
+
+           The next few tests will try to slice by a bunch of stuff, with
+           all combinations. Each element in the two lists is a tuple where
+           the first element is the thing to slice with, and the second is
+           the equivalent fancy index which describes the same range.
+
+           This lets us describe the results a little more declaratively
+           without setting up a thousand tests for each possible combination.
+           This does mean the iloc via a fancy index and simple scalar must
+           work correctly.
+        """
+        # This will be overridden for TestLoc because the first axis uses labels
+        self.combo_first_axis = [
+            ([], []),
+            (slice(0, 0), []),
+            (Ellipsis, [0, 1, 2, 3, 4]),
+            (slice(None), [0, 1, 2, 3, 4]),
+            (slice(0, 10000), [0, 1, 2, 3, 4]),
+            (3, 3),
+            (-4, 1),
+            ([0], [0]),
+            ([2], [2]),
+            (slice(1, 3), [1, 2]),
+            (slice(3, 0, -1), [3, 2, 1]),
+            ([-3, 2, 1], [2, 2, 1]),
+            ([-4, -3, -2, -1], [1, 2, 3, 4]),
+            (np.array([-3, 2, 1]), [2, 2, 1]),
+            ([True, True, False, False, True], [0, 1, 4]),
+            (np.array([True, True, False, True, False]), [0, 1, 3]),
+            (range(3), [0, 1, 2]),
+            ([slice(0, 2), slice(3, 4), 4], [0, 1, 3, 4])
+        ]
+        # Same in both TestLoc and TestILoc
+        self.combo_second_axis = self.combo_first_axis
+
+    def test_combo_single_axis_natural(self):
+        for idx, exp in self.combo_first_axis:
+            self.assertEqual(self.get(self.combo_msa, idx),
+                             self.combo_msa.iloc[exp],
+                             msg="%r did not match iloc[%r]" % (idx, exp))
+
+    def test_combo_first_axis_only(self):
+        for idx, exp in self.combo_first_axis:
+            self.assertEqual(self.get(self.combo_msa, idx, axis=0),
+                             self.combo_msa.iloc[exp, ...],
+                             msg="%r did not match iloc[%r, ...]" % (idx, exp))
+
+    def test_combo_second_axis_only(self):
+        for idx, exp in self.combo_second_axis:
+            self.assertEqual(self.get(self.combo_msa, idx, axis=1),
+                             self.combo_msa.iloc[..., exp],
+                             msg="%r did not match iloc[..., %r]" % (idx, exp))
+
+    def test_combo_both_axes(self):
+        for idx1, exp1 in self.combo_first_axis:
+            for idx2, exp2 in self.combo_second_axis:
+                self.assertEqual(self.get(self.combo_msa, (idx1, idx2)),
+                                 self.combo_msa.iloc[exp1, exp2],
+                                 msg=("%r did not match iloc[%r, %r]"
+                                      % ((idx1, idx2), exp1, exp2)))
+
+
+class TestLoc(SharedPropertyIndexTests, unittest.TestCase):
+    def setUp(self):
+        SharedPropertyIndexTests.setUp(self)
+        self.combo_first_axis = [
+            ([], []),
+            (slice('X', "Z"), []),
+            ('A', 0),
+            ('E', 4),
+            (['B'], [1]),
+            (np.asarray(['B']), [1]),
+            (slice('A', 'C', 2), [0, 2]),
+            (slice('C', 'A', -2), [2, 0]),
+            (slice('A', 'B'), [0, 1]),
+            (slice(None), [0, 1, 2, 3, 4]),
+            (slice('A', None), [0, 1, 2, 3, 4]),
+            (slice(None, 'C'), [0, 1, 2]),
+            (Ellipsis, [0, 1, 2, 3, 4]),
+            (self.combo_msa.index, [0, 1, 2, 3, 4]),
+            (['B', 'A', 'A', 'C'], [1, 0, 0, 2]),
+            (np.asarray(['B', 'A', 'A', 'C']), [1, 0, 0, 2]),
+            ([True, False, True, True, False], [0, 2, 3]),
+            (np.asarray([True, False, True, True, False]), [0, 2, 3]),
+        ]
+
+    def test_forced_axis_returns_copy(self):
+        msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
+
+        self.assertIsNot(msa.loc(axis=1), msa.loc)
+
+    def test_forced_axis_no_mutate(self):
+        msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
+
+        self.assertEqual(msa.loc(axis=1)[0], Sequence("EE"))
+        self.assertEqual(msa.loc[0], Protein("EVANTHQMVS"))
+        self.assertIsNone(msa.loc._axis)
+
+    def get(self, obj, indexable, axis=None):
+        if axis is None:
+            return obj.loc[indexable]
+        else:
+            return obj.loc(axis=axis)[indexable]
+
+    def test_complex_single_label(self):
+        a = DNA("ACG")
+        b = DNA("ACT")
+        c = DNA("ACA")
+        msa = TabularMSA([a, b, c], index=[('a', 0), ('a', 1), ('b', 0)])
+
+        self.assertIs(a, self.get(msa, (('a', 0),)))
+        self.assertIs(b, self.get(msa, (('a', 1),)))
+        self.assertIs(c, self.get(msa, (('b', 0),)))
+
+    def test_partial_label(self):
+        a = DNA("ACG")
+        b = DNA("ACT")
+        c = DNA("ACA")
+        msa = TabularMSA([a, b, c], index=[('a', 0), ('a', 1), ('b', 0)])
+        exp_a = TabularMSA([a, b], index=[0, 1])
+        exp_b = TabularMSA([c], index=[0])
+
+        self.assertEqual(self.get(msa, 'a'), exp_a)
+        self.assertEqual(self.get(msa, 'b'), exp_b)
+
+    def test_label_not_exists(self):
+        msa = TabularMSA([DNA("ACG")], index=['foo'])
+
+        with self.assertRaises(KeyError):
+            self.get(msa, 'bar')
+
+    def test_duplicate_index_nonscalar_label(self):
+        a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+        d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
+                         positional_metadata={'z': [1, 2, 3, 4]},
+                         index=[0, 0, 1, 2])
+
+        self.assertEqual(self.get(msa, 0),
+                         TabularMSA([a, b], metadata={'x': 'y'},
+                                    positional_metadata={'z': [1, 2, 3, 4]},
+                                    index=[0, 0]))
+
+    def test_duplicate_index_scalar_label(self):
+        a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+        d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
+                         positional_metadata={'z': [1, 2, 3, 4]},
+                         index=[0, 0, 1, 2])
+
+        self.assertEqual(self.get(msa, 1), c)
+
+    def test_multiindex_complex(self):
+        a = DNA("ACG")
+        b = DNA("ACT")
+        c = DNA("ACA")
+        msa = TabularMSA([a, b, c], index=[('a', 0), ('a', 1), ('b', 0)])
+        exp = TabularMSA([a, c], index=[('a', 0), ('b', 0)])
+
+        self.assertEqual(self.get(msa, [('a', 0), ('b', 0)]), exp)
+
+    def test_fancy_index_missing_label(self):
+        msa = TabularMSA([DNA("ACG")], index=['foo'])
+
+        with self.assertRaises(KeyError):
+            self.get(msa, ['foo', 'bar'])
+
+        with self.assertRaises(KeyError):
+            self.get(msa, ['bar'])
+
+    def test_multiindex_fancy_indexing_incomplete_label(self):
+        a = RNA("UUAG", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = RNA("UAAG", metadata={1: 0}, positional_metadata={1: [1, 2, 3, 4]})
+        c = RNA("UAA-", metadata={2: 0}, positional_metadata={2: [1, 2, 3, 4]})
+        d = RNA("UA-G", metadata={3: 0}, positional_metadata={3: [1, 2, 3, 4]})
+        msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
+                         positional_metadata={'c': ['a', 'b', 'c', 'd']},
+                         index=[('a', 'x', 0), ('a', 'x', 1), ('a', 'y', 2),
+                                ('b', 'x', 0)])
+
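+        # Selecting with an incomplete MultiIndex label plus Ellipsis (all
+        # positions) is expected to drop the matched levels, so only the
+        # trailing integer level remains in the result's index.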
+        self.assertEqual(self.get(msa, (('a', 'x'), Ellipsis)),
+                         TabularMSA([a, b], metadata={'x': 'y'},
+                                    positional_metadata={'c': ['a', 'b', 'c',
+                                                               'd']},
+                                    index=[0, 1]))
+
+    def test_multiindex_complicated_axis(self):
+        a = RNA("UUAG", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = RNA("UAAG", metadata={1: 0}, positional_metadata={1: [1, 2, 3, 4]})
+        c = RNA("UAA-", metadata={2: 0}, positional_metadata={2: [1, 2, 3, 4]})
+        d = RNA("UA-G", metadata={3: 0}, positional_metadata={3: [1, 2, 3, 4]})
+        msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
+                         positional_metadata={'c': ['a', 'b', 'c', 'd']},
+                         index=[('a', 'x', 0), ('a', 'x', 1), ('a', 'y', 2),
+                                ('b', 'x', 0)])
+
+        self.assertEqual(self.get(msa, (([False, True, False, True],
+                                         'x', 0), Ellipsis)),
+                         TabularMSA([d], metadata={'x': 'y'},
+                                    positional_metadata={'c': ['a', 'b', 'c',
+                                                               'd']},
+                                    index=[('b', 'x', 0)]))
+
+    def test_multiindex_complicated_axis_empty_selection(self):
+        a = RNA("UUAG", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = RNA("UAAG", metadata={1: 0}, positional_metadata={1: [1, 2, 3, 4]})
+        c = RNA("UAA-", metadata={2: 0}, positional_metadata={2: [1, 2, 3, 4]})
+        d = RNA("UA-G", metadata={3: 0}, positional_metadata={3: [1, 2, 3, 4]})
+        msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
+                         positional_metadata={'c': ['a', 'b', 'c', 'd']},
+                         index=[('a', 'x', 0), ('a', 'x', 1), ('a', 'y', 2),
+                                ('b', 'x', 0)])
+
+        self.assertEqual(self.get(msa, (([False, True, False, True],
+                                         'x', 2), Ellipsis)),
+                         TabularMSA([], metadata={'x': 'y'},
+                                    # TODO: Change for #1198
+                                    positional_metadata=None,
+                                    index=[]))
+
+    def test_bool_index_scalar_bool_label(self):
+        a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+        d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
+                         positional_metadata={'z': [1, 2, 3, 4]},
+                         index=[False, True, False, False])
+
+        self.assertEqual(self.get(msa, True), b)
+
+    def test_bool_index_nonscalar_bool_label(self):
+        a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("A-GA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("AAGA", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+        d = DNA("ACCA", metadata={3: 3}, positional_metadata={3: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c, d], metadata={'x': 'y'},
+                         positional_metadata={'z': [1, 2, 3, 4]},
+                         index=[False, True, False, True])
+
+        self.assertEqual(self.get(msa, True),
+                         TabularMSA([b, d], metadata={'x': 'y'},
+                                    positional_metadata={'z': [1, 2, 3, 4]},
+                                    index=[True, True]))
+
+    def test_unhashable_index_first_axis(self):
+        s = slice(0, 1)
+        msa = TabularMSA([Protein(""), Protein(""), Protein("")],
+                         index=[s, slice(1, 2), slice(2, 3)])
+
+        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+            self.get(msa, Ellipsis, axis=0)
+
+        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+            self.get(msa, s, axis=0)
+
+        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+            self.get(msa, 0, axis=0)
+
+    def test_unhashable_index_second_axis(self):
+        msa = TabularMSA([Protein("AA"), Protein("CC"), Protein("AA")],
+                         index=[slice(0, 1), slice(1, 2), slice(2, 3)])
+
+        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+            self.get(msa, Ellipsis, axis=1)
+
+        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+            self.get(msa, [0, 1], axis=1)
+
+        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+            self.get(msa, 0, axis=1)
+
+    def test_unhashable_index_both_axes(self):
+        s = [0, 1]
+        msa = TabularMSA([RNA("AA"), RNA("CC"), RNA("AA")],
+                         index=[s, [1, 2], [2, 3]])
+
+        with six.assertRaisesRegex(self, TypeError, 'unhashable.*list'):
+            # Even selecting everything raises here, which implies that
+            # copy cannot be derived from getitem with unhashable labels.
+            self.get(msa, (Ellipsis, Ellipsis))
+
+        with six.assertRaisesRegex(self, TypeError, 'unhashable.*list'):
+            self.get(msa, (s, 0))
+
+        with six.assertRaisesRegex(self, TypeError, 'unhashable.*list'):
+            self.get(msa, ('x', 10))
+
+    def test_categorical_index_scalar_label(self):
+        msa = TabularMSA([RNA("ACUG"), RNA("ACUA"), RNA("AAUG"), RNA("AC-G")],
+                         index=pd.CategoricalIndex(['a', 'b', 'b', 'c']))
+
+        self.assertEqual(self.get(msa, 'a'), RNA("ACUG"))
+
+    def test_categorical_index_nonscalar_label(self):
+        msa = TabularMSA([RNA("ACUG"), RNA("ACUA"), RNA("AAUG"), RNA("AC-G")],
+                         index=pd.CategoricalIndex(['a', 'b', 'b', 'c']))
+
+        self.assertEqual(self.get(msa, 'b'),
+                         TabularMSA([RNA("ACUA"), RNA("AAUG")],
+                                    index=pd.CategoricalIndex(
+                                        ['b', 'b'], categories=['a', 'b', 'c'])
+                                    ))
+
+    def test_float_index_out_of_order_slice(self):
+        msa = TabularMSA([DNA("ACGG"), DNA("AAGC"), DNA("AAAA"), DNA("ACTC")],
+                         index=[0.1, 2.4, 5.1, 2.6])
+
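+        # Label-based slicing requires a sorted (monotonic) index: slicing
+        # the unsorted float index raises KeyError, while the same slice
+        # works after sort(), selecting the labels within [0.1, 2.7].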
+        with self.assertRaises(KeyError):
+            self.get(msa, slice(0.1, 2.7))
+
+        msa.sort()
+        result = self.get(msa, slice(0.1, 2.7))
+
+        self.assertEqual(result, TabularMSA([DNA("ACGG"), DNA("AAGC"),
+                                             DNA("ACTC")],
+                                            index=[0.1, 2.4, 2.6]))
+
+    def test_nonscalar_fancy_index(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
+                         index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   'tuple.*independent.*MultiIndex'):
+            self.get(msa, ['a', 'b'])
+
+    def test_missing_first_nonscalar_fancy_index(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
+                         index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
+
+        with self.assertRaises(KeyError):
+            self.get(msa, ['x', 'a', 'b'])
+
+    def test_tuple_fancy_index(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
+                         index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   'tuple.*pd.MultiIndex.*label'):
+            self.get(msa, ((('a', 0, 1), ('b', 0, 1)), Ellipsis))
+
+    def test_non_multiindex_tuple(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')])
+
+        with six.assertRaisesRegex(self, TypeError, 'tuple.*first axis'):
+            self.get(msa, ((0, 1), Ellipsis))
+
+    def test_assertion_exists_for_future_failure_of_get_sequence_loc(self):
+        # Ideally we wouldn't need this test or the branch it covers, but
+        # the most likely pandas failure mode is returning a Series instead
+        # of the value itself. We should make sure the user gets an error
+        # should this ever happen again; a Series of DNA looks pretty weird.
+        msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')])
+
+        with self.assertRaises(AssertionError):
+            msa._get_sequence_loc_([1, 2])
+
+
+class TestILoc(SharedPropertyIndexTests, unittest.TestCase):
+    def setUp(self):
+        SharedPropertyIndexTests.setUp(self)
+        self.combo_first_axis = self.combo_second_axis
+
+    def test_forced_axis_returns_copy(self):
+        msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
+
+        self.assertIsNot(msa.iloc(axis=1), msa.iloc)
+
+    def test_forced_axis_no_mutate(self):
+        msa = TabularMSA([Protein("EVANTHQMVS"), Protein("EVANTH*MVS")])
+
+        self.assertEqual(msa.iloc(axis=1)[0], Sequence("EE"))
+        self.assertEqual(msa.iloc[0], Protein("EVANTHQMVS"))
+        self.assertIsNone(msa.iloc._axis)
+
+    def get(self, obj, indexable, axis=None):
+        if axis is None:
+            return obj.iloc[indexable]
+        else:
+            return obj.iloc(axis=axis)[indexable]
+
+    def test_entire_fancy_first_axis(self):
+        msa = TabularMSA([
+            DNA("ACCA", metadata={'a': 'foo'},
+                positional_metadata={'a': [7, 6, 5, 4]}),
+            DNA("GGAA", metadata={'b': 'bar'},
+                positional_metadata={'b': [3, 4, 5, 6]})
+            ], metadata={'c': 'baz'},
+            positional_metadata={'foo': [1, 2, 3, 4]})
+
+        new_np_simple = self.get(msa, np.arange(2))
+        new_list_simple = self.get(msa, [0, 1])
+        new_list_backwards = self.get(msa, [-2, -1])
+
+        self.assertIsNot(msa, new_np_simple)
+        self.assertEqual(msa, new_np_simple)
+
+        self.assertIsNot(msa, new_list_simple)
+        self.assertEqual(msa, new_list_simple)
+
+        self.assertIsNot(msa, new_list_backwards)
+        self.assertEqual(msa, new_list_backwards)
+
+    def test_fancy_entire_second_axis(self):
+        msa = TabularMSA([
+            DNA("ACCA", metadata={'a': 'foo'},
+                positional_metadata={'a': [7, 6, 5, 4]}),
+            DNA("GGAA", metadata={'b': 'bar'},
+                positional_metadata={'b': [3, 4, 5, 6]})
+            ], metadata={'c': 'baz'},
+            positional_metadata={'foo': [1, 2, 3, 4]})
+
+        new_np_simple = self.get(msa, (Ellipsis, np.arange(4)))
+        new_list_simple = self.get(msa, (Ellipsis, [0, 1, 2, 3]))
+        new_list_backwards = self.get(msa, (Ellipsis, [-4, -3, -2, -1]))
+
+        self.assertIsNot(msa, new_np_simple)
+        self.assertEqual(msa, new_np_simple)
+
+        self.assertIsNot(msa, new_list_simple)
+        self.assertEqual(msa, new_list_simple)
+
+        self.assertIsNot(msa, new_list_backwards)
+        self.assertEqual(msa, new_list_backwards)
+
+    def test_fancy_entire_both_axes(self):
+        msa = TabularMSA([
+            DNA("ACCA", metadata={'a': 'foo'},
+                positional_metadata={'a': [7, 6, 5, 4]}),
+            DNA("GGAA", metadata={'b': 'bar'},
+                positional_metadata={'b': [3, 4, 5, 6]})
+            ], metadata={'c': 'baz'},
+            positional_metadata={'foo': [1, 2, 3, 4]})
+
+        new_np_simple = self.get(msa, (np.arange(2), np.arange(4)))
+        new_list_simple = self.get(msa, ([0, 1], [0, 1, 2, 3]))
+        new_list_backwards = self.get(msa, ([-2, -1], [-4, -3, -2, -1]))
+
+        self.assertIsNot(msa, new_np_simple)
+        self.assertEqual(msa, new_np_simple)
+
+        self.assertIsNot(msa, new_list_simple)
+        self.assertEqual(msa, new_list_simple)
+
+        self.assertIsNot(msa, new_list_backwards)
+        self.assertEqual(msa, new_list_backwards)
+
+    def test_fancy_out_of_bound(self):
+        with self.assertRaises(IndexError):
+            self.get(TabularMSA([DNA('AC')]), [0, 1, 2])
+
+        with self.assertRaises(IndexError):
+            self.get(TabularMSA([DNA('AC')]), (Ellipsis, [0, 1, 2]))
+
+    def test_fancy_empty_both_axis(self):
+        msa = TabularMSA([DNA("ACGT", metadata={'x': 1}),
+                          DNA("TGCA", metadata={'y': 2})], index=list("AB"))
+
+        new_np_simple = self.get(msa, (np.arange(0), np.arange(0)))
+        new_list_simple = self.get(msa, ([], []))
+
+        self.assertEqual(TabularMSA([]), new_np_simple)
+        self.assertEqual(TabularMSA([]), new_list_simple)
+
+    def test_fancy_standard_first_axis(self):
+        a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c], metadata={3: 3},
+                         positional_metadata={3: [1, 2, 3, 4]})
+
+        self.assertEqual(self.get(msa, [0, 2]),
+                         TabularMSA([a, c], metadata={3: 3},
+                                    positional_metadata={3: [1, 2, 3, 4]},
+                                    index=[0, 2]))
+
+    def test_fancy_standard_second_axis(self):
+        a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c], metadata={3: 3},
+                         positional_metadata={3: [1, 2, 3, 4]})
+
+        self.assertEqual(self.get(msa, (Ellipsis, [0, 2])),
+                         TabularMSA([a[0, 2], b[0, 2], c[0, 2]],
+                                    metadata={3: 3},
+                                    positional_metadata={3: [1, 3]},
+                                    index=[0, 1, 2]))
+
+    def test_fancy_standard_both_axes(self):
+        a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c], metadata={3: 3},
+                         positional_metadata={3: [1, 2, 3, 4]})
+
+        self.assertEqual(self.get(msa, ([0, 2], [0, 2])),
+                         TabularMSA([a[0, 2], c[0, 2]],
+                                    metadata={3: 3},
+                                    positional_metadata={3: [1, 3]},
+                                    index=[0, 2]))
+
+    def test_fancy_empty_first_axis(self):
+        a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c], metadata={3: 3},
+                         positional_metadata={3: [1, 2, 3, 4]})
+        # TODO: Change for #1198
+        self.assertEqual(self.get(msa, []),
+                         TabularMSA([], metadata={3: 3}))
+
+    def test_fancy_empty_second_axis(self):
+        a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c], metadata={3: 3},
+                         positional_metadata={3: [1, 2, 3, 4]})
+
+        self.assertEqual(self.get(msa, (Ellipsis, [])),
+                         TabularMSA([a[0:0], b[0:0], c[0:0]],
+                                    metadata={3: 3},
+                                    positional_metadata={3: np.array(
+                                        [], dtype=int)}))
+
+    def test_fancy_empty_both_axes(self):
+        a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c], metadata={3: 3},
+                         positional_metadata={3: [1, 2, 3, 4]})
+        # TODO: Change for #1198
+        self.assertEqual(self.get(msa, ([], [])),
+                         TabularMSA([], metadata={3: 3}))
+
+    def test_fancy_out_of_bounds_first_axis(self):
+        msa = TabularMSA([DNA("ACGT"), DNA("GCAT")])
+
+        with self.assertRaises(IndexError):
+            self.get(msa, [10])
+
+        with self.assertRaises(IndexError):
+            self.get(msa, [0, 1, 10])
+
+    def test_fancy_out_of_bounds_second_axis(self):
+        msa = TabularMSA([DNA("ACGT"), DNA("GCAT")])
+
+        with self.assertRaises(IndexError):
+            self.get(msa, (Ellipsis, [10]))
+
+        with self.assertRaises(IndexError):
+            self.get(msa, (Ellipsis, [1, 2, 4]))
+
+    def test_get_scalar_first_axis(self):
+        a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
+        b = DNA("GG", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
+        msa = TabularMSA([a, b])
+
+        new0 = self.get(msa, 0)
+        new1 = self.get(msa, 1)
+
+        self.assertEqual(new0, a)
+        self.assertEqual(new1, b)
+
+    def test_get_scalar_second_axis(self):
+        a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
+        b = DNA("GC", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
+        msa = TabularMSA([a, b], positional_metadata={'z': [5, 6]})
+
+        new0 = self.get(msa, (Ellipsis, 0))
+        new1 = self.get(msa, (Ellipsis, 1))
+
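+        # A single position comes back as a plain Sequence: the MSA's
+        # positional metadata at that position becomes the Sequence's
+        # metadata, and each parent sequence contributes its own positional
+        # metadata value (NaN where a key is absent).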
+        self.assertEqual(new0,
+                         Sequence("AG", metadata={'z': 5},
+                                  positional_metadata={'x': [1, np.nan],
+                                                       'y': [np.nan, 3]}))
+        self.assertEqual(new1,
+                         Sequence("AC", metadata={'z': 6},
+                                  positional_metadata={'x': [2, np.nan],
+                                                       'y': [np.nan, 4]}))
+
+    def test_scalar_sliced_first_axis(self):
+        a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("ACGT", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c], metadata={3: 3},
+                         positional_metadata={3: [1, 2, 3, 4]})
+
+        self.assertEqual(self.get(msa, (1, [1, 3])),
+                         DNA("CT", metadata={1: 1},
+                             positional_metadata={1: [2, 4]}))
+
+    def test_scalar_sliced_second_axis(self):
+        a = DNA("ACGT", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+        b = DNA("ACGA", metadata={1: 1}, positional_metadata={1: [1, 2, 3, 4]})
+        c = DNA("ACGT", metadata={2: 2}, positional_metadata={2: [1, 2, 3, 4]})
+
+        msa = TabularMSA([a, b, c], metadata={3: 3},
+                         positional_metadata={3: [1, 2, 3, 4]})
+
+        self.assertEqual(self.get(msa, ([1, 2], 3)),
+                         Sequence("AT", metadata={3: 4},
+                                  positional_metadata={1: [4, np.nan],
+                                                       2: [np.nan, 4]}))
+
+    def test_get_scalar_out_of_bound_first_axis(self):
+        a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
+        b = DNA("GC", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
+        msa = TabularMSA([a, b], positional_metadata={'z': [5, 6]})
+
+        with self.assertRaises(IndexError):
+            self.get(msa, 3)
+
+    def test_get_scalar_out_of_bound_second_axis(self):
+        a = DNA("AA", metadata={'a': 'foo'}, positional_metadata={'x': [1, 2]})
+        b = DNA("GC", metadata={'b': 'bar'}, positional_metadata={'y': [3, 4]})
+        msa = TabularMSA([a, b], positional_metadata={'z': [5, 6]})
+
+        with self.assertRaises(IndexError):
+            self.get(msa, (Ellipsis, 3))
+
+
+class TestGetItem(SharedIndexTests, unittest.TestCase):
+    def get(self, obj, indexable):
+        return obj[indexable]
+
+    def test_uses_iloc_not_loc(self):
+        a = DNA("ACGA")
+        b = DNA("ACGT")
+        msa = TabularMSA([a, b], index=[1, 0])
+
+        self.assertIs(msa[0], a)
+        self.assertIs(msa[1], b)
+
+
+class TestConstructor(unittest.TestCase):
+    def setUp(self):
+        self.seqs = [DNA("ACGT"), DNA("GCTA")]
+        self.m = {'x': 'y', 0: 1}
+        self.pm = pd.DataFrame({'foo': [1, 2, 3, 4]})
+        self.index = pd.Index(['a', 'b'])
+        self.msa = TabularMSA(self.seqs, metadata=self.m,
+                              positional_metadata=self.pm, index=self.index)
+
+    def test_no_override(self):
+        result = self.msa._constructor_()
+
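+        # With no overrides, _constructor_ should return an equal MSA whose
+        # sequences and metadata are copies rather than the same objects.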
+        self.assertEqual(self.msa, result)
+
+        for seq1, seq2 in zip(result, self.msa):
+            self.assertIsNot(seq1, seq2)
+
+        self.assertIsNot(result.metadata, self.msa.metadata)
+        self.assertIsNot(result.positional_metadata,
+                         self.msa.positional_metadata)
+
+    def test_sequence_override_same_seqs(self):
+        result = self.msa._constructor_(sequences=self.seqs)
+
+        self.assertEqual(self.msa, result)
+
+        for seq1, seq2 in zip(result, self.msa):
+            self.assertIsNot(seq1, seq2)
+
+        self.assertIsNot(result.metadata, self.msa.metadata)
+        self.assertIsNot(result.positional_metadata,
+                         self.msa.positional_metadata)
+
+    def test_sequence_override(self):
+        seqs = [RNA("ACGU"), RNA("GCUA")]
+
+        result = self.msa._constructor_(sequences=seqs)
+
+        self.assertNotEqual(result, self.msa)
+        self.assertEqual(list(result), seqs)
+        assert_index_equal(result.index, self.index)
+        self.assertEqual(result.metadata, self.m)
+        assert_data_frame_almost_equal(result.positional_metadata, self.pm)
+
+    def test_no_override_no_md(self):
+        msa = TabularMSA(self.seqs, index=self.index)
+
+        self.assertEqual(msa, msa._constructor_())
+
+    def test_metadata_override(self):
+        new_md = {'foo': {'x': 0}}
+
+        result = self.msa._constructor_(metadata=new_md)
+
+        self.assertNotEqual(result, self.msa)
+        self.assertEqual(list(result), self.seqs)
+        assert_index_equal(result.index, self.index)
+        self.assertEqual(result.metadata, new_md)
+        assert_data_frame_almost_equal(result.positional_metadata, self.pm)
+
+    def test_positional_metadata_override(self):
+        new_pm = pd.DataFrame({'x': [1, 2, 3, 4]})
+
+        result = self.msa._constructor_(positional_metadata=new_pm)
+
+        self.assertNotEqual(result, self.msa)
+        self.assertEqual(list(result), self.seqs)
+        assert_index_equal(result.index, self.index)
+        self.assertEqual(result.metadata, self.m)
+        assert_data_frame_almost_equal(result.positional_metadata, new_pm)
+
+    def test_index_override(self):
+        new_index = pd.Index([('a', 0), ('b', 1)])
+
+        result = self.msa._constructor_(index=new_index)
+
+        self.assertNotEqual(result, self.msa)
+        self.assertEqual(list(result), self.seqs)
+        assert_index_equal(result.index, new_index)
+        self.assertEqual(result.metadata, self.m)
+        assert_data_frame_almost_equal(result.positional_metadata, self.pm)
+
+
+class TestAppend(unittest.TestCase):
+    def test_to_empty_msa(self):
+        msa = TabularMSA([])
+
+        msa.append(DNA('ACGT'))
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')]))
+
+    def test_to_empty_with_minter(self):
+        msa = TabularMSA([], minter=str)
+
+        msa.append(DNA('ACGT'))
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')]))
+
+    def test_to_empty_msa_with_index(self):
+        msa = TabularMSA([])
+
+        msa.append(DNA('ACGT'), index='a')
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACGT')], index=['a']))
+
+    def test_to_empty_msa_invalid_dtype(self):
+        msa = TabularMSA([])
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   'IUPACSequence.*Sequence'):
+            msa.append(Sequence(''))
+
+        self.assertEqual(msa, TabularMSA([]))
+
+    def test_to_empty_msa_invalid_minter(self):
+        msa = TabularMSA([])
+
+        with self.assertRaises(KeyError):
+            msa.append(DNA('ACGT'), minter='id')
+
+        self.assertEqual(msa, TabularMSA([]))
+
+    def test_to_non_empty_msa_invalid_minter(self):
+        msa = TabularMSA([DNA('ACGT')], index=['foo'])
+
+        with self.assertRaises(KeyError):
+            msa.append(DNA('AAAA'), minter='id')
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
+
+    def test_wrong_dtype_rna(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   'matching type.*RNA.*DNA'):
+            msa.append(RNA('UUUU'))
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+
+    def test_wrong_dtype_float(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   'matching type.*float.*DNA'):
+            msa.append(42.0)
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+
+    def test_wrong_length(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+
+        with six.assertRaisesRegex(
+                self, ValueError,
+                'must match the number of positions.*5 != 4'):
+            msa.append(DNA('ACGTA'))
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+
+    def test_with_minter_metadata_key(self):
+        msa = TabularMSA([DNA('', metadata={'id': 'a'}),
+                          DNA('', metadata={'id': 'b'})],
+                         minter='id')
+
+        msa.append(DNA('', metadata={'id': 'c'}), minter='id')
+
+        self.assertEqual(
+            msa,
+            TabularMSA([
+                DNA('', metadata={'id': 'a'}),
+                DNA('', metadata={'id': 'b'}),
+                DNA('', metadata={'id': 'c'})], minter='id'))
+
+    def test_with_minter_callable(self):
+        msa = TabularMSA([DNA('', metadata={'id': 'a'}),
+                          DNA('', metadata={'id': 'b'})],
+                         minter='id')
+
+        msa.append(DNA(''), minter=str)
+
+        self.assertEqual(
+            msa,
+            TabularMSA([
+                DNA('', metadata={'id': 'a'}),
+                DNA('', metadata={'id': 'b'}),
+                DNA('')], index=['a', 'b', '']))
+
+    def test_with_index(self):
+        msa = TabularMSA([DNA('AC'), DNA('GT')], index=['a', 'b'])
+
+        msa.append(DNA('--'), index='foo')
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('AC'), DNA('GT'), DNA('--')],
+                       index=['a', 'b', 'foo']))
+
+    def test_no_index_no_minter(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+
+        msa.append(DNA('AAAA'))
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('ACGT'), DNA('TGCA'), DNA('AAAA')]))
+
+    def test_no_index_no_minter_msa_has_non_default_labels(self):
+        msa = TabularMSA([DNA(''), DNA('')], index=['a', 'b'])
+
+        with six.assertRaisesRegex(self, ValueError, "provide.*minter.*index"):
+            msa.append(DNA(''))
+
+        self.assertEqual(msa, TabularMSA([DNA(''), DNA('')], index=['a', 'b']))
+
+    def test_with_index_type_change(self):
+        msa = TabularMSA([DNA('A'), DNA('.')])
+
+        msa.append(DNA('C'), index='foo')
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('A'), DNA('.'), DNA('C')], index=[0, 1, 'foo']))
+
+    def test_with_index_and_minter(self):
+        msa = TabularMSA([])
+
+        with six.assertRaisesRegex(self, ValueError, "both.*minter.*index"):
+            msa.append(DNA(''), index='', minter=str)
+
+        self.assertEqual(msa, TabularMSA([]))
+
+    def test_multiple_appends_to_empty_msa_with_default_labels(self):
+        msa = TabularMSA([])
+
+        msa.append(RNA('U--'))
+        msa.append(RNA('AA.'))
+
+        self.assertEqual(msa, TabularMSA([RNA('U--'), RNA('AA.')]))
+
+    def test_multiple_appends_to_non_empty_msa_with_default_labels(self):
+        msa = TabularMSA([RNA('U--'), RNA('AA.')])
+
+        msa.append(RNA('ACG'))
+        msa.append(RNA('U-U'))
+
+        self.assertEqual(
+            msa,
+            TabularMSA([RNA('U--'), RNA('AA.'), RNA('ACG'), RNA('U-U')]))
+
+    def test_with_multiindex_index(self):
+        msa = TabularMSA([])
+
+        msa.append(DNA('AA'), index=('foo', 42))
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(msa.index, pd.Index([('foo', 42)]))
+
+    def test_with_multiindex_minter(self):
+        def multiindex_minter(seq):
+            return ('foo', 42)
+
+        msa = TabularMSA([])
+
+        msa.append(DNA('AC'), minter=multiindex_minter)
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(msa.index, pd.Index([('foo', 42)]))
+
+
+class TestExtend(unittest.TestCase):
+    def test_empty_to_empty(self):
+        msa = TabularMSA([])
+
+        msa.extend([])
+
+        self.assertEqual(msa, TabularMSA([]))
+
+    def test_empty_to_non_empty(self):
+        msa = TabularMSA([DNA('AC')])
+
+        msa.extend([])
+
+        self.assertEqual(msa, TabularMSA([DNA('AC')]))
+
+    def test_single_sequence(self):
+        msa = TabularMSA([DNA('AC')])
+
+        msa.extend([DNA('-C')])
+
+        self.assertEqual(msa, TabularMSA([DNA('AC'), DNA('-C')]))
+
+    def test_multiple_sequences(self):
+        msa = TabularMSA([DNA('AC')])
+
+        msa.extend([DNA('-C'), DNA('AG')])
+
+        self.assertEqual(msa, TabularMSA([DNA('AC'), DNA('-C'), DNA('AG')]))
+
+    def test_from_iterable(self):
+        msa = TabularMSA([])
+
+        msa.extend(iter([DNA('ACGT'), DNA('TGCA')]))
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+
+    def test_from_tabular_msa_default_labels(self):
+        msa = TabularMSA([DNA('AC'), DNA('TG')])
+
+        msa.extend(TabularMSA([DNA('GG'), DNA('CC'), DNA('AA')],
+                              index=['a', 'b', 'c']))
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('AC'), DNA('TG'), DNA('GG'), DNA('CC'),
+                        DNA('AA')]))
+
+    def test_from_tabular_msa_non_default_labels(self):
+        msa = TabularMSA([DNA('AC'), DNA('TG')], index=['a', 'b'])
+
+        with six.assertRaisesRegex(self, ValueError, 'provide.*minter.*index'):
+            msa.extend(TabularMSA([DNA('GG'), DNA('CC')]))
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('AC'), DNA('TG')], index=['a', 'b']))
+
+    def test_from_tabular_msa_with_index(self):
+        msa1 = TabularMSA([DNA('AC'), DNA('TG')])
+        msa2 = TabularMSA([DNA('GG'), DNA('CC'), DNA('AA')])
+
+        msa1.extend(msa2, index=msa2.index)
+
+        self.assertEqual(
+            msa1,
+            TabularMSA([DNA('AC'), DNA('TG'), DNA('GG'), DNA('CC'),
+                        DNA('AA')], index=[0, 1, 0, 1, 2]))
+
+    def test_minter_and_index(self):
+        with six.assertRaisesRegex(self, ValueError, 'both.*minter.*index'):
+            TabularMSA([]).extend([DNA('ACGT')], minter=str, index=['foo'])
+
+    def test_no_minter_no_index_to_empty(self):
+        msa = TabularMSA([])
+
+        msa.extend([DNA('ACGT'), DNA('TGCA')])
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+
+    def test_no_minter_no_index_to_non_empty(self):
+        msa = TabularMSA([DNA('ACGT')])
+
+        msa.extend([DNA('TGCA'), DNA('--..')])
+
+        self.assertEqual(msa,
+                         TabularMSA([DNA('ACGT'), DNA('TGCA'), DNA('--..')]))
+
+    def test_no_minter_no_index_msa_has_non_default_labels(self):
+        msa = TabularMSA([DNA('ACGT')], index=[1])
+
+        with six.assertRaisesRegex(self, ValueError, 'provide.*minter.*index'):
+            msa.extend([DNA('TGCA')])
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=[1]))
+
+    def test_invalid_dtype(self):
+        msa = TabularMSA([])
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   'IUPACSequence.*Sequence'):
+            msa.extend([Sequence('')])
+
+        self.assertEqual(msa, TabularMSA([]))
+
+    def test_invalid_minter(self):
+        # This test (and the following error case tests) checks that the
+        # MSA isn't mutated when an error is raised. The "invalid" sequence
+        # is preceded by valid sequence(s) to test one possible (buggy)
+        # implementation of extend(): looping over sequences and calling
+        # append(). These tests ensure that "valid" sequences aren't appended
+        # to the MSA before the error is raised.
+        msa = TabularMSA([DNA('ACGT')], index=['foo'])
+
+        with self.assertRaises(KeyError):
+            msa.extend([DNA('AAAA', metadata={'id': 'foo'}),
+                        DNA('----')], minter='id')
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
+
+    def test_mismatched_dtype(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   'matching type.*RNA.*DNA'):
+            msa.extend([DNA('----'), RNA('UUUU')])
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+
+    def test_wrong_dtype_float(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   'matching type.*float.*DNA'):
+            msa.extend([DNA('GGGG'), 42.0])
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+
+    def test_wrong_length(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+
+        with six.assertRaisesRegex(
+                self, ValueError,
+                'must match the number of positions.*5 != 4'):
+            msa.extend([DNA('TTTT'), DNA('ACGTA')])
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+
+    def test_sequences_index_length_mismatch(self):
+        msa = TabularMSA([])
+
+        with six.assertRaisesRegex(
+                self, ValueError,
+                'sequences.*2.*index length.*3'):
+            msa.extend([DNA('TTTT'), DNA('ACGT')], index=['a', 'b', 'c'])
+
+        self.assertEqual(msa, TabularMSA([]))
+
+    def test_with_minter_metadata_key(self):
+        msa = TabularMSA([DNA('', metadata={'id': 'a'}),
+                          DNA('', metadata={'id': 'b'})],
+                         minter='id')
+
+        msa.extend([DNA('', metadata={'id': 'c'}),
+                    DNA('', metadata={'id': 'd'})], minter='id')
+
+        self.assertEqual(
+            msa,
+            TabularMSA([
+                DNA('', metadata={'id': 'a'}),
+                DNA('', metadata={'id': 'b'}),
+                DNA('', metadata={'id': 'c'}),
+                DNA('', metadata={'id': 'd'})], minter='id'))
+
+    def test_with_minter_callable(self):
+        msa = TabularMSA([DNA('A', metadata={'id': 'a'}),
+                          DNA('C', metadata={'id': 'b'})],
+                         minter='id')
+
+        msa.extend([DNA('G'), DNA('T')], minter=str)
+
+        self.assertEqual(
+            msa,
+            TabularMSA([
+                DNA('A', metadata={'id': 'a'}),
+                DNA('C', metadata={'id': 'b'}),
+                DNA('G'),
+                DNA('T')], index=['a', 'b', 'G', 'T']))
+
+    def test_with_index(self):
+        msa = TabularMSA([DNA('AC'), DNA('GT')], index=['a', 'b'])
+
+        msa.extend([DNA('--'), DNA('..')], index=['foo', 'bar'])
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('AC'), DNA('GT'), DNA('--'), DNA('..')],
+                       index=['a', 'b', 'foo', 'bar']))
+
+    def test_with_index_type_change(self):
+        msa = TabularMSA([DNA('A'), DNA('.')])
+
+        msa.extend([DNA('C')], index=['foo'])
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('A'), DNA('.'), DNA('C')], index=[0, 1, 'foo']))
+
+    def test_multiple_extends_to_empty_msa_with_default_labels(self):
+        msa = TabularMSA([])
+
+        msa.extend([RNA('U-'), RNA('GG')])
+        msa.extend([RNA('AA')])
+
+        self.assertEqual(msa, TabularMSA([RNA('U-'), RNA('GG'), RNA('AA')]))
+
+    def test_multiple_extends_to_non_empty_msa_with_default_labels(self):
+        msa = TabularMSA([RNA('U--'), RNA('AA.')])
+
+        msa.extend([RNA('ACG'), RNA('GCA')])
+        msa.extend([RNA('U-U')])
+
+        self.assertEqual(
+            msa,
+            TabularMSA([RNA('U--'),
+                        RNA('AA.'),
+                        RNA('ACG'),
+                        RNA('GCA'),
+                        RNA('U-U')]))
+
+    def test_with_multiindex_index(self):
+        msa = TabularMSA([])
+
+        msa.extend([DNA('AA'), DNA('GG')], index=[('foo', 42), ('bar', 43)])
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
+
+    def test_with_multiindex_minter(self):
+        def multiindex_minter(seq):
+            if str(seq) == 'AC':
+                return ('foo', 42)
+            else:
+                return ('bar', 43)
+
+        msa = TabularMSA([])
+
+        msa.extend([DNA('AC'), DNA('GG')], minter=multiindex_minter)
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
+
+    def test_with_index_object(self):
+        msa = TabularMSA([])
+
+        msa.extend([DNA('AA'), DNA('GG')],
+                   index=pd.Index(['foo', 'bar']))
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('AA'),
+                        DNA('GG')], index=['foo', 'bar']))
+
+
+class TestJoin(unittest.TestCase):
+    def test_invalid_how(self):
+        with six.assertRaisesRegex(self, ValueError, '`how`'):
+            TabularMSA([]).join(TabularMSA([]), how='really')
+
+    def test_invalid_other_type(self):
+        with six.assertRaisesRegex(self, TypeError, 'TabularMSA.*DNA'):
+            TabularMSA([]).join(DNA('ACGT'))
+
+    def test_dtype_mismatch(self):
+        with six.assertRaisesRegex(self, TypeError, 'dtype.*RNA.*DNA'):
+            TabularMSA([DNA('AC')]).join(TabularMSA([RNA('UG')]))
+
+        with six.assertRaisesRegex(self, TypeError, 'dtype.*None.*DNA'):
+            TabularMSA([DNA('AC')]).join(TabularMSA([]))
+
+        with six.assertRaisesRegex(self, TypeError, 'dtype.*DNA.*None'):
+            TabularMSA([]).join(TabularMSA([DNA('AC')]))
+
+    def test_duplicate_index_labels(self):
+        with six.assertRaisesRegex(self, ValueError,
+                                   "This MSA's index labels.*unique"):
+            TabularMSA([DNA('AC'), DNA('--')], index=[0, 0]).join(
+                TabularMSA([DNA('GT'), DNA('..')]))
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   "`other`'s index labels.*unique"):
+            TabularMSA([DNA('AC'), DNA('--')]).join(
+                TabularMSA([DNA('GT'), DNA('..')], index=[0, 0]))
+
+    def test_handles_missing_metadata_efficiently(self):
+        msa1 = TabularMSA([DNA('AC'),
+                           DNA('G.')])
+        msa2 = TabularMSA([DNA('-C'),
+                           DNA('.G')])
+
+        joined = msa1.join(msa2)
+
+        self.assertEqual(
+            joined,
+            TabularMSA([DNA('AC-C'),
+                        DNA('G..G')]))
+        self.assertIsNone(msa1._metadata)
+        self.assertIsNone(msa1._positional_metadata)
+        self.assertIsNone(msa2._metadata)
+        self.assertIsNone(msa2._positional_metadata)
+        self.assertIsNone(joined._metadata)
+        self.assertIsNone(joined._positional_metadata)
+
+    def test_ignores_metadata(self):
+        msa1 = TabularMSA([DNA('AC', metadata={'id': 'a'}),
+                           DNA('G.', metadata={'id': 'b'}),
+                           DNA('C-', metadata={'id': 'c'})],
+                          metadata={'id': 'msa1'})
+        msa2 = TabularMSA([DNA('-C', metadata={'id': 'd'}),
+                           DNA('.G', metadata={'id': 'e'}),
+                           DNA('CA', metadata={'id': 'f'})], index=[2, 1, 0],
+                          metadata={'id': 'msa2'})
+
+        joined = msa1.join(msa2)
+
+        self.assertEqual(
+            joined,
+            TabularMSA([DNA('ACCA'),
+                        DNA('G..G'),
+                        DNA('C--C')]))
+
+    def test_outer_join_on_per_sequence_positional_metadata(self):
+        msa1 = TabularMSA([
+            DNA('AC', positional_metadata={'1': [1, 2], 'foo': ['a', 'b']}),
+            DNA('GT', positional_metadata={'2': [3, 4], 'foo': ['c', 'd']})])
+        msa2 = TabularMSA([
+            DNA('CA', positional_metadata={'3': [5, 6], 'foo': ['e', 'f']}),
+            DNA('TG', positional_metadata={'4': [7, 8], 'foo': ['g', 'h']})])
+
+        joined = msa1.join(msa2)
+
+        self.assertEqual(
+            joined,
+            TabularMSA([
+                DNA('ACCA',
+                    positional_metadata={'1': [1, 2, np.nan, np.nan],
+                                         '3': [np.nan, np.nan, 5, 6],
+                                         'foo': ['a', 'b', 'e', 'f']}),
+                DNA('GTTG',
+                    positional_metadata={'2': [3, 4, np.nan, np.nan],
+                                         '4': [np.nan, np.nan, 7, 8],
+                                         'foo': ['c', 'd', 'g', 'h']})]))
+
+    def test_no_sequences(self):
+        msa1 = TabularMSA([], positional_metadata={'foo': []})
+        msa2 = TabularMSA([], positional_metadata={'foo': []})
+
+        joined = msa1.join(msa2)
+
+        self.assertEqual(joined, TabularMSA([]))
+
+    def test_no_positions(self):
+        msa1 = TabularMSA([DNA('', positional_metadata={'1': []}),
+                           DNA('', positional_metadata={'2': []})],
+                          positional_metadata={'foo': []})
+        msa2 = TabularMSA([DNA('', positional_metadata={'3': []}),
+                           DNA('', positional_metadata={'4': []})],
+                          positional_metadata={'foo': []})
+
+        joined = msa1.join(msa2)
+
+        self.assertEqual(
+            joined,
+            TabularMSA([DNA('', positional_metadata={'1': [], '3': []}),
+                        DNA('', positional_metadata={'2': [], '4': []})],
+                       positional_metadata={'foo': []}))
+
+    def test_one_with_positions_one_without_positions(self):
+        msa1 = TabularMSA([DNA('A', positional_metadata={'1': ['a']}),
+                           DNA('C', positional_metadata={'2': ['b']})],
+                          positional_metadata={'foo': ['bar']})
+        msa2 = TabularMSA([DNA('', positional_metadata={'3': []}),
+                           DNA('', positional_metadata={'4': []})],
+                          positional_metadata={'foo': []})
+
+        joined = msa1.join(msa2)
+
+        self.assertEqual(
+            joined,
+            TabularMSA([DNA('A', positional_metadata={'1': ['a'],
+                                                      '3': [np.nan]}),
+                        DNA('C', positional_metadata={'2': ['b'],
+                                                      '4': [np.nan]})],
+                       positional_metadata={'foo': ['bar']}))
+
+    def test_how_strict(self):
+        msa1 = TabularMSA([DNA('AC'),
+                           DNA('G.'),
+                           DNA('C-')],
+                          positional_metadata={'foo': [1, 2],
+                                               'bar': ['a', 'b']})
+        msa2 = TabularMSA([DNA('-C'),
+                           DNA('.G'),
+                           DNA('CA')], index=[2, 1, 0],
+                          positional_metadata={'foo': [3, 4],
+                                               'bar': ['c', 'd']})
+
+        joined = msa1.join(msa2)
+
+        self.assertEqual(
+            joined,
+            TabularMSA([DNA('ACCA'),
+                        DNA('G..G'),
+                        DNA('C--C')],
+                       positional_metadata={'foo': [1, 2, 3, 4],
+                                            'bar': ['a', 'b', 'c', 'd']}))
+
+    def test_how_strict_failure_index_mismatch(self):
+        msa1 = TabularMSA([DNA('AC'),
+                           DNA('G.'),
+                           DNA('C-')])
+        msa2 = TabularMSA([DNA('-C'),
+                           DNA('.G'),
+                           DNA('CA'),
+                           DNA('--')])
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   'Index labels must all match'):
+            msa1.join(msa2)
+
+    def test_how_strict_failure_positional_metadata_mismatch(self):
+        msa1 = TabularMSA([DNA('AC'),
+                           DNA('G.')],
+                          positional_metadata={'foo': [1, 2],
+                                               'bar': ['a', 'b']})
+        msa2 = TabularMSA([DNA('-C'),
+                           DNA('.G')],
+                          positional_metadata={'foo': [3, 4]})
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   'Positional metadata columns.*match'):
+            msa1.join(msa2)
+
+    def test_how_inner(self):
+        msa1 = TabularMSA([DNA('AC'),
+                           DNA('G.'),
+                           DNA('C-'),
+                           DNA('--')], index=[0, 1, 2, 3],
+                          positional_metadata={'foo': [1, 2],
+                                               'bar': ['a', 'b']})
+        msa2 = TabularMSA([DNA('-C'),
+                           DNA('.G'),
+                           DNA('CA'),
+                           DNA('..')], index=[2, 1, 0, -1],
+                          positional_metadata={'foo': [3, 4],
+                                               'baz': ['c', 'd']})
+
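+        # 'inner' keeps only the index labels present in both MSAs (0, 1, 2)
+        # and only the shared positional metadata column ('foo').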
+        joined = msa1.join(msa2, how='inner')
+
+        self.assertEqual(
+            joined,
+            TabularMSA([DNA('C--C'),
+                        DNA('G..G'),
+                        DNA('ACCA')], index=[2, 1, 0],
+                       positional_metadata={'foo': [1, 2, 3, 4]}))
+
+    def test_how_inner_no_positional_metadata_overlap(self):
+        msa1 = TabularMSA([DNA('AC'),
+                           DNA('G.')], index=['b', 'a'],
+                          positional_metadata={'foo': [1, 2]})
+        msa2 = TabularMSA([DNA('-C'),
+                           DNA('.G')], index=['a', 'b'],
+                          positional_metadata={'bar': ['c', 'd']})
+
+        joined = msa1.join(msa2, how='inner')
+
+        self.assertEqual(
+            joined,
+            TabularMSA([DNA('G.-C'),
+                        DNA('AC.G')], index=['a', 'b']))
+
+    def test_how_inner_no_index_overlap_with_positional_metadata_overlap(self):
+        msa1 = TabularMSA([DNA('AC'),
+                           DNA('G.')],
+                          positional_metadata={'foo': [1, 2]})
+        msa2 = TabularMSA([DNA('-C'),
+                           DNA('.G')], index=['a', 'b'],
+                          positional_metadata={'foo': [3, 4]})
+
+        joined = msa1.join(msa2, how='inner')
+
+        self.assertEqual(joined, TabularMSA([]))
+
+    def test_how_outer(self):
+        msa1 = TabularMSA([DNA('AC'),
+                           DNA('G.'),
+                           DNA('C-'),
+                           DNA('--')], index=[0, 1, 2, 3],
+                          positional_metadata={'foo': [1, 2],
+                                               'bar': ['a', 'b']})
+        msa2 = TabularMSA([DNA('-CC'),
+                           DNA('.GG'),
+                           DNA('CAA'),
+                           DNA('...')], index=[2, 1, 0, -1],
+                          positional_metadata={'foo': [3, 4, 5],
+                                               'baz': ['c', 'd', 'e']})
+
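+        # 'outer' takes the union of index labels (-1 through 3) and of
+        # positional metadata columns; rows missing from one MSA are padded
+        # with gap characters and missing metadata values become NaN.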
+        joined = msa1.join(msa2, how='outer')
+
+        self.assertEqual(
+            joined,
+            TabularMSA([DNA('--...'),
+                        DNA('ACCAA'),
+                        DNA('G..GG'),
+                        DNA('C--CC'),
+                        DNA('-----')], index=range(-1, 4),
+                       positional_metadata={
+                           'foo': [1, 2, 3, 4, 5],
+                           'bar': ['a', 'b', np.nan, np.nan, np.nan],
+                           'baz': [np.nan, np.nan, 'c', 'd', 'e']}))
+
+    def test_how_left(self):
+        msa1 = TabularMSA([DNA('AC'),
+                           DNA('G.'),
+                           DNA('C-'),
+                           DNA('--')], index=[0, 1, 2, 3],
+                          positional_metadata={'foo': [1, 2],
+                                               'bar': ['a', 'b']})
+        msa2 = TabularMSA([DNA('-CC'),
+                           DNA('.GG'),
+                           DNA('CAA'),
+                           DNA('...')], index=[2, 1, 0, -1],
+                          positional_metadata={'foo': [3, 4, 5],
+                                               'baz': ['c', 'd', 'e']})
+
+        joined = msa1.join(msa2, how='left')
+
+        self.assertEqual(
+            joined,
+            TabularMSA([DNA('ACCAA'),
+                        DNA('G..GG'),
+                        DNA('C--CC'),
+                        DNA('-----')],
+                       positional_metadata={
+                           'foo': [1, 2, 3, 4, 5],
+                           'bar': ['a', 'b', np.nan, np.nan, np.nan]}))
+
+    def test_how_right(self):
+        msa1 = TabularMSA([DNA('AC'),
+                           DNA('G.'),
+                           DNA('C-'),
+                           DNA('--')], index=[0, 1, 2, 3],
+                          positional_metadata={'foo': [1, 2],
+                                               'bar': ['a', 'b']})
+        msa2 = TabularMSA([DNA('-CC'),
+                           DNA('.GG'),
+                           DNA('CAA'),
+                           DNA('...')], index=[2, 1, 0, -1],
+                          positional_metadata={'foo': [3, 4, 5],
+                                               'baz': ['c', 'd', 'e']})
+
+        joined = msa1.join(msa2, how='right')
+
+        self.assertEqual(
+            joined,
+            TabularMSA([DNA('C--CC'),
+                        DNA('G..GG'),
+                        DNA('ACCAA'),
+                        DNA('--...')], index=[2, 1, 0, -1],
+                       positional_metadata={
+                           'foo': [1, 2, 3, 4, 5],
+                           'baz': [np.nan, np.nan, 'c', 'd', 'e']}))
+
+
+class TestIterPositions(unittest.TestCase):
+    def test_method_return_type(self):
+        msa = TabularMSA([DNA('AC'),
+                          DNA('GT')])
+
+        obs = msa.iter_positions()
+
+        self.assertIsInstance(obs, types.GeneratorType)
+
+    def test_position_type(self):
+        msa = TabularMSA([DNA('AC'),
+                          DNA('GT')])
+
+        first_position = next(msa.iter_positions())
+
+        # Type should be *exactly* Sequence.
+        self.assertIs(type(first_position), Sequence)
+
+    def test_no_sequences(self):
+        msa = TabularMSA([])
+
+        obs = list(msa.iter_positions())
+
+        self.assertEqual(obs, [])
+
+    def test_no_sequences_reverse(self):
+        msa = TabularMSA([])
+
+        obs = list(msa.iter_positions(reverse=True))
+
+        self.assertEqual(obs, [])
+
+    def test_no_positions(self):
+        msa = TabularMSA([DNA(''),
+                          DNA('')])
+
+        obs = list(msa.iter_positions())
+
+        self.assertEqual(obs, [])
+
+    def test_no_positions_reverse(self):
+        msa = TabularMSA([DNA(''),
+                          DNA('')])
+
+        obs = list(msa.iter_positions(reverse=True))
+
+        self.assertEqual(obs, [])
+
+    def test_single_position(self):
+        msa = TabularMSA([DNA('A')])
+
+        obs = list(msa.iter_positions())
+
+        self.assertEqual(obs, [Sequence('A')])
+
+    def test_single_position_reverse(self):
+        msa = TabularMSA([DNA('A'),
+                          DNA('T')])
+
+        obs = list(msa.iter_positions(reverse=True))
+
+        self.assertEqual(obs, [Sequence('AT')])
+
+    def test_multiple_positions(self):
+        msa = TabularMSA([DNA('ACGT'),
+                          DNA('A-G.'),
+                          DNA('----')])
+
+        obs = list(msa.iter_positions())
+
+        self.assertEqual(obs,
+                         [Sequence('AA-'), Sequence('C--'), Sequence('GG-'),
+                          Sequence('T.-')])
+
+    def test_multiple_positions_reverse(self):
+        msa = TabularMSA([DNA('AC'),
+                          DNA('A-'),
+                          DNA('--')])
+
+        obs = list(msa.iter_positions(reverse=True))
+
+        self.assertEqual(obs,
+                         [Sequence('C--'), Sequence('AA-')])
+
+    def test_with_positional_metadata(self):
+        # MSA *and* sequence positional metadata.
+        msa_positional_metadata = {'pm1': [0.5, 1.5], 'foo': [9, 99]}
+        seqs = [
+            DNA('AC', positional_metadata={'foo': [42, 43]}),
+            DNA('A-'),
+            DNA('--', positional_metadata={'foo': [-1, -2],
+                                           'bar': ['baz', 'bazz']})]
+        msa = TabularMSA(seqs, positional_metadata=msa_positional_metadata)
+
+        obs = list(msa.iter_positions())
+
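+        # Each yielded position is a Sequence whose metadata comes from the
+        # MSA's positional metadata at that column and whose positional
+        # metadata is assembled from the member sequences (NaN where a
+        # sequence lacks the key).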
+        self.assertEqual(
+            obs,
+            [Sequence('AA-', metadata={'pm1': 0.5, 'foo': 9},
+                      positional_metadata={'foo': [42, np.nan, -1],
+                                           'bar': [np.nan, np.nan, 'baz']}),
+             Sequence('C--', metadata={'pm1': 1.5, 'foo': 99},
+                      positional_metadata={'foo': [43, np.nan, -2],
+                                           'bar': [np.nan, np.nan, 'bazz']})])
+
+    def test_with_positional_metadata_reverse(self):
+        # MSA *and* sequence positional metadata.
+        msa_positional_metadata = {'pm1': [0.5, 1.5], 'foo': [9, 99]}
+        seqs = [
+            DNA('AC', positional_metadata={'foo': [42, 43]}),
+            DNA('A-'),
+            DNA('--', positional_metadata={'foo': [-1, -2],
+                                           'bar': ['baz', 'bazz']})]
+        msa = TabularMSA(seqs, positional_metadata=msa_positional_metadata)
+
+        obs = list(msa.iter_positions(reverse=True))
+
+        self.assertEqual(
+            obs,
+            [Sequence('C--', metadata={'pm1': 1.5, 'foo': 99},
+                      positional_metadata={'foo': [43, np.nan, -2],
+                                           'bar': [np.nan, np.nan, 'bazz']}),
+             Sequence('AA-', metadata={'pm1': 0.5, 'foo': 9},
+                      positional_metadata={'foo': [42, np.nan, -1],
+                                           'bar': [np.nan, np.nan, 'baz']})])
+
+    def test_handles_missing_positional_metadata_efficiently(self):
+        msa = TabularMSA([DNA('AC'),
+                          DNA('A-')])
+
+        self.assertIsNone(msa._positional_metadata)
+
+        list(msa.iter_positions())
+
+        self.assertIsNone(msa._positional_metadata)
+
+
+class TestConsensus(unittest.TestCase):
+    def test_no_sequences(self):
+        msa = TabularMSA([])
+
+        cons = msa.consensus()
+
+        self.assertEqual(cons, Sequence(''))
+
+    def test_no_positions(self):
+        msa = TabularMSA([DNA(''),
+                          DNA('')])
+
+        cons = msa.consensus()
+
+        self.assertEqual(cons, DNA(''))
+
+    def test_single_sequence(self):
+        msa = TabularMSA([DNA('ACGT-.')])
+
+        cons = msa.consensus()
+
+        self.assertEqual(cons, DNA('ACGT--'))
+
+    def test_multiple_sequences(self):
+        msa = TabularMSA([DNA('ACGT'),
+                          DNA('AG-.'),
+                          DNA('AC-.')])
+
+        cons = msa.consensus()
+
+        self.assertEqual(cons, DNA('AC--'))
+
+    def test_ties(self):
+        msa = TabularMSA([DNA('A-'),
+                          DNA('C-'),
+                          DNA('G-')])
+
+        cons = msa.consensus()
+
+        self.assertTrue(cons in [DNA('A-'), DNA('C-'), DNA('G-')])
+
+    def test_ties_with_gaps(self):
+        msa = TabularMSA([DNA('-'),
+                          DNA('.'),
+                          DNA('T'),
+                          DNA('T')])
+
+        cons = msa.consensus()
+
+        self.assertTrue(cons in [DNA('T'), DNA('-')])
+
+    def test_default_gap_char(self):
+        msa = TabularMSA([DNA('.'),
+                          DNA('.'),
+                          DNA('.')])
+
+        cons = msa.consensus()
+
+        self.assertEqual(cons, DNA('-'))
+
+    def test_different_dtype(self):
+        msa = TabularMSA([RNA('---'),
+                          RNA('AG-'),
+                          RNA('AGG')])
+
+        cons = msa.consensus()
+
+        self.assertEqual(cons, RNA('AG-'))
+
+    def test_with_positional_metadata(self):
+        # Defining *all* types of metadata to ensure correct metadata is
+        # propagated to majority consensus sequence.
+        seqs = [
+            DNA('-.-', metadata={'id': 'seq1'},
+                positional_metadata={'qual': range(0, 3)}),
+            DNA('A.T', metadata={'id': 'seq2'},
+                positional_metadata={'qual': range(3, 6)}),
+            DNA('ACT', metadata={'id': 'seq3'},
+                positional_metadata={'qual': range(6, 9)})
+        ]
+        msa = TabularMSA(seqs, metadata={'pubmed': 123456},
+                         positional_metadata={'foo': [42, 43, 42],
+                                              'bar': ['a', 'b', 'c']})
+
+        cons = msa.consensus()
+
+        self.assertEqual(
+            cons,
+            DNA('A-T', positional_metadata={'foo': [42, 43, 42],
+                                            'bar': ['a', 'b', 'c']}))
+
+    def test_handles_missing_positional_metadata_efficiently(self):
+        msa = TabularMSA([DNA('AC'),
+                          DNA('AC')])
+
+        self.assertIsNone(msa._positional_metadata)
+
+        cons = msa.consensus()
+
+        self.assertIsNone(msa._positional_metadata)
+        self.assertIsNone(cons._positional_metadata)
+
+    def test_mixed_gap_characters_as_majority(self):
+        seqs = [
+            DNA('A'),
+            DNA('A'),
+            DNA('A'),
+            DNA('A'),
+            DNA('.'),
+            DNA('.'),
+            DNA('.'),
+            DNA('-'),
+            DNA('-')
+        ]
+        msa = TabularMSA(seqs)
+
+        cons = msa.consensus()
+
+        self.assertEqual(cons, DNA('-'))
+
+
+class TestConservation(unittest.TestCase):
+
+    def test_no_sequences(self):
+        msa = TabularMSA([])
+        cons = msa.conservation()
+        npt.assert_array_equal(cons, np.array([]))
+
+    def test_shannon_entropy_dna(self):
+        msa = TabularMSA([DNA('A'),
+                          DNA('G')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([DNA('A'),
+                          DNA('G'),
+                          DNA('C'),
+                          DNA('G')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.25, 0.25],
+                                                      base=4)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([DNA('AAC'),
+                          DNA('GAC')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4),
+                             1. - scipy.stats.entropy([1.0], base=4),
+                             1. - scipy.stats.entropy([1.0], base=4)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([DNA('AACT'),
+                          DNA('GACA')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4),
+                             1. - scipy.stats.entropy([1.0], base=4),
+                             1. - scipy.stats.entropy([1.0], base=4),
+                             1. - scipy.stats.entropy([0.5, 0.5], base=4)])
+        npt.assert_array_equal(actual, expected)
+
+    def test_shannon_entropy_rna(self):
+        msa = TabularMSA([RNA('A'),
+                          RNA('G')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([RNA('A'),
+                          RNA('G'),
+                          RNA('C'),
+                          RNA('G')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.25, 0.25],
+                                                      base=4)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([RNA('AAC'),
+                          RNA('GAC')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4),
+                             1. - scipy.stats.entropy([1.0], base=4),
+                             1. - scipy.stats.entropy([1.0], base=4)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([RNA('AACU'),
+                          RNA('GACA')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=4),
+                             1. - scipy.stats.entropy([1.0], base=4),
+                             1. - scipy.stats.entropy([1.0], base=4),
+                             1. - scipy.stats.entropy([0.5, 0.5], base=4)])
+        npt.assert_array_equal(actual, expected)
+
+    def test_shannon_entropy_protein(self):
+        msa = TabularMSA([Protein('A'),
+                          Protein('G')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=20)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([Protein('A'),
+                          Protein('G'),
+                          Protein('C'),
+                          Protein('G')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.25, 0.25],
+                                                      base=20)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([Protein('AAC'),
+                          Protein('GAC')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=20),
+                             1. - scipy.stats.entropy([1.0], base=20),
+                             1. - scipy.stats.entropy([1.0], base=20)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([Protein('AACT'),
+                          Protein('GACA')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=20),
+                             1. - scipy.stats.entropy([1.0], base=20),
+                             1. - scipy.stats.entropy([1.0], base=20),
+                             1. - scipy.stats.entropy([0.5, 0.5], base=20)])
+        npt.assert_array_equal(actual, expected)
+
+    def test_degenerate_mode_nan(self):
+        msa = TabularMSA([DNA('NAC'),
+                          DNA('NNC')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty',
+                                  degenerate_mode='nan')
+        expected = np.array([np.nan,
+                             np.nan,
+                             1. - scipy.stats.entropy([1.0], base=4)])
+        npt.assert_array_equal(actual, expected)
+
+    def test_degenerate_mode_error(self):
+        msa = TabularMSA([DNA('NACN'),
+                          DNA('NNCA')])
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          degenerate_mode='error')
+
+        msa = TabularMSA([DNA('AACA'),
+                          DNA('ANCA')])
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          degenerate_mode='error')
+
+    def test_error_on_degenerate_w_nan_on_gap(self):
+        msa = TabularMSA([DNA('-ACA'),
+                          DNA('-NCA')])
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          degenerate_mode='error',
+                          gap_mode='nan')
+
+    def test_column_with_degen_and_gap(self):
+        msa = TabularMSA([DNA('N'),
+                          DNA('-')])
+        # test all eight combinations of gap_mode and degenerate_mode
+        actual = msa.conservation(metric='inverse_shannon_uncertainty',
+                                  degenerate_mode='nan',
+                                  gap_mode='nan')
+        npt.assert_array_equal(actual, np.array([np.nan]))
+
+        actual = msa.conservation(metric='inverse_shannon_uncertainty',
+                                  degenerate_mode='nan',
+                                  gap_mode='ignore')
+        npt.assert_array_equal(actual, np.array([np.nan]))
+
+        actual = msa.conservation(metric='inverse_shannon_uncertainty',
+                                  degenerate_mode='nan',
+                                  gap_mode='include')
+        npt.assert_array_equal(actual, np.array([np.nan]))
+
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          degenerate_mode='nan',
+                          gap_mode='error')
+
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          degenerate_mode='error',
+                          gap_mode='nan')
+
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          degenerate_mode='error',
+                          gap_mode='error')
+
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          degenerate_mode='error',
+                          gap_mode='include')
+
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          degenerate_mode='error',
+                          gap_mode='ignore')
+
+    def test_gap_mode_nan(self):
+        msa = TabularMSA([DNA('-AC.'),
+                          DNA('--CA')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty',
+                                  gap_mode='nan')
+        expected = np.array([np.nan,
+                             np.nan,
+                             1. - scipy.stats.entropy([1.0], base=4),
+                             np.nan])
+        npt.assert_array_equal(actual, expected)
+
+    def test_gap_mode_include(self):
+        msa = TabularMSA([DNA('AC'),
+                          DNA('-G')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty',
+                                  gap_mode='include')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=5),
+                             1. - scipy.stats.entropy([0.5, 0.5], base=5)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([DNA('AC'),
+                          DNA('.G')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty',
+                                  gap_mode='include')
+        expected = np.array([1. - scipy.stats.entropy([0.5, 0.5], base=5),
+                             1. - scipy.stats.entropy([0.5, 0.5], base=5)])
+        npt.assert_array_equal(actual, expected)
+
+    def test_gap_mode_include_gaps_treated_as_single_char(self):
+        msa = TabularMSA([DNA('.'),
+                          DNA('-')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty',
+                                  gap_mode='include')
+        expected = np.array([1. - scipy.stats.entropy([1.0], base=5)])
+        npt.assert_array_equal(actual, expected)
+
+    def test_gap_mode_ignore(self):
+        msa = TabularMSA([DNA('AC'),
+                          DNA('-G')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty',
+                                  gap_mode='ignore')
+        expected = np.array([1. - scipy.stats.entropy([1.0], base=4),
+                             1. - scipy.stats.entropy([0.5, 0.5], base=4)])
+        npt.assert_array_equal(actual, expected)
+
+        msa = TabularMSA([DNA('AC'),
+                          DNA('.G')])
+        actual = msa.conservation(metric='inverse_shannon_uncertainty',
+                                  gap_mode='ignore')
+        expected = np.array([1. - scipy.stats.entropy([1.0], base=4),
+                             1. - scipy.stats.entropy([0.5, 0.5], base=4)])
+        npt.assert_array_equal(actual, expected)
+
+    def test_gap_mode_error(self):
+        msa = TabularMSA([DNA('-AC-'),
+                          DNA('--CA')])
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          gap_mode="error")
+
+        msa = TabularMSA([DNA('AACA'),
+                          DNA('A-CA')])
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          gap_mode="error")
+
+        msa = TabularMSA([DNA('AACA'),
+                          DNA('A.CA')])
+        self.assertRaises(ValueError, msa.conservation,
+                          metric='inverse_shannon_uncertainty',
+                          gap_mode="error")
+
+    def test_bad_metric(self):
+        msa = TabularMSA([DNA('AA'),
+                          DNA('A-')])
+        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+            msa.conservation(metric='xyz')
+
+        msa = TabularMSA([])
+        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+            msa.conservation(metric='xyz')
+
+    def test_bad_gap_mode(self):
+        msa = TabularMSA([DNA('AA'),
+                          DNA('A-')])
+        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+            msa.conservation(gap_mode='xyz')
+
+        msa = TabularMSA([])
+        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+            msa.conservation(gap_mode='xyz')
+
+    def test_bad_degenerate_mode(self):
+        msa = TabularMSA([DNA('AA'),
+                          DNA('A-')])
+        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+            msa.conservation(degenerate_mode='xyz')
+
+        msa = TabularMSA([])
+        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+            msa.conservation(degenerate_mode='xyz')
+
+
+class TestGapFrequencies(unittest.TestCase):
+    def test_default_behavior(self):
+        msa = TabularMSA([DNA('AA.'),
+                          DNA('-A-')])
+
+        freqs = msa.gap_frequencies()
+
+        npt.assert_array_equal(np.array([1, 0, 2]), freqs)
+
+    def test_invalid_axis_str(self):
+        with six.assertRaisesRegex(self, ValueError, "axis.*'foo'"):
+            TabularMSA([]).gap_frequencies(axis='foo')
+
+    def test_invalid_axis_int(self):
+        with six.assertRaisesRegex(self, ValueError, "axis.*2"):
+            TabularMSA([]).gap_frequencies(axis=2)
+
+    def test_position_axis_str_and_int_equivalent(self):
+        msa = TabularMSA([DNA('ACGT'),
+                          DNA('A.G-'),
+                          DNA('----')])
+
+        str_freqs = msa.gap_frequencies(axis='position')
+        int_freqs = msa.gap_frequencies(axis=1)
+
+        npt.assert_array_equal(str_freqs, int_freqs)
+        npt.assert_array_equal(np.array([0, 2, 4]), str_freqs)
+
+    def test_sequence_axis_str_and_int_equivalent(self):
+        msa = TabularMSA([DNA('ACGT'),
+                          DNA('A.G-'),
+                          DNA('----')])
+
+        str_freqs = msa.gap_frequencies(axis='sequence')
+        int_freqs = msa.gap_frequencies(axis=0)
+
+        npt.assert_array_equal(str_freqs, int_freqs)
+        npt.assert_array_equal(np.array([1, 2, 1, 2]), str_freqs)
+
+    def test_correct_dtype_absolute_empty(self):
+        msa = TabularMSA([])
+
+        freqs = msa.gap_frequencies(axis='position')
+
+        npt.assert_array_equal(np.array([]), freqs)
+        self.assertEqual(int, freqs.dtype)
+
+    def test_correct_dtype_relative_empty(self):
+        msa = TabularMSA([])
+
+        freqs = msa.gap_frequencies(axis='position', relative=True)
+
+        npt.assert_array_equal(np.array([]), freqs)
+        self.assertEqual(float, freqs.dtype)
+
+    def test_correct_dtype_absolute_non_empty(self):
+        msa = TabularMSA([DNA('AC'),
+                          DNA('-.')])
+
+        freqs = msa.gap_frequencies(axis='position')
+
+        npt.assert_array_equal(np.array([0, 2]), freqs)
+        self.assertEqual(int, freqs.dtype)
+
+    def test_correct_dtype_relative_non_empty(self):
+        msa = TabularMSA([DNA('AC'),
+                          DNA('-.')])
+
+        freqs = msa.gap_frequencies(axis='position', relative=True)
+
+        npt.assert_array_equal(np.array([0.0, 1.0]), freqs)
+        self.assertEqual(float, freqs.dtype)
+
+    def test_no_sequences_absolute(self):
+        msa = TabularMSA([])
+
+        seq_freqs = msa.gap_frequencies(axis='sequence')
+        pos_freqs = msa.gap_frequencies(axis='position')
+
+        npt.assert_array_equal(np.array([]), seq_freqs)
+        npt.assert_array_equal(np.array([]), pos_freqs)
+
+    def test_no_sequences_relative(self):
+        msa = TabularMSA([])
+
+        seq_freqs = msa.gap_frequencies(axis='sequence', relative=True)
+        pos_freqs = msa.gap_frequencies(axis='position', relative=True)
+
+        npt.assert_array_equal(np.array([]), seq_freqs)
+        npt.assert_array_equal(np.array([]), pos_freqs)
+
+    def test_no_positions_absolute(self):
+        msa = TabularMSA([DNA('')])
+
+        seq_freqs = msa.gap_frequencies(axis='sequence')
+        pos_freqs = msa.gap_frequencies(axis='position')
+
+        npt.assert_array_equal(np.array([]), seq_freqs)
+        npt.assert_array_equal(np.array([0]), pos_freqs)
+
+    def test_no_positions_relative(self):
+        msa = TabularMSA([DNA('')])
+
+        seq_freqs = msa.gap_frequencies(axis='sequence', relative=True)
+        pos_freqs = msa.gap_frequencies(axis='position', relative=True)
+
+        npt.assert_array_equal(np.array([]), seq_freqs)
+        npt.assert_array_equal(np.array([np.nan]), pos_freqs)
+
+    def test_single_sequence_absolute(self):
+        msa = TabularMSA([DNA('.T')])
+
+        seq_freqs = msa.gap_frequencies(axis='sequence')
+        pos_freqs = msa.gap_frequencies(axis='position')
+
+        npt.assert_array_equal(np.array([1, 0]), seq_freqs)
+        npt.assert_array_equal(np.array([1]), pos_freqs)
+
+    def test_single_sequence_relative(self):
+        msa = TabularMSA([DNA('.T')])
+
+        seq_freqs = msa.gap_frequencies(axis='sequence', relative=True)
+        pos_freqs = msa.gap_frequencies(axis='position', relative=True)
+
+        npt.assert_array_equal(np.array([1.0, 0.0]), seq_freqs)
+        npt.assert_array_equal(np.array([0.5]), pos_freqs)
+
+    def test_single_position_absolute(self):
+        msa = TabularMSA([DNA('.'),
+                          DNA('T')])
+
+        seq_freqs = msa.gap_frequencies(axis='sequence')
+        pos_freqs = msa.gap_frequencies(axis='position')
+
+        npt.assert_array_equal(np.array([1]), seq_freqs)
+        npt.assert_array_equal(np.array([1, 0]), pos_freqs)
+
+    def test_single_position_relative(self):
+        msa = TabularMSA([DNA('.'),
+                          DNA('T')])
+
+        seq_freqs = msa.gap_frequencies(axis='sequence', relative=True)
+        pos_freqs = msa.gap_frequencies(axis='position', relative=True)
+
+        npt.assert_array_equal(np.array([0.5]), seq_freqs)
+        npt.assert_array_equal(np.array([1.0, 0.0]), pos_freqs)
+
+    def test_position_axis_absolute(self):
+        msa = TabularMSA([
+                DNA('ACGT'),   # no gaps
+                DNA('A.G-'),   # some gaps (mixed gap chars)
+                DNA('----'),   # all gaps
+                DNA('....')])  # all gaps
+
+        freqs = msa.gap_frequencies(axis='position')
+
+        npt.assert_array_equal(np.array([0, 2, 4, 4]), freqs)
+
+    def test_position_axis_relative(self):
+        msa = TabularMSA([DNA('ACGT'),
+                          DNA('A.G-'),
+                          DNA('CCC.'),
+                          DNA('----'),
+                          DNA('....')])
+
+        freqs = msa.gap_frequencies(axis='position', relative=True)
+
+        npt.assert_array_equal(np.array([0.0, 0.5, 0.25, 1.0, 1.0]), freqs)
+
+    def test_sequence_axis_absolute(self):
+        msa = TabularMSA([DNA('AC-.'),
+                          DNA('A.-.'),
+                          DNA('G--.')])
+
+        freqs = msa.gap_frequencies(axis='sequence')
+
+        npt.assert_array_equal(np.array([0, 2, 3, 3]), freqs)
+
+    def test_sequence_axis_relative(self):
+        msa = TabularMSA([DNA('AC--.'),
+                          DNA('A.A-.'),
+                          DNA('G-A-.')])
+
+        freqs = msa.gap_frequencies(axis='sequence', relative=True)
+
+        npt.assert_array_equal(np.array([0.0, 2/3, 1/3, 1.0, 1.0]), freqs)
+
+    def test_relative_frequencies_precise(self):
+        class CustomSequence(IUPACSequence):
+            @classproperty
+            @overrides(IUPACSequence)
+            def gap_chars(cls):
+                return set('0123456789')
+
+            @classproperty
+            @overrides(IUPACSequence)
+            def nondegenerate_chars(cls):
+                return set('')
+
+            @classproperty
+            @overrides(IUPACSequence)
+            def degenerate_map(cls):
+                return {}
+
+        msa = TabularMSA([CustomSequence('0123456789')])
+
+        freqs = msa.gap_frequencies(axis='position', relative=True)
+
+        npt.assert_array_equal(np.array([1.0]), freqs)
+
+    def test_custom_gap_characters(self):
+        class CustomSequence(IUPACSequence):
+            @classproperty
+            @overrides(IUPACSequence)
+            def gap_chars(cls):
+                return set('#$*')
+
+            @classproperty
+            @overrides(IUPACSequence)
+            def nondegenerate_chars(cls):
+                return set('ABC-.')
+
+            @classproperty
+            @overrides(IUPACSequence)
+            def degenerate_map(cls):
+                return {'D': 'ABC-.'}
+
+        msa = TabularMSA([CustomSequence('ABCD'),
+                          CustomSequence('-.-.'),
+                          CustomSequence('A#C*'),
+                          CustomSequence('####'),
+                          CustomSequence('$$$$')])
+
+        freqs = msa.gap_frequencies(axis='position')
+
+        npt.assert_array_equal(np.array([0, 0, 2, 4, 4]), freqs)
+
+
+class TestGetPosition(unittest.TestCase):
+    def test_without_positional_metadata(self):
+        msa = TabularMSA([DNA('ACG'),
+                          DNA('A-G')])
+
+        position = msa._get_position_(1)
+
+        self.assertEqual(position, Sequence('C-'))
+
+    def test_with_positional_metadata(self):
+        msa = TabularMSA([DNA('ACG'),
+                          DNA('A-G')],
+                         positional_metadata={'foo': [42, 43, 44],
+                                              'bar': ['abc', 'def', 'ghi']})
+
+        position = msa._get_position_(1)
+
+        self.assertEqual(position,
+                         Sequence('C-', metadata={'foo': 43, 'bar': 'def'}))
+
+    def test_handles_positional_metadata_efficiently(self):
+        msa = TabularMSA([DNA('AA'),
+                          DNA('--')])
+
+        msa._get_position_(1)
+
+        self.assertIsNone(msa._positional_metadata)
+
+
+class TestIsSequenceAxis(unittest.TestCase):
+    def setUp(self):
+        self.msa = TabularMSA([])
+
+    def test_invalid_str(self):
+        with six.assertRaisesRegex(self, ValueError, "axis.*'foo'"):
+            self.msa._is_sequence_axis('foo')
+
+    def test_invalid_int(self):
+        with six.assertRaisesRegex(self, ValueError, "axis.*2"):
+            self.msa._is_sequence_axis(2)
+
+    def test_positive_str(self):
+        self.assertTrue(self.msa._is_sequence_axis('sequence'))
+
+    def test_positive_int(self):
+        self.assertTrue(self.msa._is_sequence_axis(0))
+
+    def test_negative_str(self):
+        self.assertFalse(self.msa._is_sequence_axis('position'))
+
+    def test_negative_int(self):
+        self.assertFalse(self.msa._is_sequence_axis(1))
+
+
+class TestRepr(unittest.TestCase):
+    def test_repr(self):
+        # basic sanity checks -- more extensive testing of formatting and
+        # special cases is performed in TabularMSAReprDoctests below. here we
+        # only test that pieces of the repr are present. these tests also
+        # exercise coverage for py2/3 since the doctests in
+        # TabularMSAReprDoctests only currently run in py3.
+
+        # str calls repr
+        self.assertEqual(repr(TabularMSA([])), str(TabularMSA([])))
+        self.assertEqual(repr(TabularMSA([DNA('')])),
+                         str(TabularMSA([DNA('')])))
+        self.assertEqual(repr(TabularMSA([DNA('ACGT')])),
+                         str(TabularMSA([DNA('ACGT')])))
+        self.assertEqual(repr(TabularMSA([DNA('ACGT'*25) for x in range(10)])),
+                         str(TabularMSA([DNA('ACGT'*25) for x in range(10)])))
+
+        # empty
+        obs = repr(TabularMSA([]))
+        self.assertEqual(obs.count('\n'), 5)
+        self.assertTrue(obs.startswith('TabularMSA'))
+        self.assertIn('sequence count: 0', obs)
+        self.assertIn('position count: 0', obs)
+
+        # minimal
+        obs = repr(TabularMSA([DNA('')]))
+        self.assertEqual(obs.count('\n'), 5)
+        self.assertTrue(obs.startswith('TabularMSA'))
+        self.assertIn('sequence count: 1', obs)
+        self.assertIn('position count: 0', obs)
+        self.assertIn('[DNA]', obs)
+
+        # no metadata
+        obs = repr(TabularMSA([DNA('ACGT')]))
+        self.assertEqual(obs.count('\n'), 6)
+        self.assertTrue(obs.startswith('TabularMSA'))
+        self.assertIn('sequence count: 1', obs)
+        self.assertIn('position count: 4', obs)
+        self.assertIn('[DNA]', obs)
+        self.assertTrue(obs.endswith('ACGT'))
+
+        # sequence spanning > 5 lines
+        obs = repr(TabularMSA([DNA('A' * 71) for x in range(6)]))
+        self.assertEqual(obs.count('\n'), 10)
+        self.assertTrue(obs.startswith('TabularMSA'))
+        self.assertIn('sequence count: 6', obs)
+        self.assertIn('position count: 71', obs)
+        self.assertIn('\n...\n', obs)
+        self.assertIn('[DNA]', obs)
+        self.assertTrue(obs.endswith('AAAA'))
+
+        # sequences overflowing
+        obs = repr(TabularMSA([DNA('A' * 72)]))
+        self.assertEqual(obs.count('\n'), 6)
+        self.assertTrue(obs.startswith('TabularMSA'))
+        self.assertIn('sequence count: 1', obs)
+        self.assertIn('position count: 72', obs)
+        self.assertIn('[DNA]', obs)
+        self.assertTrue(obs.endswith(' ... ' + 'A'*33))
+
+
+# NOTE: this must be a *separate* class for doctests only (no unit tests). nose
+# will not run the unit tests otherwise
+#
+# these doctests exercise the correct formatting of TabularMSA's repr in a
+# variety of situations. they are more extensive than the unit tests above
+# (TestRepr.test_repr) but are only currently run in py3. thus, they cannot
+# be relied upon for coverage (the unit tests take care of this)
+class TabularMSAReprDoctests(object):
+    r"""
+    >>> from skbio import DNA, TabularMSA
+
+    Empty (minimal) MSA:
+
+    >>> TabularMSA([])
+    TabularMSA
+    ---------------------
+    Stats:
+        sequence count: 0
+        position count: 0
+    ---------------------
+
+    MSA with single empty sequence:
+
+    >>> TabularMSA([DNA('')])
+    TabularMSA[DNA]
+    ---------------------
+    Stats:
+        sequence count: 1
+        position count: 0
+    ---------------------
+
+    MSA with single sequence with single character:
+
+    >>> TabularMSA([DNA('G')])
+    TabularMSA[DNA]
+    ---------------------
+    Stats:
+        sequence count: 1
+        position count: 1
+    ---------------------
+    G
+
+    MSA with multicharacter sequence:
+
+    >>> TabularMSA([DNA('ACGT')])
+    TabularMSA[DNA]
+    ---------------------
+    Stats:
+        sequence count: 1
+        position count: 4
+    ---------------------
+    ACGT
+
+    Full single line:
+
+    >>> TabularMSA([DNA('A' * 71)])
+    TabularMSA[DNA]
+    -----------------------------------------------------------------------
+    Stats:
+        sequence count: 1
+        position count: 71
+    -----------------------------------------------------------------------
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+
+    Full single line with 1 character overflow:
+
+    >>> TabularMSA([DNA('A' * 72)])
+    TabularMSA[DNA]
+    -----------------------------------------------------------------------
+    Stats:
+        sequence count: 1
+        position count: 72
+    -----------------------------------------------------------------------
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA ... AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+
+    Two sequences with full lines:
+
+    >>> TabularMSA([DNA('T' * 71), DNA('T' * 71)])
+    TabularMSA[DNA]
+    -----------------------------------------------------------------------
+    Stats:
+        sequence count: 2
+        position count: 71
+    -----------------------------------------------------------------------
+    TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
+    TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
+
+    Two sequences with full lines with 1 character overflow:
+
+    >>> TabularMSA([DNA('T' * 72), DNA('T' * 72)])
+    TabularMSA[DNA]
+    -----------------------------------------------------------------------
+    Stats:
+        sequence count: 2
+        position count: 72
+    -----------------------------------------------------------------------
+    TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT ... TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
+    TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT ... TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
+
+    Five full lines (maximum amount of information):
+
+    >>> TabularMSA([DNA('A' * 71) for x in range(5)])
+    TabularMSA[DNA]
+    -----------------------------------------------------------------------
+    Stats:
+        sequence count: 5
+        position count: 71
+    -----------------------------------------------------------------------
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+
+    Six lines starts "summarized" output:
+
+    >>> TabularMSA([DNA('A' * 71) for x in range(6)])
+    TabularMSA[DNA]
+    -----------------------------------------------------------------------
+    Stats:
+        sequence count: 6
+        position count: 71
+    -----------------------------------------------------------------------
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+    ...
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+    AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+
+    Supply horrendous metadata and positional metadata to exercise a variety of
+    metadata formatting cases and rules. Sorting should be by type, then by
+    value within each type (Python 3 doesn't allow sorting of mixed types):
+
+    >>> metadata = {
+    ...     # str key, str value
+    ...     'abc': 'some description',
+    ...     # int value
+    ...     'foo': 42,
+    ...     # unsupported type (dict) value
+    ...     'bar': {},
+    ...     # int key, wrapped str (single line)
+    ...     42: 'some words to test text wrapping and such... yada yada yada '
+    ...         'yada yada yada yada yada.',
+    ...     # bool key, wrapped str (multi-line)
+    ...     True: 'abc ' * 34,
+    ...     # float key, truncated str (too long)
+    ...     42.5: 'abc ' * 200,
+    ...     # unsupported type (tuple) key, unsupported type (list) value
+    ...     ('foo', 'bar'): [1, 2, 3],
+    ...     # bytes key, single long word that wraps
+    ...     b'long word': 'abc' * 30,
+    ...     # truncated key (too long), None value
+    ...     'too long of a key name to display in repr': None,
+    ...     # wrapped bytes value (has b'' prefix)
+    ...     'bytes wrapped value': b'abcd' * 25,
+    ...     # float value
+    ...     0.1: 99.9999,
+    ...     # bool value
+    ...     43: False,
+    ...     # None key, complex value
+    ...     None: complex(-1.0, 0.0),
+    ...     # nested quotes
+    ...     10: '"\''
+    ... }
+    >>> positional_metadata = pd.DataFrame.from_items([
+    ...     # str key, int list value
+    ...     ('foo', [1, 2, 3, 4]),
+    ...     # float key, float list value
+    ...     (42.5, [2.5, 3.0, 4.2, -0.00001]),
+    ...     # int key, object list value
+    ...     (42, [[], 4, 5, {}]),
+    ...     # truncated key (too long), bool list value
+    ...     ('abc' * 90, [True, False, False, True]),
+    ...     # None key
+    ...     (None, range(4))])
+    >>> TabularMSA([DNA('ACGT')], metadata=metadata,
+    ...            positional_metadata=positional_metadata)
+    TabularMSA[DNA]
+    -----------------------------------------------------------------------
+    Metadata:
+        None: (-1+0j)
+        True: 'abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
+               abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
+               abc abc abc abc '
+        b'long word': 'abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabca
+                       bcabcabcabcabcabcabcabcabcabcabcabcabc'
+        0.1: 99.9999
+        42.5: <class 'str'>
+        10: '"\''
+        42: 'some words to test text wrapping and such... yada yada yada
+             yada yada yada yada yada.'
+        43: False
+        'abc': 'some description'
+        'bar': <class 'dict'>
+        'bytes wrapped value': b'abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
+                                 cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
+                                 abcdabcdabcdabcd'
+        'foo': 42
+        <class 'str'>: None
+        <class 'tuple'>: <class 'list'>
+    Positional metadata:
+        'foo': <dtype: int64>
+        42.5: <dtype: float64>
+        42: <dtype: object>
+        <class 'str'>: <dtype: bool>
+        None: <dtype: int64>
+    Stats:
+        sequence count: 1
+        position count: 4
+    -----------------------------------------------------------------------
+    ACGT
+
+    """
+    pass
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/skbio/diversity/__init__.py b/skbio/diversity/__init__.py
index 24d4937..0f6b613 100644
--- a/skbio/diversity/__init__.py
+++ b/skbio/diversity/__init__.py
@@ -4,8 +4,146 @@ Diversity calculations (:mod:`skbio.diversity`)
 
 .. currentmodule:: skbio.diversity
 
-This package provides functionality for calculating community diversity,
-including various alpha- and beta-diversity measures.
+This package provides functionality for analyzing biological diversity. It
+implements metrics of alpha and beta diversity, and provides two "driver
+functions" that are intended to be the primary interface for computing alpha
+and beta diversity with scikit-bio. Additional functions support discovery of
+the available diversity metrics. This document provides a high-level
+discussion of how to work with the ``skbio.diversity`` module, and should be
+read before working with the module.
+
+Driver functions
+----------------
+
+The driver functions, ``skbio.diversity.alpha_diversity`` and
+``skbio.diversity.beta_diversity``, are designed to compute alpha diversity for
+one or more samples, or beta diversity for one or more pairs of samples. The
+diversity driver functions accept a matrix containing vectors of frequencies of
+OTUs within each sample.
+
+We use the term "OTU" here very loosely: in practice, OTUs can represent
+diverse feature types including bacterial species, genes, and metabolites. The
+term "sample" is also loosely defined for these purposes: a sample is intended
+to represent a single unit of sampling, so what a single sample represents can
+vary widely. For example, in a microbiome survey, a sample could represent all
+16S rRNA gene sequences from a single oral swab. In a comparative genomics
+study, on the other hand, a sample could represent an individual organism's
+genome.
+
+Each frequency in a given vector represents the number of individuals observed
+for a particular OTU. We will refer to the frequencies associated with a single
+sample as a *counts vector* or ``counts`` throughout the documentation. Counts
+vectors are `array_like`: anything that can be converted into a 1-D numpy array
+is acceptable input. For example, you can provide a numpy array or a native
+Python list and the results will be identical. As mentioned above, the driver
+functions accept one or more of these vectors (representing one or more
+samples) in a matrix which is also `array_like`. Each row in the matrix
+represents a single sample's count vector, so that rows represent samples and
+columns represent OTUs.
+
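+As a minimal illustrative sketch (the values here are arbitrary), a counts
+matrix for two samples and three OTUs could be constructed as a nested list,
+with one row per sample and one column per OTU:
+
+>>> data = [[3, 1, 0],  # counts for sample 1
+...         [0, 2, 5]]  # counts for sample 2
+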
+Some diversity metrics incorporate relationships between the OTUs in their
+computation through reference to a phylogenetic tree. These metrics
+additionally take a ``skbio.TreeNode`` object and a list of OTU identifiers
+mapping the values in the counts vector to tips in the tree.
+
+The driver functions are optimized so that computing a diversity metric more
+than one time (i.e., for more than one sample for alpha diversity metrics, or
+more than one pair of samples for beta diversity metrics) is often much faster
+than repeated calls to the metric. For this reason, the driver functions take
+matrices of counts vectors rather than a single counts vector for alpha
+diversity metrics or two counts vectors for beta diversity metrics. The
+``alpha_diversity`` driver function will thus compute alpha diversity for all
+counts vectors in the matrix, and the ``beta_diversity`` driver function will
+compute beta diversity for all pairs of counts vectors in the matrix.
+
+Input validation
+----------------
+
+The driver functions validate their input by default. Validation can be slow,
+so it is possible to disable this step by passing ``validate=False``. This can
+be dangerous, however: if invalid input is encountered while validation is
+disabled, it can result in difficult-to-interpret error messages or incorrect
+results. We therefore recommend that users carefully ensure that their input
+data are valid before disabling validation.
+
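+For example, if a counts matrix has already been validated earlier in a
+workflow, validation can be skipped when calling a driver function. This is a
+minimal sketch; the ``data`` and ``ids`` values here are arbitrary:
+
+>>> from skbio.diversity import alpha_diversity
+>>> data = [[3, 1, 0],
+...         [0, 2, 5]]
+>>> ids = ['sample-1', 'sample-2']
+>>> adiv = alpha_diversity('observed_otus', data, ids=ids, validate=False)
+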
+The conditions that the driver functions validate are listed below. If
+validation is disabled, users should be confident that these conditions are
+met.
+
+* the data in the counts vectors can be safely cast to integers
+* there are no negative values in the counts vectors
+* each counts vector is one dimensional
+* the counts matrix is two dimensional
+* all counts vectors are of equal length
+
+If a phylogenetic diversity metric is being computed, the following conditions
+are also confirmed:
+
+* the provided OTU identifiers are all unique
+* the length of each counts vector is equal to the number of OTU identifiers
+* the provided tree is rooted
+* the tree has more than one node
+* all nodes in the provided tree except for the root node have branch lengths
+* all tip names in the provided tree are unique
+* all provided OTU identifiers correspond to tip names in the provided tree
+
+Count vectors
+-------------
+
+There are different ways that count vectors are represented in the ecological
+literature and in related software. The diversity measures provided here
+*always* assume that the input contains abundance data: each count represents
+the number of individuals observed for a particular OTU in the sample. For
+example, if you have two OTUs, where three individuals were observed from the
+first OTU and only a single individual was observed from the second OTU, you
+could represent this data in the following forms (among others).
+
+As a vector of counts. This is the expected type of input for the diversity
+measures in this module. There are 3 individuals from the OTU at index 0, and 1
+individual from the OTU at index 1:
+
+>>> counts = [3, 1]
+
+As a vector of indices. The OTU at index 0 is observed 3 times, while the
+OTU at index 1 is observed 1 time:
+
+>>> indices = [0, 0, 0, 1]
+
+As a vector of frequencies. We have 1 OTU that is a singleton and 1 OTU that
+is a tripleton. We do not have any 0-tons or doubletons:
+
+>>> frequencies = [0, 1, 0, 1]
+
+Always use the first representation (a counts vector) with this module.
+
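+If your data are in the indices form shown above, one way to convert them to
+a counts vector before passing them to this module is ``np.bincount`` (a
+minimal sketch, assuming NumPy is available as ``np``):
+
+>>> import numpy as np
+>>> np.bincount([0, 0, 0, 1])
+array([3, 1])
+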
+Specifying a diversity metric
+-----------------------------
+
+The driver functions take a parameter, ``metric``, that specifies which
+diversity metric should be applied. The value that you provide for ``metric``
+can be either a string (e.g., ``"faith_pd"``) or a function (e.g.,
+``skbio.diversity.alpha.faith_pd``). The metric should generally be passed as
+a string, as this often uses an optimized implementation of the metric. For
+example, passing ``metric="faith_pd"`` (a string) to ``alpha_diversity`` will
+be tens of times faster than passing ``metric=skbio.diversity.alpha.faith_pd``
+(a function) when computing Faith's PD on about 100 samples. Similarly,
+passing ``metric="unweighted_unifrac"`` (a string) will be hundreds of times
+faster than passing ``metric=skbio.diversity.beta.unweighted_unifrac`` (a
+function) when computing unweighted UniFrac on about 100 samples. Passing the
+metric as a function may be faster when computing only a single alpha or beta
+diversity value, but in that case the run times will likely be so small that
+the difference is negligible. **We therefore recommend that you always pass
+the metric as a string when possible.**
+
+Passing a metric as a string will not be possible if the metric you'd like to
+run is not one that scikit-bio knows about. This might be the case, for
+example, if you're applying a custom metric that you've developed. To discover
+the metric names that scikit-bio knows about (i.e., those that can be passed
+as strings to ``alpha_diversity`` or ``beta_diversity``), you can call
+``get_alpha_diversity_metrics`` or ``get_beta_diversity_metrics``,
+respectively. These functions return lists of the alpha and beta diversity
+metrics implemented in scikit-bio. There may be additional metrics that can be
+passed as strings but that won't be listed by these functions, such as those
+implemented in ``scipy.spatial.distance.pdist``.
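+
+For example, to check whether a metric name is available as a string, and to
+pass a custom metric as a function instead, something like the following could
+be used (a minimal sketch; ``my_metric`` is a hypothetical user-defined
+metric):
+
+>>> from skbio.diversity import alpha_diversity, get_alpha_diversity_metrics
+>>> 'faith_pd' in get_alpha_diversity_metrics()
+True
+>>> def my_metric(counts):
+...     # toy custom metric: the total number of individuals observed
+...     return sum(counts)
+>>> adiv = alpha_diversity(my_metric, [[3, 1, 0], [0, 2, 5]])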
 
 Subpackages
 -----------
@@ -16,6 +154,239 @@ Subpackages
    alpha
    beta
 
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   alpha_diversity
+   beta_diversity
+   get_alpha_diversity_metrics
+   get_beta_diversity_metrics
+
+Examples
+--------
+
+Create a matrix containing 6 samples (rows) and 7 OTUs (columns):
+
+.. plot::
+   :context:
+
+   >>> data = [[23, 64, 14, 0, 0, 3, 1],
+   ...         [0, 3, 35, 42, 0, 12, 1],
+   ...         [0, 5, 5, 0, 40, 40, 0],
+   ...         [44, 35, 9, 0, 1, 0, 0],
+   ...         [0, 2, 8, 0, 35, 45, 1],
+   ...         [0, 0, 25, 35, 0, 19, 0]]
+   >>> ids = list('ABCDEF')
+
+   First, we'll compute observed OTUs, an alpha diversity metric, for each
+   sample using the ``alpha_diversity`` driver function:
+
+   >>> from skbio.diversity import alpha_diversity
+   >>> adiv_obs_otus = alpha_diversity('observed_otus', data, ids)
+   >>> adiv_obs_otus
+   A    5
+   B    5
+   C    4
+   D    4
+   E    5
+   F    3
+   dtype: int64
+
+   Next we'll compute Faith's PD on the same samples. Since this is a
+   phylogenetic diversity metric, we'll first create a tree and an ordered
+   list of OTU identifiers.
+
+   >>> from skbio import TreeNode
+   >>> from io import StringIO
+   >>> tree = TreeNode.read(StringIO(
+   ...                      '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
+   ...                      '(OTU4:0.75,(OTU5:0.5,(OTU6:0.5,OTU7:0.5):0.5):'
+   ...                      '0.5):1.25):0.0)root;'))
+   >>> otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5', 'OTU6', 'OTU7']
+   >>> adiv_faith_pd = alpha_diversity('faith_pd', data, ids=ids,
+   ...                                 otu_ids=otu_ids, tree=tree)
+   >>> adiv_faith_pd
+   A    6.75
+   B    7.00
+   C    6.25
+   D    5.75
+   E    6.75
+   F    5.50
+   dtype: float64
+
+   Now we'll compute Bray-Curtis distances, a beta diversity metric, between
+   all pairs of samples. Notice that the ``data`` and ``ids`` parameters
+   provided to ``beta_diversity`` are the same as those provided to
+   ``alpha_diversity``.
+
+   >>> from skbio.diversity import beta_diversity
+   >>> bc_dm = beta_diversity("braycurtis", data, ids)
+   >>> print(bc_dm)
+   6x6 distance matrix
+   IDs:
+   'A', 'B', 'C', 'D', 'E', 'F'
+   Data:
+   [[ 0.          0.78787879  0.86666667  0.30927835  0.85714286  0.81521739]
+    [ 0.78787879  0.          0.78142077  0.86813187  0.75        0.1627907 ]
+    [ 0.86666667  0.78142077  0.          0.87709497  0.09392265  0.71597633]
+    [ 0.30927835  0.86813187  0.87709497  0.          0.87777778  0.89285714]
+    [ 0.85714286  0.75        0.09392265  0.87777778  0.          0.68235294]
+    [ 0.81521739  0.1627907   0.71597633  0.89285714  0.68235294  0.        ]]
+
+   Next, we'll compute weighted UniFrac distances between all pairs of samples.
+   Because weighted UniFrac is a phylogenetic beta diversity metric, we'll need
+   to pass the ``skbio.TreeNode`` and list of OTU ids that we created above.
+   Again, these are the same values that were provided to ``alpha_diversity``.
+
+   >>> wu_dm = beta_diversity("weighted_unifrac", data, ids, tree=tree,
+   ...                        otu_ids=otu_ids)
+   >>> print(wu_dm)
+   6x6 distance matrix
+   IDs:
+   'A', 'B', 'C', 'D', 'E', 'F'
+   Data:
+   [[ 0.          2.77549923  3.82857143  0.42512039  3.8547619   3.10937312]
+    [ 2.77549923  0.          2.26433692  2.98435423  2.24270353  0.46774194]
+    [ 3.82857143  2.26433692  0.          3.95224719  0.16025641  1.86111111]
+    [ 0.42512039  2.98435423  3.95224719  0.          3.98796148  3.30870431]
+    [ 3.8547619   2.24270353  0.16025641  3.98796148  0.          1.82967033]
+    [ 3.10937312  0.46774194  1.86111111  3.30870431  1.82967033  0.        ]]
+
+   Next we'll do some work with these beta diversity distance matrices. First,
+   we'll determine if the UniFrac and Bray-Curtis distance matrices are
+   significantly correlated by computing the Mantel correlation between them.
+   Then we'll determine if the p-value is significant based on an alpha of
+   0.05.
+
+   >>> from skbio.stats.distance import mantel
+   >>> r, p_value, n = mantel(wu_dm, bc_dm)
+   >>> print(r)
+   0.922404392093
+   >>> alpha = 0.05
+   >>> print(p_value < alpha)
+   True
+
+   Next, we'll perform principal coordinates analysis (PCoA) on our weighted
+   UniFrac distance matrix.
+
+   >>> from skbio.stats.ordination import pcoa
+   >>> wu_pc = pcoa(wu_dm)
+
+   PCoA plots are only really interesting in the context of sample metadata, so
+   let's define some before we visualize these results.
+
+   >>> import pandas as pd
+   >>> sample_md = [
+   ...    ('A', ['gut', 's1']),
+   ...    ('B', ['skin', 's1']),
+   ...    ('C', ['tongue', 's1']),
+   ...    ('D', ['gut', 's2']),
+   ...    ('E', ['tongue', 's2']),
+   ...    ('F', ['skin', 's2'])]
+   >>> sample_md = pd.DataFrame.from_items(
+   ...     sample_md, columns=['body_site', 'subject'], orient='index')
+   >>> sample_md
+     body_site subject
+   A       gut      s1
+   B      skin      s1
+   C    tongue      s1
+   D       gut      s2
+   E    tongue      s2
+   F      skin      s2
+
+   Now let's plot our PCoA results, coloring each sample by the subject it
+   was taken from:
+
+   >>> fig = wu_pc.plot(sample_md, 'subject',
+   ...                  axis_labels=('PC 1', 'PC 2', 'PC 3'),
+   ...                  title='Samples colored by subject', cmap='jet', s=50)
+
+.. plot::
+   :context:
+
+   We don't see any clustering/grouping of samples. If we instead color the
+   samples by the body site they were taken from, we see that samples from the
+   same body site (those that are colored the same) appear closer to one
+   another in the 3-D space than they are to samples from other body sites.
+
+   >>> import matplotlib.pyplot as plt
+   >>> plt.close('all') # not necessary for normal use
+   >>> fig = wu_pc.plot(sample_md, 'body_site',
+   ...                  axis_labels=('PC 1', 'PC 2', 'PC 3'),
+   ...                  title='Samples colored by body site', cmap='jet', s=50)
+
+.. plot::
+   :context:
+
+   Ordination techniques, such as PCoA, are useful for exploratory analysis.
+   The next step is to quantify the strength of the grouping/clustering that we
+   see in ordination plots. There are many statistical methods available to
+   accomplish this; many operate on distance matrices. Let's use ANOSIM to
+   quantify the strength of the clustering we see in the ordination plots
+   above, using our weighted UniFrac distance matrix and sample metadata.
+
+   First test the grouping of samples by subject:
+
+   >>> from skbio.stats.distance import anosim
+   >>> results = anosim(wu_dm, sample_md, column='subject', permutations=999)
+   >>> results['test statistic']
+   -0.33333333333333331
+   >>> results['p-value'] < 0.1
+   False
+
+   The negative value of ANOSIM's R statistic indicates anti-clustering, and
+   the p-value is not significant at an alpha of 0.1.
+
+   Now let's test the grouping of samples by body site:
+
+   >>> results = anosim(wu_dm, sample_md, column='body_site', permutations=999)
+   >>> results['test statistic']
+   1.0
+   >>> results['p-value'] < 0.1
+   True
+
+   The R statistic indicates strong separation of samples based on body site.
+   The p-value is significant at an alpha of 0.1.
+
+   We can also explore the alpha diversity in the context of sample metadata.
+   To do this, let's add the observed OTUs and Faith PD data to our sample
+   metadata. This is straightforward because ``alpha_diversity`` returns a
+   pandas ``Series`` object, and we're representing our sample metadata in a
+   pandas ``DataFrame`` object.
+
+   >>> sample_md['Observed OTUs'] = adiv_obs_otus
+   >>> sample_md['Faith PD'] = adiv_faith_pd
+   >>> sample_md
+     body_site subject  Observed OTUs  Faith PD
+   A       gut      s1              5      6.75
+   B      skin      s1              5      7.00
+   C    tongue      s1              4      6.25
+   D       gut      s2              4      5.75
+   E    tongue      s2              5      6.75
+   F      skin      s2              3      5.50
+
+   We can investigate these alpha diversity data in the context of our metadata
+   categories. For example, we can generate boxplots showing Faith PD by body
+   site.
+
+   >>> import matplotlib.pyplot as plt
+   >>> plt.close('all') # not necessary for normal use
+   >>> fig = sample_md.boxplot(column='Faith PD', by='body_site')
+
+We can also compute Spearman correlations between all pairs of columns in this
+``DataFrame``. Since our alpha diversity metrics are the only two numeric
+columns (and thus the only columns for which Spearman correlation is relevant),
+this will give us a symmetric 2x2 correlation matrix.
+
+>>> sample_md.corr(method="spearman")
+               Observed OTUs  Faith PD
+Observed OTUs       1.000000  0.939336
+Faith PD            0.939336  1.000000
+
 """
 
 # ----------------------------------------------------------------------------
@@ -29,4 +400,11 @@ Subpackages
 from __future__ import absolute_import, division, print_function
 
 from skbio.util import TestRunner
+
+from ._driver import (alpha_diversity, beta_diversity,
+                      get_alpha_diversity_metrics, get_beta_diversity_metrics)
+
+__all__ = ["alpha_diversity", "beta_diversity", "get_alpha_diversity_metrics",
+           "get_beta_diversity_metrics"]
+
 test = TestRunner(__file__).test
diff --git a/skbio/diversity/_driver.py b/skbio/diversity/_driver.py
new file mode 100644
index 0000000..61aa403
--- /dev/null
+++ b/skbio/diversity/_driver.py
@@ -0,0 +1,277 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import functools
+
+import scipy.spatial.distance
+import pandas as pd
+
+import skbio
+from skbio.diversity.alpha._faith_pd import _faith_pd, _setup_faith_pd
+from skbio.diversity.beta._unifrac import (
+    _setup_multiple_unweighted_unifrac, _setup_multiple_weighted_unifrac,
+    _normalize_weighted_unifrac_by_default)
+from skbio.util._decorator import experimental
+from skbio.stats.distance import DistanceMatrix
+from skbio.diversity._util import (_validate_counts_matrix,
+                                   _get_phylogenetic_kwargs)
+
+
+def _get_alpha_diversity_metric_map():
+    return {
+        'ace': skbio.diversity.alpha.ace,
+        'chao1': skbio.diversity.alpha.chao1,
+        'chao1_ci': skbio.diversity.alpha.chao1_ci,
+        'berger_parker_d': skbio.diversity.alpha.berger_parker_d,
+        'brillouin_d': skbio.diversity.alpha.brillouin_d,
+        'dominance': skbio.diversity.alpha.dominance,
+        'doubles': skbio.diversity.alpha.doubles,
+        'enspie': skbio.diversity.alpha.enspie,
+        'esty_ci': skbio.diversity.alpha.esty_ci,
+        'faith_pd': skbio.diversity.alpha.faith_pd,
+        'fisher_alpha': skbio.diversity.alpha.fisher_alpha,
+        'goods_coverage': skbio.diversity.alpha.goods_coverage,
+        'heip_e': skbio.diversity.alpha.heip_e,
+        'kempton_taylor_q': skbio.diversity.alpha.kempton_taylor_q,
+        'margalef': skbio.diversity.alpha.margalef,
+        'mcintosh_d': skbio.diversity.alpha.mcintosh_d,
+        'mcintosh_e': skbio.diversity.alpha.mcintosh_e,
+        'menhinick': skbio.diversity.alpha.menhinick,
+        'michaelis_menten_fit': skbio.diversity.alpha.michaelis_menten_fit,
+        'observed_otus': skbio.diversity.alpha.observed_otus,
+        'osd': skbio.diversity.alpha.osd,
+        'pielou_e': skbio.diversity.alpha.pielou_e,
+        'robbins': skbio.diversity.alpha.robbins,
+        'shannon': skbio.diversity.alpha.shannon,
+        'simpson': skbio.diversity.alpha.simpson,
+        'simpson_e': skbio.diversity.alpha.simpson_e,
+        'singles': skbio.diversity.alpha.singles,
+        'strong': skbio.diversity.alpha.strong,
+        'gini_index': skbio.diversity.alpha.gini_index,
+        'lladser_pe': skbio.diversity.alpha.lladser_pe,
+        'lladser_ci': skbio.diversity.alpha.lladser_ci}
+
+
+@experimental(as_of="0.4.1")
+def get_alpha_diversity_metrics():
+    """ List scikit-bio's alpha diversity metrics
+
+    The alpha diversity metrics listed here can be passed as metrics to
+    ``skbio.diversity.alpha_diversity``.
+
+    Returns
+    -------
+    list of str
+        Alphabetically sorted list of alpha diversity metrics implemented in
+        scikit-bio.
+
+    See Also
+    --------
+    alpha_diversity
+    get_beta_diversity_metrics
+
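+    Examples
+    --------
+    A minimal membership check; the full list is long, so only one metric is
+    shown here:
+
+    >>> from skbio.diversity import get_alpha_diversity_metrics
+    >>> metrics = get_alpha_diversity_metrics()
+    >>> 'faith_pd' in metrics
+    True
+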
+    """
+    metrics = _get_alpha_diversity_metric_map()
+    return sorted(metrics.keys())
+
+
+@experimental(as_of="0.4.1")
+def get_beta_diversity_metrics():
+    """ List scikit-bio's beta diversity metrics
+
+    The beta diversity metrics listed here can be passed as metrics to
+    ``skbio.diversity.beta_diversity``.
+
+    Returns
+    -------
+    list of str
+        Alphabetically sorted list of beta diversity metrics implemented in
+        scikit-bio.
+
+    See Also
+    --------
+    beta_diversity
+    get_alpha_diversity_metrics
+    scipy.spatial.distance.pdist
+
+    Notes
+    -----
+    SciPy implements many additional beta diversity metrics that are not
+    included in this list. See documentation for
+    ``scipy.spatial.distance.pdist`` for more details.
+
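+    Examples
+    --------
+    The returned list contains only the phylogenetic metrics implemented in
+    scikit-bio:
+
+    >>> from skbio.diversity import get_beta_diversity_metrics
+    >>> get_beta_diversity_metrics()
+    ['unweighted_unifrac', 'weighted_unifrac']
+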
+    """
+    return sorted(['unweighted_unifrac', 'weighted_unifrac'])
+
+
+@experimental(as_of="0.4.1")
+def alpha_diversity(metric, counts, ids=None, validate=True, **kwargs):
+    """ Compute alpha diversity for one or more samples
+
+    Parameters
+    ----------
+    metric : str, callable
+        The alpha diversity metric to apply to the sample(s). Passing metric as
+        a string is preferable as this often results in an optimized version of
+        the metric being used.
+    counts : 1D or 2D array_like of ints or floats
+        Vector or matrix containing count/abundance data. If a matrix, each row
+        should contain counts of OTUs in a given sample.
+    ids : iterable of strs, optional
+        Identifiers for each sample in ``counts``. By default, samples will be
+        assigned integer identifiers in the order that they were provided.
+    validate : bool, optional
+        If `False`, validation of the input won't be performed. This step can
+        be slow, so if validation is run elsewhere it can be disabled here.
+        However, invalid input data can lead to invalid results or error
+        messages that are hard to interpret, so this step should not be
+        bypassed if you're not certain that your input data are valid. See
+        :mod:`skbio.diversity` for the description of what validation entails
+        so you can determine if you can safely disable validation.
+    kwargs : kwargs, optional
+        Metric-specific parameters.
+
+    Returns
+    -------
+    pd.Series
+        Values of ``metric`` for all vectors provided in ``counts``. The index
+        will be ``ids``, if provided.
+
+    Raises
+    ------
+    ValueError, MissingNodeError, DuplicateNodeError
+        If validation fails. Exact error will depend on what was invalid.
+    TypeError
+        If invalid method-specific parameters are provided.
+
+    See Also
+    --------
+    skbio.diversity
+    skbio.diversity.alpha
+    skbio.diversity.get_alpha_diversity_metrics
+    skbio.diversity.beta_diversity
+
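+    Examples
+    --------
+    A minimal sketch with illustrative count data; ``observed_otus`` counts
+    the OTUs with nonzero abundance in each sample:
+
+    >>> from skbio.diversity import alpha_diversity
+    >>> counts = [[1, 0, 2, 5],
+    ...           [3, 1, 0, 0]]
+    >>> obs = alpha_diversity('observed_otus', counts, ids=['A', 'B'])
+    >>> int(obs['A']), int(obs['B'])
+    (3, 2)
+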
+    """
+    metric_map = _get_alpha_diversity_metric_map()
+
+    if validate:
+        counts = _validate_counts_matrix(counts, ids=ids)
+
+    if metric == 'faith_pd':
+        otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
+        counts_by_node, branch_lengths = _setup_faith_pd(
+            counts, otu_ids, tree, validate, single_sample=False)
+        counts = counts_by_node
+        metric = functools.partial(_faith_pd, branch_lengths=branch_lengths)
+    elif callable(metric):
+        metric = functools.partial(metric, **kwargs)
+    elif metric in metric_map:
+        metric = functools.partial(metric_map[metric], **kwargs)
+    else:
+        raise ValueError('Unknown metric provided: %r.' % metric)
+
+    # kwargs is provided here so an error is raised on extra kwargs
+    results = [metric(c, **kwargs) for c in counts]
+    return pd.Series(results, index=ids)
+
+
+@experimental(as_of="0.4.0")
+def beta_diversity(metric, counts, ids=None, validate=True, pairwise_func=None,
+                   **kwargs):
+    """Compute distances between all pairs of samples
+
+    Parameters
+    ----------
+    metric : str, callable
+        The pairwise distance function to apply. See the scipy ``pdist`` docs
+        and the scikit-bio functions linked under *See Also* for available
+        metrics. Passing metrics as strings is preferable as this often
+        results in an optimized version of the metric being used.
+    counts : 2D array_like of ints or floats
+        Matrix containing count/abundance data where each row contains counts
+        of OTUs in a given sample.
+    ids : iterable of strs, optional
+        Identifiers for each sample in ``counts``. By default, samples will be
+        assigned integer identifiers in the order that they were provided
+        (where the type of the identifiers will be ``str``).
+    validate : bool, optional
+        If `False`, validation of the input won't be performed. This step can
+        be slow, so if validation is run elsewhere it can be disabled here.
+        However, invalid input data can lead to invalid results or error
+        messages that are hard to interpret, so this step should not be
+        bypassed if you're not certain that your input data are valid. See
+        :mod:`skbio.diversity` for the description of what validation entails
+        so you can determine if you can safely disable validation.
+    pairwise_func : callable, optional
+        The function to use for computing pairwise distances. This function
+        must take ``counts`` and ``metric`` and return a square, hollow, 2-D
+        ``numpy.ndarray`` of dissimilarities (floats). Examples of functions
+        that can be provided are ``scipy.spatial.distance.pdist`` and
+        ``sklearn.metrics.pairwise_distances``. By default,
+        ``scipy.spatial.distance.pdist`` will be used.
+    kwargs : kwargs, optional
+        Metric-specific parameters.
+
+    Returns
+    -------
+    skbio.DistanceMatrix
+        Distances between all pairs of samples (i.e., rows). The number of
+        rows and columns will be equal to the number of rows in ``counts``.
+
+    Raises
+    ------
+    ValueError, MissingNodeError, DuplicateNodeError
+        If validation fails. Exact error will depend on what was invalid.
+    TypeError
+        If invalid method-specific parameters are provided.
+
+    See Also
+    --------
+    skbio.diversity
+    skbio.diversity.beta
+    skbio.diversity.get_beta_diversity_metrics
+    skbio.diversity.alpha_diversity
+    scipy.spatial.distance.pdist
+    sklearn.metrics.pairwise_distances
+
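+    Examples
+    --------
+    A minimal sketch with illustrative count data, using the Bray-Curtis
+    metric (dispatched to ``scipy.spatial.distance.pdist``):
+
+    >>> from skbio.diversity import beta_diversity
+    >>> counts = [[1, 0, 2],
+    ...           [3, 1, 0],
+    ...           [0, 4, 4]]
+    >>> dm = beta_diversity('braycurtis', counts, ids=['A', 'B', 'C'])
+    >>> dm.shape
+    (3, 3)
+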
+    """
+    if validate:
+        counts = _validate_counts_matrix(counts, ids=ids)
+
+    if pairwise_func is None:
+        pairwise_func = scipy.spatial.distance.pdist
+
+    if metric == 'unweighted_unifrac':
+        otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
+        metric, counts_by_node = _setup_multiple_unweighted_unifrac(
+                counts, otu_ids=otu_ids, tree=tree, validate=validate)
+        counts = counts_by_node
+    elif metric == 'weighted_unifrac':
+        # get the value for normalized. if it was not provided, it will fall
+        # back to the default value inside of _weighted_unifrac_pdist_f
+        normalized = kwargs.pop('normalized',
+                                _normalize_weighted_unifrac_by_default)
+        otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
+        metric, counts_by_node = _setup_multiple_weighted_unifrac(
+                counts, otu_ids=otu_ids, tree=tree, normalized=normalized,
+                validate=validate)
+        counts = counts_by_node
+    elif callable(metric):
+        metric = functools.partial(metric, **kwargs)
+        # remove all values from kwargs, since they have already been provided
+        # through the partial
+        kwargs = {}
+    else:
+        # metric is a string that scikit-bio doesn't know about, for
+        # example one of the SciPy metrics
+        pass
+
+    distances = pairwise_func(counts, metric=metric, **kwargs)
+    return DistanceMatrix(distances, ids)
diff --git a/skbio/stats/__subsample.c b/skbio/diversity/_phylogenetic.c
similarity index 55%
copy from skbio/stats/__subsample.c
copy to skbio/diversity/_phylogenetic.c
index 261692c..4b29dda 100644
--- a/skbio/stats/__subsample.c
+++ b/skbio/diversity/_phylogenetic.c
@@ -1,33 +1,13 @@
-/* Generated by Cython 0.22 */
-
-/* BEGIN: Cython Metadata
-{
-    "distutils": {
-        "depends": []
-    }
-}
-END: Cython Metadata */
+/* Generated by Cython 0.23.4 */
 
 #define PY_SSIZE_T_CLEAN
-#ifndef CYTHON_USE_PYLONG_INTERNALS
-#ifdef PYLONG_BITS_IN_DIGIT
-#define CYTHON_USE_PYLONG_INTERNALS 0
-#else
-#include "pyconfig.h"
-#ifdef PYLONG_BITS_IN_DIGIT
-#define CYTHON_USE_PYLONG_INTERNALS 1
-#else
-#define CYTHON_USE_PYLONG_INTERNALS 0
-#endif
-#endif
-#endif
 #include "Python.h"
 #ifndef Py_PYTHON_H
     #error Python headers needed to compile C extensions, please install development version of Python.
 #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
     #error Cython requires Python 2.6+ or Python 3.2+.
 #else
-#define CYTHON_ABI "0_22"
+#define CYTHON_ABI "0_23_4"
 #include <stddef.h>
 #ifndef offsetof
 #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
@@ -62,6 +42,9 @@ END: Cython Metadata */
 #define CYTHON_COMPILING_IN_PYPY 0
 #define CYTHON_COMPILING_IN_CPYTHON 1
 #endif
+#if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000
+#define CYTHON_USE_PYLONG_INTERNALS 1
+#endif
 #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
 #define Py_OptimizeFlag 0
 #endif
@@ -69,26 +52,30 @@ END: Cython Metadata */
 #define CYTHON_FORMAT_SSIZE_T "z"
 #if PY_MAJOR_VERSION < 3
   #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
-  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
           PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
   #define __Pyx_DefaultClassType PyClass_Type
 #else
   #define __Pyx_BUILTIN_MODULE_NAME "builtins"
-  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
           PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
   #define __Pyx_DefaultClassType PyType_Type
 #endif
-#if PY_MAJOR_VERSION >= 3
+#ifndef Py_TPFLAGS_CHECKTYPES
   #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
   #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
   #define Py_TPFLAGS_HAVE_NEWBUFFER 0
 #endif
-#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
   #define Py_TPFLAGS_HAVE_FINALIZE 0
 #endif
 #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
   #define CYTHON_PEP393_ENABLED 1
-  #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ? \
+  #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ?\
                                               0 : _PyUnicode_Ready((PyObject *)(op)))
   #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_LENGTH(u)
   #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
@@ -107,12 +94,13 @@ END: Cython Metadata */
 #if CYTHON_COMPILING_IN_PYPY
   #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)
   #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)
-  #define __Pyx_PyFrozenSet_Size(s)         PyObject_Size(s)
 #else
   #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)
-  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
+  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
       PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
-  #define __Pyx_PyFrozenSet_Size(s)         PySet_Size(s)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
+  #define PyUnicode_Contains(u, s)  PySequence_Contains(u, s)
 #endif
 #define __Pyx_PyString_FormatSafe(a, b)   ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
 #define __Pyx_PyUnicode_FormatSafe(a, b)  ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
@@ -177,16 +165,18 @@ END: Cython Metadata */
 #else
   #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
 #endif
-#ifndef CYTHON_INLINE
-  #if defined(__GNUC__)
-    #define CYTHON_INLINE __inline__
-  #elif defined(_MSC_VER)
-    #define CYTHON_INLINE __inline
-  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
-    #define CYTHON_INLINE inline
-  #else
-    #define CYTHON_INLINE
-  #endif
+#if PY_VERSION_HEX >= 0x030500B1
+#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+#elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+typedef struct {
+    unaryfunc am_await;
+    unaryfunc am_aiter;
+    unaryfunc am_anext;
+} __Pyx_PyAsyncMethodsStruct;
+#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+#else
+#define __Pyx_PyType_AsAsync(obj) NULL
 #endif
 #ifndef CYTHON_RESTRICT
   #if defined(__GNUC__)
@@ -199,43 +189,41 @@ END: Cython Metadata */
     #define CYTHON_RESTRICT
   #endif
 #endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+
+#ifndef CYTHON_INLINE
+  #if defined(__GNUC__)
+    #define CYTHON_INLINE __inline__
+  #elif defined(_MSC_VER)
+    #define CYTHON_INLINE __inline
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_INLINE inline
+  #else
+    #define CYTHON_INLINE
+  #endif
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+  #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
 #ifdef NAN
 #define __PYX_NAN() ((float) NAN)
 #else
 static CYTHON_INLINE float __PYX_NAN() {
-  /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
-   a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
-   a quiet NaN. */
   float value;
   memset(&value, 0xFF, sizeof(value));
   return value;
 }
 #endif
-#define __Pyx_void_to_None(void_result) (void_result, Py_INCREF(Py_None), Py_None)
-#ifdef __cplusplus
-template<typename T>
-void __Pyx_call_destructor(T* x) {
-    x->~T();
-}
-template<typename T>
-class __Pyx_FakeReference {
-  public:
-    __Pyx_FakeReference() : ptr(NULL) { }
-    __Pyx_FakeReference(T& ref) : ptr(&ref) { }
-    T *operator->() { return ptr; }
-    operator T&() { return *ptr; }
-  private:
-    T *ptr;
-};
-#endif
 
 
 #if PY_MAJOR_VERSION >= 3
   #define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)
   #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)
 #else
-  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)
-  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)
+  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_Divide(x,y)
+  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceDivide(x,y)
 #endif
 
 #ifndef __PYX_EXTERN_C
@@ -246,12 +234,8 @@ class __Pyx_FakeReference {
   #endif
 #endif
 
-#if defined(WIN32) || defined(MS_WINDOWS)
-#define _USE_MATH_DEFINES
-#endif
-#include <math.h>
-#define __PYX_HAVE__skbio__stats____subsample
-#define __PYX_HAVE_API__skbio__stats____subsample
+#define __PYX_HAVE__skbio__diversity___phylogenetic
+#define __PYX_HAVE_API__skbio__diversity___phylogenetic
 #include "string.h"
 #include "stdio.h"
 #include "stdlib.h"
@@ -278,6 +262,13 @@ class __Pyx_FakeReference {
 #   define CYTHON_UNUSED
 # endif
 #endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+#  define CYTHON_NCP_UNUSED
+# else
+#  define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
 typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
                 const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
 
@@ -286,16 +277,34 @@ typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
 #define __PYX_DEFAULT_STRING_ENCODING ""
 #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
 #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
-#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (    \
-    (sizeof(type) < sizeof(Py_ssize_t))  ||             \
-    (sizeof(type) > sizeof(Py_ssize_t) &&               \
-          likely(v < (type)PY_SSIZE_T_MAX ||            \
-                 v == (type)PY_SSIZE_T_MAX)  &&         \
-          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||       \
-                                v == (type)PY_SSIZE_T_MIN)))  ||  \
-    (sizeof(type) == sizeof(Py_ssize_t) &&              \
-          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||        \
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (\
+    (sizeof(type) < sizeof(Py_ssize_t))  ||\
+    (sizeof(type) > sizeof(Py_ssize_t) &&\
+          likely(v < (type)PY_SSIZE_T_MAX ||\
+                 v == (type)PY_SSIZE_T_MAX)  &&\
+          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+                                v == (type)PY_SSIZE_T_MIN)))  ||\
+    (sizeof(type) == sizeof(Py_ssize_t) &&\
+          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
                                v == (type)PY_SSIZE_T_MAX)))  )
+#if defined (__cplusplus) && __cplusplus >= 201103L
+    #include <cstdlib>
+    #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+    #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+    #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER) && defined (_M_X64)
+    #define __Pyx_sst_abs(value) _abs64(value)
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+    #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+    #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
 static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
 static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
 #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
@@ -330,8 +339,9 @@ static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
 #define __Pyx_PyUnicode_FromUnicode(u)       PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
 #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
 #define __Pyx_PyUnicode_AsUnicode            PyUnicode_AsUnicode
-#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
-#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
+#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
+#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
 static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
 static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
 static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
@@ -460,7 +470,7 @@ static const char *__pyx_filename;
 
 
 static const char *__pyx_f[] = {
-  "skbio/stats/__subsample.pyx",
+  "skbio/diversity/_phylogenetic.pyx",
   "__init__.pxd",
   "type.pxd",
 };
@@ -500,7 +510,7 @@ typedef struct {
 } __Pyx_BufFmt_Context;
 
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":725
  * # in Cython to enable them only on the right systems.
  * 
  * ctypedef npy_int8       int8_t             # <<<<<<<<<<<<<<
@@ -509,7 +519,7 @@ typedef struct {
  */
 typedef npy_int8 __pyx_t_5numpy_int8_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":726
  * 
  * ctypedef npy_int8       int8_t
  * ctypedef npy_int16      int16_t             # <<<<<<<<<<<<<<
@@ -518,7 +528,7 @@ typedef npy_int8 __pyx_t_5numpy_int8_t;
  */
 typedef npy_int16 __pyx_t_5numpy_int16_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":727
  * ctypedef npy_int8       int8_t
  * ctypedef npy_int16      int16_t
  * ctypedef npy_int32      int32_t             # <<<<<<<<<<<<<<
@@ -527,7 +537,7 @@ typedef npy_int16 __pyx_t_5numpy_int16_t;
  */
 typedef npy_int32 __pyx_t_5numpy_int32_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":729
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":728
  * ctypedef npy_int16      int16_t
  * ctypedef npy_int32      int32_t
  * ctypedef npy_int64      int64_t             # <<<<<<<<<<<<<<
@@ -536,7 +546,7 @@ typedef npy_int32 __pyx_t_5numpy_int32_t;
  */
 typedef npy_int64 __pyx_t_5numpy_int64_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":732
  * #ctypedef npy_int128     int128_t
  * 
  * ctypedef npy_uint8      uint8_t             # <<<<<<<<<<<<<<
@@ -545,7 +555,7 @@ typedef npy_int64 __pyx_t_5numpy_int64_t;
  */
 typedef npy_uint8 __pyx_t_5numpy_uint8_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":733
  * 
  * ctypedef npy_uint8      uint8_t
  * ctypedef npy_uint16     uint16_t             # <<<<<<<<<<<<<<
@@ -554,7 +564,7 @@ typedef npy_uint8 __pyx_t_5numpy_uint8_t;
  */
 typedef npy_uint16 __pyx_t_5numpy_uint16_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":734
  * ctypedef npy_uint8      uint8_t
  * ctypedef npy_uint16     uint16_t
  * ctypedef npy_uint32     uint32_t             # <<<<<<<<<<<<<<
@@ -563,7 +573,7 @@ typedef npy_uint16 __pyx_t_5numpy_uint16_t;
  */
 typedef npy_uint32 __pyx_t_5numpy_uint32_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":736
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":735
  * ctypedef npy_uint16     uint16_t
  * ctypedef npy_uint32     uint32_t
  * ctypedef npy_uint64     uint64_t             # <<<<<<<<<<<<<<
@@ -572,7 +582,7 @@ typedef npy_uint32 __pyx_t_5numpy_uint32_t;
  */
 typedef npy_uint64 __pyx_t_5numpy_uint64_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":739
  * #ctypedef npy_uint128    uint128_t
  * 
  * ctypedef npy_float32    float32_t             # <<<<<<<<<<<<<<
@@ -581,7 +591,7 @@ typedef npy_uint64 __pyx_t_5numpy_uint64_t;
  */
 typedef npy_float32 __pyx_t_5numpy_float32_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":741
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":740
  * 
  * ctypedef npy_float32    float32_t
  * ctypedef npy_float64    float64_t             # <<<<<<<<<<<<<<
@@ -590,7 +600,7 @@ typedef npy_float32 __pyx_t_5numpy_float32_t;
  */
 typedef npy_float64 __pyx_t_5numpy_float64_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":749
  * # The int types are mapped a bit surprising --
  * # numpy.int corresponds to 'l' and numpy.long to 'q'
  * ctypedef npy_long       int_t             # <<<<<<<<<<<<<<
@@ -599,7 +609,7 @@ typedef npy_float64 __pyx_t_5numpy_float64_t;
  */
 typedef npy_long __pyx_t_5numpy_int_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":750
  * # numpy.int corresponds to 'l' and numpy.long to 'q'
  * ctypedef npy_long       int_t
  * ctypedef npy_longlong   long_t             # <<<<<<<<<<<<<<
@@ -608,7 +618,7 @@ typedef npy_long __pyx_t_5numpy_int_t;
  */
 typedef npy_longlong __pyx_t_5numpy_long_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":751
  * ctypedef npy_long       int_t
  * ctypedef npy_longlong   long_t
  * ctypedef npy_longlong   longlong_t             # <<<<<<<<<<<<<<
@@ -617,7 +627,7 @@ typedef npy_longlong __pyx_t_5numpy_long_t;
  */
 typedef npy_longlong __pyx_t_5numpy_longlong_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":753
  * ctypedef npy_longlong   longlong_t
  * 
  * ctypedef npy_ulong      uint_t             # <<<<<<<<<<<<<<
@@ -626,7 +636,7 @@ typedef npy_longlong __pyx_t_5numpy_longlong_t;
  */
 typedef npy_ulong __pyx_t_5numpy_uint_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":754
  * 
  * ctypedef npy_ulong      uint_t
  * ctypedef npy_ulonglong  ulong_t             # <<<<<<<<<<<<<<
@@ -635,7 +645,7 @@ typedef npy_ulong __pyx_t_5numpy_uint_t;
  */
 typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":755
  * ctypedef npy_ulong      uint_t
  * ctypedef npy_ulonglong  ulong_t
  * ctypedef npy_ulonglong  ulonglong_t             # <<<<<<<<<<<<<<
@@ -644,7 +654,7 @@ typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
  */
 typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":757
  * ctypedef npy_ulonglong  ulonglong_t
  * 
  * ctypedef npy_intp       intp_t             # <<<<<<<<<<<<<<
@@ -653,7 +663,7 @@ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
  */
 typedef npy_intp __pyx_t_5numpy_intp_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":758
  * 
  * ctypedef npy_intp       intp_t
  * ctypedef npy_uintp      uintp_t             # <<<<<<<<<<<<<<
@@ -662,7 +672,7 @@ typedef npy_intp __pyx_t_5numpy_intp_t;
  */
 typedef npy_uintp __pyx_t_5numpy_uintp_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":760
  * ctypedef npy_uintp      uintp_t
  * 
  * ctypedef npy_double     float_t             # <<<<<<<<<<<<<<
@@ -671,7 +681,7 @@ typedef npy_uintp __pyx_t_5numpy_uintp_t;
  */
 typedef npy_double __pyx_t_5numpy_float_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":761
  * 
  * ctypedef npy_double     float_t
  * ctypedef npy_double     double_t             # <<<<<<<<<<<<<<
@@ -680,7 +690,7 @@ typedef npy_double __pyx_t_5numpy_float_t;
  */
 typedef npy_double __pyx_t_5numpy_double_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":762
  * ctypedef npy_double     float_t
  * ctypedef npy_double     double_t
  * ctypedef npy_longdouble longdouble_t             # <<<<<<<<<<<<<<
@@ -688,6 +698,15 @@ typedef npy_double __pyx_t_5numpy_double_t;
  * ctypedef npy_cfloat      cfloat_t
  */
 typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
+
+/* "skbio/diversity/_phylogenetic.pyx":14
+ * 
+ * DTYPE = np.int64
+ * ctypedef np.int64_t DTYPE_t             # <<<<<<<<<<<<<<
+ * 
+ * 
+ */
+typedef __pyx_t_5numpy_int64_t __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t;
 #if CYTHON_CCOMPLEX
   #ifdef __cplusplus
     typedef ::std::complex< float > __pyx_t_float_complex;
@@ -711,7 +730,7 @@ typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
 
 /*--- Type declarations ---*/
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":764
  * ctypedef npy_longdouble longdouble_t
  * 
  * ctypedef npy_cfloat      cfloat_t             # <<<<<<<<<<<<<<
@@ -720,7 +739,7 @@ typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
  */
 typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":765
  * 
  * ctypedef npy_cfloat      cfloat_t
  * ctypedef npy_cdouble     cdouble_t             # <<<<<<<<<<<<<<
@@ -729,7 +748,7 @@ typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
  */
 typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":767
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":766
  * ctypedef npy_cfloat      cfloat_t
  * ctypedef npy_cdouble     cdouble_t
  * ctypedef npy_clongdouble clongdouble_t             # <<<<<<<<<<<<<<
@@ -738,7 +757,7 @@ typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
  */
 typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":768
  * ctypedef npy_clongdouble clongdouble_t
  * 
  * ctypedef npy_cdouble     complex_t             # <<<<<<<<<<<<<<
@@ -764,19 +783,19 @@ typedef npy_cdouble __pyx_t_5numpy_complex_t;
   static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
   #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
 #ifdef WITH_THREAD
-  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
-          if (acquire_gil) { \
-              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
-              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
-              PyGILState_Release(__pyx_gilstate_save); \
-          } else { \
-              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
+  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+          if (acquire_gil) {\
+              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+              PyGILState_Release(__pyx_gilstate_save);\
+          } else {\
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
           }
 #else
-  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
           __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
 #endif
-  #define __Pyx_RefNannyFinishContext() \
+  #define __Pyx_RefNannyFinishContext()\
           __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
   #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
   #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
@@ -799,13 +818,13 @@ typedef npy_cdouble __pyx_t_5numpy_complex_t;
   #define __Pyx_XGOTREF(r)
   #define __Pyx_XGIVEREF(r)
 #endif
-#define __Pyx_XDECREF_SET(r, v) do {                            \
-        PyObject *tmp = (PyObject *) r;                         \
-        r = v; __Pyx_XDECREF(tmp);                              \
+#define __Pyx_XDECREF_SET(r, v) do {\
+        PyObject *tmp = (PyObject *) r;\
+        r = v; __Pyx_XDECREF(tmp);\
     } while (0)
-#define __Pyx_DECREF_SET(r, v) do {                             \
-        PyObject *tmp = (PyObject *) r;                         \
-        r = v; __Pyx_DECREF(tmp);                               \
+#define __Pyx_DECREF_SET(r, v) do {\
+        PyObject *tmp = (PyObject *) r;\
+        r = v; __Pyx_DECREF(tmp);\
     } while (0)
 #define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
 #define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
@@ -832,8 +851,8 @@ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
 
 static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
 
-static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
-    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
+    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
     const char* function_name);
 
 static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
@@ -843,38 +862,36 @@ static CYTHON_INLINE int  __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* o
     __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
 static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
 
-static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
-
 #if CYTHON_COMPILING_IN_CPYTHON
 static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
 #else
 #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
 #endif
 
-static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
-
-static void __Pyx_RaiseBufferFallbackError(void);
-
-static void __Pyx_RaiseBufferIndexError(int axis);
-
-#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
 #if CYTHON_COMPILING_IN_CPYTHON
 static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
 #endif
 
 static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
 
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
-        PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
-        PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
-        int has_cstart, int has_cstop, int wraparound);
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
+#else
+#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
+#endif
+
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
+
+#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
+
+static void __Pyx_RaiseBufferFallbackError(void);
 
 static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb);
 static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb);
 
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
-
-#if PY_MAJOR_VERSION >= 3
+#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
 static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
     PyObject *value;
     value = PyDict_GetItemWithError(d, key);
@@ -894,12 +911,42 @@ static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
     #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
 #endif
 
+#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+    __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
+    (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
+               __Pyx_GetItemInt_Generic(o, to_py_func(i))))
+#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+    __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+    (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+                                                              int wraparound, int boundscheck);
+#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+    __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+    (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+                                                              int wraparound, int boundscheck);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
+                                                     int is_list, int wraparound, int boundscheck);
+
+static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) {
+    int result = PySequence_Contains(seq, item);
+    return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
+}
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
+
 static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
 
 static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
 
 static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
 
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
+
 typedef struct {
     int code_line;
     PyCodeObject* code_object;
@@ -942,13 +989,13 @@ typedef struct {
 static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
 static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
 
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
-
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value);
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value);
 
-static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *);
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+static CYTHON_INLINE npy_int64 __Pyx_PyInt_As_npy_int64(PyObject *);
 
 #if CYTHON_CCOMPLEX
   #ifdef __cplusplus
@@ -1052,7 +1099,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
 
 static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
 
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
 
 static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
 
@@ -1075,19 +1122,21 @@ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
 
 /* Module declarations from 'cpython.buffer' */
 
-/* Module declarations from 'cpython.ref' */
-
 /* Module declarations from 'libc.string' */
 
 /* Module declarations from 'libc.stdio' */
 
-/* Module declarations from 'cpython.object' */
-
 /* Module declarations from '__builtin__' */
 
 /* Module declarations from 'cpython.type' */
 static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
 
+/* Module declarations from 'cpython' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from 'cpython.ref' */
+
 /* Module declarations from 'libc.stdlib' */
 
 /* Module declarations from 'numpy' */
@@ -1100,24 +1149,26 @@ static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
 static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
 static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
 
-/* Module declarations from 'skbio.stats.__subsample' */
-static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t = { "int64_t", NULL, sizeof(__pyx_t_5numpy_int64_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int64_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int64_t), 0 };
-#define __Pyx_MODULE_NAME "skbio.stats.__subsample"
-int __pyx_module_is_main_skbio__stats____subsample = 0;
+/* Module declarations from 'cython' */
+
+/* Module declarations from 'skbio.diversity._phylogenetic' */
+static PyObject *__pyx_f_5skbio_9diversity_13_phylogenetic__traverse_reduce(PyArrayObject *, PyArrayObject *); /*proto*/
+static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_double_t = { "double_t", NULL, sizeof(__pyx_t_5numpy_double_t), { 0 }, 0, 'R', 0, 0 };
+static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t), 0 };
+#define __Pyx_MODULE_NAME "skbio.diversity._phylogenetic"
+int __pyx_module_is_main_skbio__diversity___phylogenetic = 0;
 
-/* Implementation of 'skbio.stats.__subsample' */
+/* Implementation of 'skbio.diversity._phylogenetic' */
 static PyObject *__pyx_builtin_range;
 static PyObject *__pyx_builtin_ValueError;
 static PyObject *__pyx_builtin_RuntimeError;
-static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_replacement(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_counts, PyObject *__pyx_v_n, PyObject *__pyx_v_counts_sum); /* proto */
-static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
-static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
 static char __pyx_k_B[] = "B";
 static char __pyx_k_H[] = "H";
 static char __pyx_k_I[] = "I";
 static char __pyx_k_L[] = "L";
 static char __pyx_k_O[] = "O";
 static char __pyx_k_Q[] = "Q";
+static char __pyx_k_a[] = "a";
 static char __pyx_k_b[] = "b";
 static char __pyx_k_d[] = "d";
 static char __pyx_k_f[] = "f";
@@ -1128,72 +1179,126 @@ static char __pyx_k_j[] = "j";
 static char __pyx_k_l[] = "l";
 static char __pyx_k_n[] = "n";
 static char __pyx_k_q[] = "q";
+static char __pyx_k_t[] = "t";
 static char __pyx_k_Zd[] = "Zd";
 static char __pyx_k_Zf[] = "Zf";
 static char __pyx_k_Zg[] = "Zg";
+static char __pyx_k_id[] = "id";
 static char __pyx_k_np[] = "np";
-static char __pyx_k_cnt[] = "cnt";
-static char __pyx_k_idx[] = "idx";
+static char __pyx_k_p_i[] = "p_i";
+static char __pyx_k_sum[] = "sum";
+static char __pyx_k_copy[] = "copy";
 static char __pyx_k_main[] = "__main__";
+static char __pyx_k_mask[] = "mask";
+static char __pyx_k_name[] = "name";
 static char __pyx_k_test[] = "__test__";
+static char __pyx_k_DTYPE[] = "DTYPE";
 static char __pyx_k_dtype[] = "dtype";
-static char __pyx_k_empty[] = "empty";
+static char __pyx_k_int64[] = "int64";
+static char __pyx_k_nodes[] = "nodes";
 static char __pyx_k_numpy[] = "numpy";
 static char __pyx_k_range[] = "range";
+static char __pyx_k_zeros[] = "zeros";
+static char __pyx_k_astype[] = "astype";
 static char __pyx_k_counts[] = "counts";
+static char __pyx_k_double[] = "double";
 static char __pyx_k_import[] = "__import__";
-static char __pyx_k_random[] = "random";
-static char __pyx_k_result[] = "result";
-static char __pyx_k_permuted[] = "permuted";
-static char __pyx_k_unpacked[] = "unpacked";
+static char __pyx_k_n_rows[] = "n_rows";
+static char __pyx_k_parent[] = "parent";
+static char __pyx_k_tip_ds[] = "tip_ds";
+static char __pyx_k_indexed[] = "indexed";
+static char __pyx_k_nonzero[] = "nonzero";
+static char __pyx_k_tip_ids[] = "tip_ids";
+static char __pyx_k_counts_t[] = "counts_t";
+static char __pyx_k_preorder[] = "preorder";
+static char __pyx_k_transpose[] = "transpose";
 static char __pyx_k_ValueError[] = "ValueError";
-static char __pyx_k_counts_sum[] = "counts_sum";
-static char __pyx_k_zeros_like[] = "zeros_like";
-static char __pyx_k_permutation[] = "permutation";
+static char __pyx_k_atleast_2d[] = "atleast_2d";
+static char __pyx_k_child_index[] = "child_index";
+static char __pyx_k_count_array[] = "count_array";
+static char __pyx_k_node_lookup[] = "node_lookup";
+static char __pyx_k_tip_indices[] = "tip_indices";
 static char __pyx_k_RuntimeError[] = "RuntimeError";
-static char __pyx_k_unpacked_idx[] = "unpacked_idx";
-static char __pyx_k_skbio_stats___subsample[] = "skbio.stats.__subsample";
+static char __pyx_k_include_self[] = "include_self";
+static char __pyx_k_n_count_otus[] = "n_count_otus";
+static char __pyx_k_observed_ids[] = "observed_ids";
+static char __pyx_k_otus_in_nodes[] = "otus_in_nodes";
+static char __pyx_k_tip_distances[] = "_tip_distances";
+static char __pyx_k_n_count_vectors[] = "n_count_vectors";
+static char __pyx_k_nodes_by_counts[] = "_nodes_by_counts";
+static char __pyx_k_observed_ids_set[] = "observed_ids_set";
+static char __pyx_k_observed_indices[] = "observed_indices";
 static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
-static char __pyx_k_home_evan_biocore_scikit_bio_sk[] = "/home/evan/biocore/scikit-bio/skbio/stats/__subsample.pyx";
-static char __pyx_k_subsample_counts_without_replac[] = "_subsample_counts_without_replacement";
+static char __pyx_k_skbio_diversity__phylogenetic[] = "skbio.diversity._phylogenetic";
+static char __pyx_k_Users_caporaso_Dropbox_code_sci[] = "/Users/caporaso/Dropbox/code/scikit-bio/skbio/diversity/_phylogenetic.pyx";
 static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
 static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
 static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
 static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
 static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
+static PyObject *__pyx_n_s_DTYPE;
 static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
 static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
 static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
 static PyObject *__pyx_n_s_RuntimeError;
+static PyObject *__pyx_kp_s_Users_caporaso_Dropbox_code_sci;
 static PyObject *__pyx_n_s_ValueError;
-static PyObject *__pyx_n_s_cnt;
+static PyObject *__pyx_n_s_a;
+static PyObject *__pyx_n_s_astype;
+static PyObject *__pyx_n_s_atleast_2d;
+static PyObject *__pyx_n_s_child_index;
+static PyObject *__pyx_n_s_copy;
+static PyObject *__pyx_n_s_count_array;
 static PyObject *__pyx_n_s_counts;
-static PyObject *__pyx_n_s_counts_sum;
+static PyObject *__pyx_n_s_counts_t;
+static PyObject *__pyx_n_s_double;
 static PyObject *__pyx_n_s_dtype;
-static PyObject *__pyx_n_s_empty;
-static PyObject *__pyx_kp_s_home_evan_biocore_scikit_bio_sk;
 static PyObject *__pyx_n_s_i;
-static PyObject *__pyx_n_s_idx;
+static PyObject *__pyx_n_s_id;
 static PyObject *__pyx_n_s_import;
+static PyObject *__pyx_n_s_include_self;
+static PyObject *__pyx_n_s_indexed;
+static PyObject *__pyx_n_s_int64;
 static PyObject *__pyx_n_s_j;
 static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_mask;
 static PyObject *__pyx_n_s_n;
+static PyObject *__pyx_n_s_n_count_otus;
+static PyObject *__pyx_n_s_n_count_vectors;
+static PyObject *__pyx_n_s_n_rows;
+static PyObject *__pyx_n_s_name;
 static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
 static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
+static PyObject *__pyx_n_s_node_lookup;
+static PyObject *__pyx_n_s_nodes;
+static PyObject *__pyx_n_s_nodes_by_counts;
+static PyObject *__pyx_n_s_nonzero;
 static PyObject *__pyx_n_s_np;
 static PyObject *__pyx_n_s_numpy;
-static PyObject *__pyx_n_s_permutation;
-static PyObject *__pyx_n_s_permuted;
-static PyObject *__pyx_n_s_random;
+static PyObject *__pyx_n_s_observed_ids;
+static PyObject *__pyx_n_s_observed_ids_set;
+static PyObject *__pyx_n_s_observed_indices;
+static PyObject *__pyx_n_s_otus_in_nodes;
+static PyObject *__pyx_n_s_p_i;
+static PyObject *__pyx_n_s_parent;
+static PyObject *__pyx_n_s_preorder;
 static PyObject *__pyx_n_s_range;
-static PyObject *__pyx_n_s_result;
-static PyObject *__pyx_n_s_skbio_stats___subsample;
-static PyObject *__pyx_n_s_subsample_counts_without_replac;
+static PyObject *__pyx_n_s_skbio_diversity__phylogenetic;
+static PyObject *__pyx_n_s_sum;
+static PyObject *__pyx_n_s_t;
 static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_n_s_tip_distances;
+static PyObject *__pyx_n_s_tip_ds;
+static PyObject *__pyx_n_s_tip_ids;
+static PyObject *__pyx_n_s_tip_indices;
+static PyObject *__pyx_n_s_transpose;
 static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
-static PyObject *__pyx_n_s_unpacked;
-static PyObject *__pyx_n_s_unpacked_idx;
-static PyObject *__pyx_n_s_zeros_like;
+static PyObject *__pyx_n_s_zeros;
+static PyObject *__pyx_pf_5skbio_9diversity_13_phylogenetic__tip_distances(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_a, PyObject *__pyx_v_t, PyArrayObject *__pyx_v_tip_indices); /* proto */
+static PyObject *__pyx_pf_5skbio_9diversity_13_phylogenetic_2_nodes_by_counts(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_counts, PyArrayObject *__pyx_v_tip_ids, PyObject *__pyx_v_indexed); /* proto */
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
+static PyObject *__pyx_int_0;
 static PyObject *__pyx_tuple_;
 static PyObject *__pyx_tuple__2;
 static PyObject *__pyx_tuple__3;
@@ -1201,31 +1306,35 @@ static PyObject *__pyx_tuple__4;
 static PyObject *__pyx_tuple__5;
 static PyObject *__pyx_tuple__6;
 static PyObject *__pyx_tuple__7;
-static PyObject *__pyx_codeobj__8;
+static PyObject *__pyx_tuple__8;
+static PyObject *__pyx_tuple__10;
+static PyObject *__pyx_codeobj__9;
+static PyObject *__pyx_codeobj__11;
 
-/* "skbio/stats/__subsample.pyx":15
- * 
- * 
- * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
- *     cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
- *     cdef:
+/* "skbio/diversity/_phylogenetic.pyx":19
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * def _tip_distances(np.ndarray[np.double_t, ndim=1] a, object t,             # <<<<<<<<<<<<<<
+ *                    np.ndarray[DTYPE_t, ndim=1] tip_indices):
+ *     """Sets each tip to its distance from the root
  */
 
 /* Python wrapper */
-static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static PyMethodDef __pyx_mdef_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement = {"_subsample_counts_without_replacement", (PyCFunction)__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement, METH_VARARGS|METH_KEYWORDS, 0};
-static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyArrayObject *__pyx_v_counts = 0;
-  PyObject *__pyx_v_n = 0;
-  PyObject *__pyx_v_counts_sum = 0;
+static PyObject *__pyx_pw_5skbio_9diversity_13_phylogenetic_1_tip_distances(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_5skbio_9diversity_13_phylogenetic__tip_distances[] = "Sets each tip to its distance from the root\n\n    Parameters\n    ----------\n    a : np.ndarray of double\n        A matrix in which each row corresponds to a node in ``t``.\n    t : skbio.tree.TreeNode\n        The tree that corresponds to the rows in ``a``.\n    tip_indices : np.ndarray of int\n        The index positions in ``a`` of the tips in ``t``.\n\n    Returns\n    -------\n    np.ndarray of double\n   [...]
+static PyMethodDef __pyx_mdef_5skbio_9diversity_13_phylogenetic_1_tip_distances = {"_tip_distances", (PyCFunction)__pyx_pw_5skbio_9diversity_13_phylogenetic_1_tip_distances, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_9diversity_13_phylogenetic__tip_distances};
+static PyObject *__pyx_pw_5skbio_9diversity_13_phylogenetic_1_tip_distances(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  PyArrayObject *__pyx_v_a = 0;
+  PyObject *__pyx_v_t = 0;
+  PyArrayObject *__pyx_v_tip_indices = 0;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
   PyObject *__pyx_r = 0;
   __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("_subsample_counts_without_replacement (wrapper)", 0);
+  __Pyx_RefNannySetupContext("_tip_distances (wrapper)", 0);
   {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_counts,&__pyx_n_s_n,&__pyx_n_s_counts_sum,0};
+    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_a,&__pyx_n_s_t,&__pyx_n_s_tip_indices,0};
     PyObject* values[3] = {0,0,0};
     if (unlikely(__pyx_kwds)) {
       Py_ssize_t kw_args;
@@ -1240,21 +1349,21 @@ static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without
       kw_args = PyDict_Size(__pyx_kwds);
       switch (pos_args) {
         case  0:
-        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_counts)) != 0)) kw_args--;
+        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--;
         else goto __pyx_L5_argtuple_error;
         case  1:
-        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--;
+        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_t)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_tip_distances", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
         case  2:
-        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_counts_sum)) != 0)) kw_args--;
+        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_tip_indices)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_tip_distances", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_subsample_counts_without_replacement") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_tip_distances") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
       goto __pyx_L5_argtuple_error;
@@ -1263,20 +1372,21 @@ static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without
       values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
       values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
     }
-    __pyx_v_counts = ((PyArrayObject *)values[0]);
-    __pyx_v_n = values[1];
-    __pyx_v_counts_sum = values[2];
+    __pyx_v_a = ((PyArrayObject *)values[0]);
+    __pyx_v_t = values[1];
+    __pyx_v_tip_indices = ((PyArrayObject *)values[2]);
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("_tip_distances", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
-  __Pyx_AddTraceback("skbio.stats.__subsample._subsample_counts_without_replacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_AddTraceback("skbio.diversity._phylogenetic._tip_distances", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
   return NULL;
   __pyx_L4_argument_unpacking_done:;
-  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_counts), __pyx_ptype_5numpy_ndarray, 1, "counts", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_r = __pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_replacement(__pyx_self, __pyx_v_counts, __pyx_v_n, __pyx_v_counts_sum);
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_a), __pyx_ptype_5numpy_ndarray, 1, "a", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_tip_indices), __pyx_ptype_5numpy_ndarray, 1, "tip_indices", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_r = __pyx_pf_5skbio_9diversity_13_phylogenetic__tip_distances(__pyx_self, __pyx_v_a, __pyx_v_t, __pyx_v_tip_indices);
 
   /* function exit code */
   goto __pyx_L0;
@@ -1287,376 +1397,353 @@ static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without
   return __pyx_r;
 }
 
-static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_replacement(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_counts, PyObject *__pyx_v_n, PyObject *__pyx_v_counts_sum) {
-  PyArrayObject *__pyx_v_result = 0;
-  PyArrayObject *__pyx_v_permuted = 0;
-  PyArrayObject *__pyx_v_unpacked = 0;
-  __pyx_t_5numpy_int64_t __pyx_v_cnt;
-  Py_ssize_t __pyx_v_unpacked_idx;
+static PyObject *__pyx_pf_5skbio_9diversity_13_phylogenetic__tip_distances(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_a, PyObject *__pyx_v_t, PyArrayObject *__pyx_v_tip_indices) {
+  PyObject *__pyx_v_n = 0;
   Py_ssize_t __pyx_v_i;
-  CYTHON_UNUSED Py_ssize_t __pyx_v_j;
-  npy_intp __pyx_v_idx;
-  __Pyx_LocalBuf_ND __pyx_pybuffernd_counts;
-  __Pyx_Buffer __pyx_pybuffer_counts;
-  __Pyx_LocalBuf_ND __pyx_pybuffernd_permuted;
-  __Pyx_Buffer __pyx_pybuffer_permuted;
-  __Pyx_LocalBuf_ND __pyx_pybuffernd_result;
-  __Pyx_Buffer __pyx_pybuffer_result;
-  __Pyx_LocalBuf_ND __pyx_pybuffernd_unpacked;
-  __Pyx_Buffer __pyx_pybuffer_unpacked;
+  Py_ssize_t __pyx_v_p_i;
+  Py_ssize_t __pyx_v_n_rows;
+  PyArrayObject *__pyx_v_mask = 0;
+  PyArrayObject *__pyx_v_tip_ds = 0;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_a;
+  __Pyx_Buffer __pyx_pybuffer_a;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_mask;
+  __Pyx_Buffer __pyx_pybuffer_mask;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_tip_ds;
+  __Pyx_Buffer __pyx_pybuffer_tip_ds;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_tip_indices;
+  __Pyx_Buffer __pyx_pybuffer_tip_indices;
   PyObject *__pyx_r = NULL;
   __Pyx_RefNannyDeclarations
   PyObject *__pyx_t_1 = NULL;
   PyObject *__pyx_t_2 = NULL;
   PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyArrayObject *__pyx_t_5 = NULL;
-  int __pyx_t_6;
-  PyObject *__pyx_t_7 = NULL;
-  PyObject *__pyx_t_8 = NULL;
-  PyObject *__pyx_t_9 = NULL;
-  npy_intp __pyx_t_10;
-  Py_ssize_t __pyx_t_11;
-  Py_ssize_t __pyx_t_12;
-  __pyx_t_5numpy_int64_t __pyx_t_13;
-  Py_ssize_t __pyx_t_14;
-  Py_ssize_t __pyx_t_15;
-  npy_intp __pyx_t_16;
+  PyArrayObject *__pyx_t_4 = NULL;
+  Py_ssize_t __pyx_t_5;
+  PyObject *(*__pyx_t_6)(PyObject *);
+  Py_ssize_t __pyx_t_7;
+  Py_ssize_t __pyx_t_8;
+  Py_ssize_t __pyx_t_9;
+  PyObject *__pyx_t_10 = NULL;
+  PyObject *__pyx_t_11 = NULL;
+  PyArrayObject *__pyx_t_12 = NULL;
+  int __pyx_t_13;
+  PyObject *__pyx_t_14 = NULL;
+  PyObject *__pyx_t_15 = NULL;
+  PyObject *__pyx_t_16 = NULL;
   npy_intp __pyx_t_17;
+  Py_ssize_t __pyx_t_18;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_t_19;
+  Py_ssize_t __pyx_t_20;
+  Py_ssize_t __pyx_t_21;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("_subsample_counts_without_replacement", 0);
-  __pyx_pybuffer_result.pybuffer.buf = NULL;
-  __pyx_pybuffer_result.refcount = 0;
-  __pyx_pybuffernd_result.data = NULL;
-  __pyx_pybuffernd_result.rcbuffer = &__pyx_pybuffer_result;
-  __pyx_pybuffer_permuted.pybuffer.buf = NULL;
-  __pyx_pybuffer_permuted.refcount = 0;
-  __pyx_pybuffernd_permuted.data = NULL;
-  __pyx_pybuffernd_permuted.rcbuffer = &__pyx_pybuffer_permuted;
-  __pyx_pybuffer_unpacked.pybuffer.buf = NULL;
-  __pyx_pybuffer_unpacked.refcount = 0;
-  __pyx_pybuffernd_unpacked.data = NULL;
-  __pyx_pybuffernd_unpacked.rcbuffer = &__pyx_pybuffer_unpacked;
-  __pyx_pybuffer_counts.pybuffer.buf = NULL;
-  __pyx_pybuffer_counts.refcount = 0;
-  __pyx_pybuffernd_counts.data = NULL;
-  __pyx_pybuffernd_counts.rcbuffer = &__pyx_pybuffer_counts;
+  __Pyx_RefNannySetupContext("_tip_distances", 0);
+  __pyx_pybuffer_mask.pybuffer.buf = NULL;
+  __pyx_pybuffer_mask.refcount = 0;
+  __pyx_pybuffernd_mask.data = NULL;
+  __pyx_pybuffernd_mask.rcbuffer = &__pyx_pybuffer_mask;
+  __pyx_pybuffer_tip_ds.pybuffer.buf = NULL;
+  __pyx_pybuffer_tip_ds.refcount = 0;
+  __pyx_pybuffernd_tip_ds.data = NULL;
+  __pyx_pybuffernd_tip_ds.rcbuffer = &__pyx_pybuffer_tip_ds;
+  __pyx_pybuffer_a.pybuffer.buf = NULL;
+  __pyx_pybuffer_a.refcount = 0;
+  __pyx_pybuffernd_a.data = NULL;
+  __pyx_pybuffernd_a.rcbuffer = &__pyx_pybuffer_a;
+  __pyx_pybuffer_tip_indices.pybuffer.buf = NULL;
+  __pyx_pybuffer_tip_indices.refcount = 0;
+  __pyx_pybuffernd_tip_indices.data = NULL;
+  __pyx_pybuffernd_tip_indices.rcbuffer = &__pyx_pybuffer_tip_indices;
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_a.rcbuffer->pybuffer, (PyObject*)__pyx_v_a, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_pybuffernd_a.diminfo[0].strides = __pyx_pybuffernd_a.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_a.diminfo[0].shape = __pyx_pybuffernd_a.rcbuffer->pybuffer.shape[0];
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
-    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_counts.rcbuffer->pybuffer, (PyObject*)__pyx_v_counts, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_tip_indices.rcbuffer->pybuffer, (PyObject*)__pyx_v_tip_indices, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
-  __pyx_pybuffernd_counts.diminfo[0].strides = __pyx_pybuffernd_counts.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_counts.diminfo[0].shape = __pyx_pybuffernd_counts.rcbuffer->pybuffer.shape[0];
+  __pyx_pybuffernd_tip_indices.diminfo[0].strides = __pyx_pybuffernd_tip_indices.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_tip_indices.diminfo[0].shape = __pyx_pybuffernd_tip_indices.rcbuffer->pybuffer.shape[0];
 
-  /* "skbio/stats/__subsample.pyx":22
- *         Py_ssize_t unpacked_idx, i, j
+  /* "skbio/diversity/_phylogenetic.pyx":43
+ *         Py_ssize_t i, p_i, n_rows
+ *         np.ndarray[np.double_t, ndim=1] mask
+ *         np.ndarray[np.double_t, ndim=1] tip_ds = a.copy()             # <<<<<<<<<<<<<<
  * 
- *     unpacked = np.empty(counts_sum, dtype=int)             # <<<<<<<<<<<<<<
- *     unpacked_idx = 0
- *     for i in range(counts.shape[0]):
+ *     # preorder reduction over the tree to gather distances at the tips
  */
-  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_a), __pyx_n_s_copy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_2))) {
+    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+    if (likely(__pyx_t_3)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+      __Pyx_INCREF(__pyx_t_3);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_2, function);
+    }
+  }
+  if (__pyx_t_3) {
+    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  } else {
+    __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_INCREF(__pyx_v_counts_sum);
-  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_counts_sum);
-  __Pyx_GIVEREF(__pyx_v_counts_sum);
-  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, ((PyObject *)((PyObject*)(&PyInt_Type)))) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
+  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = ((PyArrayObject *)__pyx_t_1);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
-    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_unpacked.rcbuffer->pybuffer);
-    __pyx_t_6 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_unpacked.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
-    if (unlikely(__pyx_t_6 < 0)) {
-      PyErr_Fetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
-      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_unpacked.rcbuffer->pybuffer, (PyObject*)__pyx_v_unpacked, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
-        Py_XDECREF(__pyx_t_7); Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9);
-        __Pyx_RaiseBufferFallbackError();
-      } else {
-        PyErr_Restore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
-      }
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_tip_ds.rcbuffer->pybuffer, (PyObject*)__pyx_t_4, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
+      __pyx_v_tip_ds = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_tip_ds.rcbuffer->pybuffer.buf = NULL;
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    } else {__pyx_pybuffernd_tip_ds.diminfo[0].strides = __pyx_pybuffernd_tip_ds.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_tip_ds.diminfo[0].shape = __pyx_pybuffernd_tip_ds.rcbuffer->pybuffer.shape[0];
     }
-    __pyx_pybuffernd_unpacked.diminfo[0].strides = __pyx_pybuffernd_unpacked.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_unpacked.diminfo[0].shape = __pyx_pybuffernd_unpacked.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
-  __pyx_t_5 = 0;
-  __pyx_v_unpacked = ((PyArrayObject *)__pyx_t_4);
   __pyx_t_4 = 0;
+  __pyx_v_tip_ds = ((PyArrayObject *)__pyx_t_1);
+  __pyx_t_1 = 0;
 
-  /* "skbio/stats/__subsample.pyx":23
- * 
- *     unpacked = np.empty(counts_sum, dtype=int)
- *     unpacked_idx = 0             # <<<<<<<<<<<<<<
- *     for i in range(counts.shape[0]):
- *         cnt = counts[i]
- */
-  __pyx_v_unpacked_idx = 0;
-
-  /* "skbio/stats/__subsample.pyx":24
- *     unpacked = np.empty(counts_sum, dtype=int)
- *     unpacked_idx = 0
- *     for i in range(counts.shape[0]):             # <<<<<<<<<<<<<<
- *         cnt = counts[i]
- *         for j in range(cnt):
- */
-  __pyx_t_10 = (__pyx_v_counts->dimensions[0]);
-  for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
-    __pyx_v_i = __pyx_t_11;
-
-    /* "skbio/stats/__subsample.pyx":25
- *     unpacked_idx = 0
- *     for i in range(counts.shape[0]):
- *         cnt = counts[i]             # <<<<<<<<<<<<<<
- *         for j in range(cnt):
- *             unpacked[unpacked_idx] = i
- */
-    __pyx_t_12 = __pyx_v_i;
-    __pyx_t_6 = -1;
-    if (__pyx_t_12 < 0) {
-      __pyx_t_12 += __pyx_pybuffernd_counts.diminfo[0].shape;
-      if (unlikely(__pyx_t_12 < 0)) __pyx_t_6 = 0;
-    } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_counts.diminfo[0].shape)) __pyx_t_6 = 0;
-    if (unlikely(__pyx_t_6 != -1)) {
-      __Pyx_RaiseBufferIndexError(__pyx_t_6);
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    }
-    __pyx_v_cnt = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_counts.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_counts.diminfo[0].strides));
-
-    /* "skbio/stats/__subsample.pyx":26
- *     for i in range(counts.shape[0]):
- *         cnt = counts[i]
- *         for j in range(cnt):             # <<<<<<<<<<<<<<
- *             unpacked[unpacked_idx] = i
- *             unpacked_idx += 1
- */
-    __pyx_t_13 = __pyx_v_cnt;
-    for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) {
-      __pyx_v_j = __pyx_t_14;
-
-      /* "skbio/stats/__subsample.pyx":27
- *         cnt = counts[i]
- *         for j in range(cnt):
- *             unpacked[unpacked_idx] = i             # <<<<<<<<<<<<<<
- *             unpacked_idx += 1
- * 
- */
-      __pyx_t_15 = __pyx_v_unpacked_idx;
-      __pyx_t_6 = -1;
-      if (__pyx_t_15 < 0) {
-        __pyx_t_15 += __pyx_pybuffernd_unpacked.diminfo[0].shape;
-        if (unlikely(__pyx_t_15 < 0)) __pyx_t_6 = 0;
-      } else if (unlikely(__pyx_t_15 >= __pyx_pybuffernd_unpacked.diminfo[0].shape)) __pyx_t_6 = 0;
-      if (unlikely(__pyx_t_6 != -1)) {
-        __Pyx_RaiseBufferIndexError(__pyx_t_6);
-        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      }
-      *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_unpacked.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_unpacked.diminfo[0].strides) = __pyx_v_i;
-
-      /* "skbio/stats/__subsample.pyx":28
- *         for j in range(cnt):
- *             unpacked[unpacked_idx] = i
- *             unpacked_idx += 1             # <<<<<<<<<<<<<<
+  /* "skbio/diversity/_phylogenetic.pyx":46
  * 
- *     permuted = np.random.permutation(unpacked)[:n]
+ *     # preorder reduction over the tree to gather distances at the tips
+ *     n_rows = tip_ds.shape[0]             # <<<<<<<<<<<<<<
+ *     for n in t.preorder(include_self=False):
+ *         i = n.id
  */
-      __pyx_v_unpacked_idx = (__pyx_v_unpacked_idx + 1);
-    }
-  }
+  __pyx_v_n_rows = (__pyx_v_tip_ds->dimensions[0]);
 
-  /* "skbio/stats/__subsample.pyx":30
- *             unpacked_idx += 1
- * 
- *     permuted = np.random.permutation(unpacked)[:n]             # <<<<<<<<<<<<<<
- * 
- *     result = np.zeros_like(counts)
+  /* "skbio/diversity/_phylogenetic.pyx":47
+ *     # preorder reduction over the tree to gather distances at the tips
+ *     n_rows = tip_ds.shape[0]
+ *     for n in t.preorder(include_self=False):             # <<<<<<<<<<<<<<
+ *         i = n.id
+ *         p_i = n.parent.id
  */
-  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_t, __pyx_n_s_preorder); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_permutation); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_include_self, Py_False) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = NULL;
-  if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_3))) {
-    __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3);
-    if (likely(__pyx_t_1)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
-      __Pyx_INCREF(__pyx_t_1);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_3, function);
-    }
-  }
-  if (!__pyx_t_1) {
-    __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_3, ((PyObject *)__pyx_v_unpacked)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) {
+    __pyx_t_2 = __pyx_t_3; __Pyx_INCREF(__pyx_t_2); __pyx_t_5 = 0;
+    __pyx_t_6 = NULL;
   } else {
-    __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
-    PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = NULL;
-    __Pyx_INCREF(((PyObject *)__pyx_v_unpacked));
-    PyTuple_SET_ITEM(__pyx_t_2, 0+1, ((PyObject *)__pyx_v_unpacked));
-    __Pyx_GIVEREF(((PyObject *)__pyx_v_unpacked));
-    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+    __pyx_t_6 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_t_4, 0, 0, NULL, &__pyx_v_n, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_5 = ((PyArrayObject *)__pyx_t_3);
-  {
-    __Pyx_BufFmt_StackElem __pyx_stack[1];
-    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_permuted.rcbuffer->pybuffer);
-    __pyx_t_6 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_permuted.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
-    if (unlikely(__pyx_t_6 < 0)) {
-      PyErr_Fetch(&__pyx_t_9, &__pyx_t_8, &__pyx_t_7);
-      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_permuted.rcbuffer->pybuffer, (PyObject*)__pyx_v_permuted, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
-        Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_7);
-        __Pyx_RaiseBufferFallbackError();
+  for (;;) {
+    if (likely(!__pyx_t_6)) {
+      if (likely(PyList_CheckExact(__pyx_t_2))) {
+        if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_2)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_3 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        #endif
       } else {
-        PyErr_Restore(__pyx_t_9, __pyx_t_8, __pyx_t_7);
+        if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        #endif
+      }
+    } else {
+      __pyx_t_3 = __pyx_t_6(__pyx_t_2);
+      if (unlikely(!__pyx_t_3)) {
+        PyObject* exc_type = PyErr_Occurred();
+        if (exc_type) {
+          if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        }
+        break;
       }
+      __Pyx_GOTREF(__pyx_t_3);
     }
-    __pyx_pybuffernd_permuted.diminfo[0].strides = __pyx_pybuffernd_permuted.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_permuted.diminfo[0].shape = __pyx_pybuffernd_permuted.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  }
-  __pyx_t_5 = 0;
-  __pyx_v_permuted = ((PyArrayObject *)__pyx_t_3);
-  __pyx_t_3 = 0;
+    __Pyx_XDECREF_SET(__pyx_v_n, __pyx_t_3);
+    __pyx_t_3 = 0;
 
-  /* "skbio/stats/__subsample.pyx":32
- *     permuted = np.random.permutation(unpacked)[:n]
+    /* "skbio/diversity/_phylogenetic.pyx":48
+ *     n_rows = tip_ds.shape[0]
+ *     for n in t.preorder(include_self=False):
+ *         i = n.id             # <<<<<<<<<<<<<<
+ *         p_i = n.parent.id
  * 
- *     result = np.zeros_like(counts)             # <<<<<<<<<<<<<<
- *     for idx in range(permuted.shape[0]):
- *         result[permuted[idx]] += 1
  */
-  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros_like); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_4 = NULL;
-  if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_2))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_2, function);
-    }
-  }
-  if (!__pyx_t_4) {
-    __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, ((PyObject *)__pyx_v_counts)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_n, __pyx_n_s_id); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
-  } else {
-    __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL;
-    __Pyx_INCREF(((PyObject *)__pyx_v_counts));
-    PyTuple_SET_ITEM(__pyx_t_1, 0+1, ((PyObject *)__pyx_v_counts));
-    __Pyx_GIVEREF(((PyObject *)__pyx_v_counts));
-    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_7 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    __pyx_v_i = __pyx_t_7;
+
+    /* "skbio/diversity/_phylogenetic.pyx":49
+ *     for n in t.preorder(include_self=False):
+ *         i = n.id
+ *         p_i = n.parent.id             # <<<<<<<<<<<<<<
+ * 
+ *         tip_ds[i] += tip_ds[p_i]
+ */
+    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_n, __pyx_n_s_parent); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
+    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_id); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    __pyx_t_7 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_7 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __pyx_v_p_i = __pyx_t_7;
+
+    /* "skbio/diversity/_phylogenetic.pyx":51
+ *         p_i = n.parent.id
+ * 
+ *         tip_ds[i] += tip_ds[p_i]             # <<<<<<<<<<<<<<
+ * 
+ *     # construct a mask that represents the locations of the tips
+ */
+    __pyx_t_8 = __pyx_v_p_i;
+    __pyx_t_9 = __pyx_v_i;
+    *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_double_t *, __pyx_pybuffernd_tip_ds.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_tip_ds.diminfo[0].strides) += (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_double_t *, __pyx_pybuffernd_tip_ds.rcbuffer->pybuffer.buf, __pyx_t_8, __pyx_pybuffernd_tip_ds.diminfo[0].strides));
+
+    /* "skbio/diversity/_phylogenetic.pyx":47
+ *     # preorder reduction over the tree to gather distances at the tips
+ *     n_rows = tip_ds.shape[0]
+ *     for n in t.preorder(include_self=False):             # <<<<<<<<<<<<<<
+ *         i = n.id
+ *         p_i = n.parent.id
+ */
   }
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_5 = ((PyArrayObject *)__pyx_t_3);
+
+  /* "skbio/diversity/_phylogenetic.pyx":54
+ * 
+ *     # construct a mask that represents the locations of the tips
+ *     mask = np.zeros(n_rows, dtype=np.double)             # <<<<<<<<<<<<<<
+ *     for i in range(tip_indices.shape[0]):
+ *         mask[tip_indices[i]] = 1.0
+ */
+  __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_n_rows); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_GIVEREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
+  __pyx_t_2 = 0;
+  __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_10);
+  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_double); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_11);
+  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_11) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+  __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_11);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  if (!(likely(((__pyx_t_11) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_11, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_12 = ((PyArrayObject *)__pyx_t_11);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
-    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result.rcbuffer->pybuffer);
-    __pyx_t_6 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_result.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
-    if (unlikely(__pyx_t_6 < 0)) {
-      PyErr_Fetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
-      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_result.rcbuffer->pybuffer, (PyObject*)__pyx_v_result, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
-        Py_XDECREF(__pyx_t_7); Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer);
+    __pyx_t_13 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask.rcbuffer->pybuffer, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
+    if (unlikely(__pyx_t_13 < 0)) {
+      PyErr_Fetch(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16);
         __Pyx_RaiseBufferFallbackError();
       } else {
-        PyErr_Restore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
+        PyErr_Restore(__pyx_t_14, __pyx_t_15, __pyx_t_16);
       }
     }
-    __pyx_pybuffernd_result.diminfo[0].strides = __pyx_pybuffernd_result.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_result.diminfo[0].shape = __pyx_pybuffernd_result.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_pybuffernd_mask.diminfo[0].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask.diminfo[0].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[0];
+    if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
-  __pyx_t_5 = 0;
-  __pyx_v_result = ((PyArrayObject *)__pyx_t_3);
-  __pyx_t_3 = 0;
+  __pyx_t_12 = 0;
+  __pyx_v_mask = ((PyArrayObject *)__pyx_t_11);
+  __pyx_t_11 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":55
+ *     # construct a mask that represents the locations of the tips
+ *     mask = np.zeros(n_rows, dtype=np.double)
+ *     for i in range(tip_indices.shape[0]):             # <<<<<<<<<<<<<<
+ *         mask[tip_indices[i]] = 1.0
+ * 
+ */
+  __pyx_t_17 = (__pyx_v_tip_indices->dimensions[0]);
+  for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_17; __pyx_t_5+=1) {
+    __pyx_v_i = __pyx_t_5;
 
-  /* "skbio/stats/__subsample.pyx":33
- * 
- *     result = np.zeros_like(counts)
- *     for idx in range(permuted.shape[0]):             # <<<<<<<<<<<<<<
- *         result[permuted[idx]] += 1
- * 
- */
-  __pyx_t_10 = (__pyx_v_permuted->dimensions[0]);
-  for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_10; __pyx_t_16+=1) {
-    __pyx_v_idx = __pyx_t_16;
-
-    /* "skbio/stats/__subsample.pyx":34
- *     result = np.zeros_like(counts)
- *     for idx in range(permuted.shape[0]):
- *         result[permuted[idx]] += 1             # <<<<<<<<<<<<<<
- * 
- *     return result
- */
-    __pyx_t_17 = __pyx_v_idx;
-    __pyx_t_6 = -1;
-    if (__pyx_t_17 < 0) {
-      __pyx_t_17 += __pyx_pybuffernd_permuted.diminfo[0].shape;
-      if (unlikely(__pyx_t_17 < 0)) __pyx_t_6 = 0;
-    } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_permuted.diminfo[0].shape)) __pyx_t_6 = 0;
-    if (unlikely(__pyx_t_6 != -1)) {
-      __Pyx_RaiseBufferIndexError(__pyx_t_6);
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    }
-    __pyx_t_13 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_permuted.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_permuted.diminfo[0].strides));
-    __pyx_t_6 = -1;
-    if (__pyx_t_13 < 0) {
-      __pyx_t_13 += __pyx_pybuffernd_result.diminfo[0].shape;
-      if (unlikely(__pyx_t_13 < 0)) __pyx_t_6 = 0;
-    } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_result.diminfo[0].shape)) __pyx_t_6 = 0;
-    if (unlikely(__pyx_t_6 != -1)) {
-      __Pyx_RaiseBufferIndexError(__pyx_t_6);
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    }
-    *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_result.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_result.diminfo[0].strides) += 1;
+    /* "skbio/diversity/_phylogenetic.pyx":56
+ *     mask = np.zeros(n_rows, dtype=np.double)
+ *     for i in range(tip_indices.shape[0]):
+ *         mask[tip_indices[i]] = 1.0             # <<<<<<<<<<<<<<
+ * 
+ *     # apply the mask such that tip_ds only includes values which correspond to
+ */
+    __pyx_t_18 = __pyx_v_i;
+    __pyx_t_19 = (*__Pyx_BufPtrStrided1d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_tip_indices.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_tip_indices.diminfo[0].strides));
+    *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_double_t *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_mask.diminfo[0].strides) = 1.0;
   }
 
-  /* "skbio/stats/__subsample.pyx":36
- *         result[permuted[idx]] += 1
+  /* "skbio/diversity/_phylogenetic.pyx":60
+ *     # apply the mask such that tip_ds only includes values which correspond to
+ *     # the tips of the tree.
+ *     for i in range(n_rows):             # <<<<<<<<<<<<<<
+ *         tip_ds[i] *= mask[i]
  * 
- *     return result             # <<<<<<<<<<<<<<
  */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(((PyObject *)__pyx_v_result));
-  __pyx_r = ((PyObject *)__pyx_v_result);
-  goto __pyx_L0;
+  __pyx_t_5 = __pyx_v_n_rows;
+  for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_5; __pyx_t_7+=1) {
+    __pyx_v_i = __pyx_t_7;
+
+    /* "skbio/diversity/_phylogenetic.pyx":61
+ *     # the tips of the tree.
+ *     for i in range(n_rows):
+ *         tip_ds[i] *= mask[i]             # <<<<<<<<<<<<<<
+ * 
+ *     return tip_ds
+ */
+    __pyx_t_20 = __pyx_v_i;
+    __pyx_t_21 = __pyx_v_i;
+    *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_double_t *, __pyx_pybuffernd_tip_ds.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_tip_ds.diminfo[0].strides) *= (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_double_t *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_20, __pyx_pybuffernd_mask.diminfo[0].strides));
+  }
 
-  /* "skbio/stats/__subsample.pyx":15
+  /* "skbio/diversity/_phylogenetic.pyx":63
+ *         tip_ds[i] *= mask[i]
+ * 
+ *     return tip_ds             # <<<<<<<<<<<<<<
  * 
  * 
- * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
- *     cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
- *     cdef:
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(((PyObject *)__pyx_v_tip_ds));
+  __pyx_r = ((PyObject *)__pyx_v_tip_ds);
+  goto __pyx_L0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":19
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * def _tip_distances(np.ndarray[np.double_t, ndim=1] a, object t,             # <<<<<<<<<<<<<<
+ *                    np.ndarray[DTYPE_t, ndim=1] tip_indices):
+ *     """Sets each tip to its distance from the root
  */
 
   /* function exit code */
@@ -1664,50 +1751,973 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   __Pyx_XDECREF(__pyx_t_1);
   __Pyx_XDECREF(__pyx_t_2);
   __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_10);
+  __Pyx_XDECREF(__pyx_t_11);
   { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
     __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
-    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_counts.rcbuffer->pybuffer);
-    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_permuted.rcbuffer->pybuffer);
-    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result.rcbuffer->pybuffer);
-    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_unpacked.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_a.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_tip_ds.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_tip_indices.rcbuffer->pybuffer);
   __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
-  __Pyx_AddTraceback("skbio.stats.__subsample._subsample_counts_without_replacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_AddTraceback("skbio.diversity._phylogenetic._tip_distances", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __pyx_r = NULL;
   goto __pyx_L2;
   __pyx_L0:;
-  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_counts.rcbuffer->pybuffer);
-  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_permuted.rcbuffer->pybuffer);
-  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result.rcbuffer->pybuffer);
-  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_unpacked.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_a.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_tip_ds.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_tip_indices.rcbuffer->pybuffer);
   __pyx_L2:;
-  __Pyx_XDECREF((PyObject *)__pyx_v_result);
-  __Pyx_XDECREF((PyObject *)__pyx_v_permuted);
-  __Pyx_XDECREF((PyObject *)__pyx_v_unpacked);
+  __Pyx_XDECREF(__pyx_v_n);
+  __Pyx_XDECREF((PyObject *)__pyx_v_mask);
+  __Pyx_XDECREF((PyObject *)__pyx_v_tip_ds);
   __Pyx_XGIVEREF(__pyx_r);
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
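
For reference, the Cython source quoted in the comment blocks above boils down to the following Python/NumPy sketch. It illustrates the same logic rather than the shipped implementation; the wrapper name tip_distances_sketch is ours, while a, t, tip_indices, preorder, id and parent are taken directly from the quoted .pyx lines.

    import numpy as np

    def tip_distances_sketch(a, t, tip_indices):
        # Preorder pass: after visiting a node, row `id` holds its distance
        # from the root (its own branch length plus the parent's distance).
        tip_ds = a.copy()
        for node in t.preorder(include_self=False):
            tip_ds[node.id] += tip_ds[node.parent.id]

        # Keep only the rows that correspond to tips; every other row is
        # zeroed out, mirroring the mask loops in the quoted source.
        mask = np.zeros(tip_ds.shape[0])
        mask[tip_indices] = 1.0
        return tip_ds * mask

Calling this requires a tree whose nodes carry integer id attributes indexing into a, which matches the docstring above (each row of a corresponds to a node in t).
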
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
- *         # experimental exception made for __getbuffer__ and __releasebuffer__
- *         # -- the details of this may change.
- *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
- *             # This implementation of getbuffer is geared towards Cython
- *             # requirements, and does not yet fullfill the PEP.
+/* "skbio/diversity/_phylogenetic.pyx":68
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * cdef _traverse_reduce(np.ndarray[DTYPE_t, ndim=2] child_index,             # <<<<<<<<<<<<<<
+ *                       np.ndarray[DTYPE_t, ndim=2] a):
+ *     """Apply a[k] = sum[i:j]
  */
 
-/* Python wrapper */
-static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
-  int __pyx_r;
+static PyObject *__pyx_f_5skbio_9diversity_13_phylogenetic__traverse_reduce(PyArrayObject *__pyx_v_child_index, PyArrayObject *__pyx_v_a) {
+  Py_ssize_t __pyx_v_i;
+  Py_ssize_t __pyx_v_j;
+  Py_ssize_t __pyx_v_k;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_v_node;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_v_start;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_v_end;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_v_n_envs;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_a;
+  __Pyx_Buffer __pyx_pybuffer_a;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_child_index;
+  __Pyx_Buffer __pyx_pybuffer_child_index;
+  PyObject *__pyx_r = NULL;
   __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
+  npy_intp __pyx_t_1;
+  Py_ssize_t __pyx_t_2;
+  Py_ssize_t __pyx_t_3;
+  Py_ssize_t __pyx_t_4;
+  Py_ssize_t __pyx_t_5;
+  Py_ssize_t __pyx_t_6;
+  Py_ssize_t __pyx_t_7;
+  Py_ssize_t __pyx_t_8;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_t_9;
+  Py_ssize_t __pyx_t_10;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_t_11;
+  Py_ssize_t __pyx_t_12;
+  Py_ssize_t __pyx_t_13;
+  Py_ssize_t __pyx_t_14;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_t_15;
+  Py_ssize_t __pyx_t_16;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_traverse_reduce", 0);
+  __pyx_pybuffer_child_index.pybuffer.buf = NULL;
+  __pyx_pybuffer_child_index.refcount = 0;
+  __pyx_pybuffernd_child_index.data = NULL;
+  __pyx_pybuffernd_child_index.rcbuffer = &__pyx_pybuffer_child_index;
+  __pyx_pybuffer_a.pybuffer.buf = NULL;
+  __pyx_pybuffer_a.refcount = 0;
+  __pyx_pybuffernd_a.data = NULL;
+  __pyx_pybuffernd_a.rcbuffer = &__pyx_pybuffer_a;
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_child_index.rcbuffer->pybuffer, (PyObject*)__pyx_v_child_index, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_pybuffernd_child_index.diminfo[0].strides = __pyx_pybuffernd_child_index.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_child_index.diminfo[0].shape = __pyx_pybuffernd_child_index.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_child_index.diminfo[1].strides = __pyx_pybuffernd_child_index.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_child_index.diminfo[1].shape = __pyx_pybuffernd_child_index.rcbuffer->pybuffer.shape[1];
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_a.rcbuffer->pybuffer, (PyObject*)__pyx_v_a, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_pybuffernd_a.diminfo[0].strides = __pyx_pybuffernd_a.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_a.diminfo[0].shape = __pyx_pybuffernd_a.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_a.diminfo[1].strides = __pyx_pybuffernd_a.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_a.diminfo[1].shape = __pyx_pybuffernd_a.rcbuffer->pybuffer.shape[1];
+
+  /* "skbio/diversity/_phylogenetic.pyx":128
+ *         Py_ssize_t i, j, k
+ *         DTYPE_t node, start, end
+ *         DTYPE_t n_envs = a.shape[1]             # <<<<<<<<<<<<<<
+ * 
+ *     # possible GPGPU target
+ */
+  __pyx_v_n_envs = (__pyx_v_a->dimensions[1]);
+
+  /* "skbio/diversity/_phylogenetic.pyx":131
+ * 
+ *     # possible GPGPU target
+ *     for i in range(child_index.shape[0]):             # <<<<<<<<<<<<<<
+ *         node = child_index[i, 0]
+ *         start = child_index[i, 1]
+ */
+  __pyx_t_1 = (__pyx_v_child_index->dimensions[0]);
+  for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
+    __pyx_v_i = __pyx_t_2;
+
+    /* "skbio/diversity/_phylogenetic.pyx":132
+ *     # possible GPGPU target
+ *     for i in range(child_index.shape[0]):
+ *         node = child_index[i, 0]             # <<<<<<<<<<<<<<
+ *         start = child_index[i, 1]
+ *         end = child_index[i, 2]
+ */
+    __pyx_t_3 = __pyx_v_i;
+    __pyx_t_4 = 0;
+    __pyx_v_node = (*__Pyx_BufPtrStrided2d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_child_index.rcbuffer->pybuffer.buf, __pyx_t_3, __pyx_pybuffernd_child_index.diminfo[0].strides, __pyx_t_4, __pyx_pybuffernd_child_index.diminfo[1].strides));
+
+    /* "skbio/diversity/_phylogenetic.pyx":133
+ *     for i in range(child_index.shape[0]):
+ *         node = child_index[i, 0]
+ *         start = child_index[i, 1]             # <<<<<<<<<<<<<<
+ *         end = child_index[i, 2]
+ * 
+ */
+    __pyx_t_5 = __pyx_v_i;
+    __pyx_t_6 = 1;
+    __pyx_v_start = (*__Pyx_BufPtrStrided2d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_child_index.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_child_index.diminfo[0].strides, __pyx_t_6, __pyx_pybuffernd_child_index.diminfo[1].strides));
+
+    /* "skbio/diversity/_phylogenetic.pyx":134
+ *         node = child_index[i, 0]
+ *         start = child_index[i, 1]
+ *         end = child_index[i, 2]             # <<<<<<<<<<<<<<
+ * 
+ *         for j in range(start, end + 1):
+ */
+    __pyx_t_7 = __pyx_v_i;
+    __pyx_t_8 = 2;
+    __pyx_v_end = (*__Pyx_BufPtrStrided2d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_child_index.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_child_index.diminfo[0].strides, __pyx_t_8, __pyx_pybuffernd_child_index.diminfo[1].strides));
+
+    /* "skbio/diversity/_phylogenetic.pyx":136
+ *         end = child_index[i, 2]
+ * 
+ *         for j in range(start, end + 1):             # <<<<<<<<<<<<<<
+ *             for k in range(n_envs):
+ *                 a[node, k] += a[j, k]
+ */
+    __pyx_t_9 = (__pyx_v_end + 1);
+    for (__pyx_t_10 = __pyx_v_start; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
+      __pyx_v_j = __pyx_t_10;
+
+      /* "skbio/diversity/_phylogenetic.pyx":137
+ * 
+ *         for j in range(start, end + 1):
+ *             for k in range(n_envs):             # <<<<<<<<<<<<<<
+ *                 a[node, k] += a[j, k]
+ * 
+ */
+      __pyx_t_11 = __pyx_v_n_envs;
+      for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
+        __pyx_v_k = __pyx_t_12;
+
+        /* "skbio/diversity/_phylogenetic.pyx":138
+ *         for j in range(start, end + 1):
+ *             for k in range(n_envs):
+ *                 a[node, k] += a[j, k]             # <<<<<<<<<<<<<<
+ * 
+ * 
+ */
+        __pyx_t_13 = __pyx_v_j;
+        __pyx_t_14 = __pyx_v_k;
+        __pyx_t_15 = __pyx_v_node;
+        __pyx_t_16 = __pyx_v_k;
+        *__Pyx_BufPtrStrided2d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_a.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_a.diminfo[0].strides, __pyx_t_16, __pyx_pybuffernd_a.diminfo[1].strides) += (*__Pyx_BufPtrStrided2d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_a.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_a.diminfo[0].strides, __pyx_t_14, __pyx_pybuffernd_a.diminfo[1].strides));
+      }
+    }
+  }
+
+  /* "skbio/diversity/_phylogenetic.pyx":68
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * cdef _traverse_reduce(np.ndarray[DTYPE_t, ndim=2] child_index,             # <<<<<<<<<<<<<<
+ *                       np.ndarray[DTYPE_t, ndim=2] a):
+ *     """Apply a[k] = sum[i:j]
+ */
+
+  /* function exit code */
+  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_a.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_child_index.rcbuffer->pybuffer);
+  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+  __Pyx_AddTraceback("skbio.diversity._phylogenetic._traverse_reduce", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  goto __pyx_L2;
+  __pyx_L0:;
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_a.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_child_index.rcbuffer->pybuffer);
+  __pyx_L2:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
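
The triple loop in _traverse_reduce above is a per-row reduction over child_index: for each (node, start, end) row, the child rows a[start:end + 1] are summed into a[node]. Below is a hedged NumPy sketch of the same accumulation, assuming child_index is ordered so that every child row is complete before its parent row is reduced; the function name is ours.

    import numpy as np

    def traverse_reduce_sketch(child_index, a):
        # child_index rows are (node, start, end); a is nodes x environments.
        for node, start, end in child_index:
            a[node] += a[start:end + 1].sum(axis=0)
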
+
+/* "skbio/diversity/_phylogenetic.pyx":143
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * def _nodes_by_counts(np.ndarray counts,             # <<<<<<<<<<<<<<
+ *                      np.ndarray tip_ids,
+ *                      dict indexed):
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9diversity_13_phylogenetic_3_nodes_by_counts(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_5skbio_9diversity_13_phylogenetic_2_nodes_by_counts[] = "Construct the count array, and the counts up the tree\n\n    Parameters\n    ----------\n    counts : np.array of int\n        A 1D or 2D vector in which each row corresponds to the observed counts\n        in an environment. The rows are expected to be in order with respect to\n        `tip_ids`.\n    tip_ids : np.array of str\n        A vector of tip names that correspond to the columns in the `counts`\n     [...]
+static PyMethodDef __pyx_mdef_5skbio_9diversity_13_phylogenetic_3_nodes_by_counts = {"_nodes_by_counts", (PyCFunction)__pyx_pw_5skbio_9diversity_13_phylogenetic_3_nodes_by_counts, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5skbio_9diversity_13_phylogenetic_2_nodes_by_counts};
+static PyObject *__pyx_pw_5skbio_9diversity_13_phylogenetic_3_nodes_by_counts(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  PyArrayObject *__pyx_v_counts = 0;
+  PyArrayObject *__pyx_v_tip_ids = 0;
+  PyObject *__pyx_v_indexed = 0;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("_nodes_by_counts (wrapper)", 0);
+  {
+    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_counts,&__pyx_n_s_tip_ids,&__pyx_n_s_indexed,0};
+    PyObject* values[3] = {0,0,0};
+    if (unlikely(__pyx_kwds)) {
+      Py_ssize_t kw_args;
+      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+      switch (pos_args) {
+        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        case  0: break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+      kw_args = PyDict_Size(__pyx_kwds);
+      switch (pos_args) {
+        case  0:
+        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_counts)) != 0)) kw_args--;
+        else goto __pyx_L5_argtuple_error;
+        case  1:
+        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_tip_ids)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("_nodes_by_counts", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+        case  2:
+        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_indexed)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("_nodes_by_counts", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+      }
+      if (unlikely(kw_args > 0)) {
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_nodes_by_counts") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+      }
+    } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+      goto __pyx_L5_argtuple_error;
+    } else {
+      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+    }
+    __pyx_v_counts = ((PyArrayObject *)values[0]);
+    __pyx_v_tip_ids = ((PyArrayObject *)values[1]);
+    __pyx_v_indexed = ((PyObject*)values[2]);
+  }
+  goto __pyx_L4_argument_unpacking_done;
+  __pyx_L5_argtuple_error:;
+  __Pyx_RaiseArgtupleInvalid("_nodes_by_counts", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __pyx_L3_error:;
+  __Pyx_AddTraceback("skbio.diversity._phylogenetic._nodes_by_counts", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_RefNannyFinishContext();
+  return NULL;
+  __pyx_L4_argument_unpacking_done:;
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_counts), __pyx_ptype_5numpy_ndarray, 1, "counts", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_tip_ids), __pyx_ptype_5numpy_ndarray, 1, "tip_ids", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_indexed), (&PyDict_Type), 1, "indexed", 1))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_r = __pyx_pf_5skbio_9diversity_13_phylogenetic_2_nodes_by_counts(__pyx_self, __pyx_v_counts, __pyx_v_tip_ids, __pyx_v_indexed);
+
+  /* function exit code */
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
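The wrapper above is routine Cython argument handling: counts, tip_ids and indexed may be passed positionally or by keyword; counts and tip_ids must be NumPy ndarrays and indexed must be a dict (None passes each check) before control is handed to the implementation function that follows. A rough Python restatement of those checks, for orientation only (the generated code reports errors through __Pyx_RaiseArgtupleInvalid / __Pyx_ArgTypeTest, not through this hypothetical helper):

    import numpy as np

    def _check_nodes_by_counts_args(counts, tip_ids, indexed):
        # Mirrors the generated type tests: ndarray, ndarray, dict; None is accepted.
        for name, value, typ in (("counts", counts, np.ndarray),
                                 ("tip_ids", tip_ids, np.ndarray),
                                 ("indexed", indexed, dict)):
            if value is not None and not isinstance(value, typ):
                raise TypeError("Argument '%s' has incorrect type" % name)
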
+static PyObject *__pyx_pf_5skbio_9diversity_13_phylogenetic_2_nodes_by_counts(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_counts, PyArrayObject *__pyx_v_tip_ids, PyObject *__pyx_v_indexed) {
+  PyArrayObject *__pyx_v_nodes = 0;
+  PyArrayObject *__pyx_v_observed_ids = 0;
+  PyArrayObject *__pyx_v_count_array = 0;
+  PyArrayObject *__pyx_v_counts_t = 0;
+  PyArrayObject *__pyx_v_observed_indices = 0;
+  PyArrayObject *__pyx_v_otus_in_nodes = 0;
+  Py_ssize_t __pyx_v_i;
+  Py_ssize_t __pyx_v_j;
+  PyObject *__pyx_v_observed_ids_set = 0;
+  PyObject *__pyx_v_n = 0;
+  PyObject *__pyx_v_node_lookup = 0;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_v_n_count_vectors;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_v_n_count_otus;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_count_array;
+  __Pyx_Buffer __pyx_pybuffer_count_array;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_counts_t;
+  __Pyx_Buffer __pyx_pybuffer_counts_t;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_observed_indices;
+  __Pyx_Buffer __pyx_pybuffer_observed_indices;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_otus_in_nodes;
+  __Pyx_Buffer __pyx_pybuffer_otus_in_nodes;
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
+  PyObject *__pyx_t_3 = NULL;
+  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_5 = NULL;
+  PyArrayObject *__pyx_t_6 = NULL;
+  int __pyx_t_7;
+  PyObject *__pyx_t_8 = NULL;
+  PyObject *__pyx_t_9 = NULL;
+  PyObject *__pyx_t_10 = NULL;
+  npy_intp __pyx_t_11;
+  Py_ssize_t __pyx_t_12;
+  int __pyx_t_13;
+  int __pyx_t_14;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_t_15;
+  Py_ssize_t __pyx_t_16;
+  PyArrayObject *__pyx_t_17 = NULL;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_t_18;
+  Py_ssize_t __pyx_t_19;
+  Py_ssize_t __pyx_t_20;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_t_21;
+  Py_ssize_t __pyx_t_22;
+  Py_ssize_t __pyx_t_23;
+  __pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t __pyx_t_24;
+  Py_ssize_t __pyx_t_25;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_nodes_by_counts", 0);
+  __Pyx_INCREF((PyObject *)__pyx_v_counts);
+  __pyx_pybuffer_count_array.pybuffer.buf = NULL;
+  __pyx_pybuffer_count_array.refcount = 0;
+  __pyx_pybuffernd_count_array.data = NULL;
+  __pyx_pybuffernd_count_array.rcbuffer = &__pyx_pybuffer_count_array;
+  __pyx_pybuffer_counts_t.pybuffer.buf = NULL;
+  __pyx_pybuffer_counts_t.refcount = 0;
+  __pyx_pybuffernd_counts_t.data = NULL;
+  __pyx_pybuffernd_counts_t.rcbuffer = &__pyx_pybuffer_counts_t;
+  __pyx_pybuffer_observed_indices.pybuffer.buf = NULL;
+  __pyx_pybuffer_observed_indices.refcount = 0;
+  __pyx_pybuffernd_observed_indices.data = NULL;
+  __pyx_pybuffernd_observed_indices.rcbuffer = &__pyx_pybuffer_observed_indices;
+  __pyx_pybuffer_otus_in_nodes.pybuffer.buf = NULL;
+  __pyx_pybuffer_otus_in_nodes.refcount = 0;
+  __pyx_pybuffernd_otus_in_nodes.data = NULL;
+  __pyx_pybuffernd_otus_in_nodes.rcbuffer = &__pyx_pybuffer_otus_in_nodes;
+
+  /* "skbio/diversity/_phylogenetic.pyx":176
+ *         DTYPE_t n_count_vectors, n_count_otus
+ * 
+ *     nodes = indexed['name']             # <<<<<<<<<<<<<<
+ * 
+ *     # allow counts to be a vector
+ */
+  if (unlikely(__pyx_v_indexed == Py_None)) {
+    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_1 = __Pyx_PyDict_GetItem(__pyx_v_indexed, __pyx_n_s_name); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  __Pyx_GOTREF(__pyx_t_1);
+  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_v_nodes = ((PyArrayObject *)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":179
+ * 
+ *     # allow counts to be a vector
+ *     counts = np.atleast_2d(counts)             # <<<<<<<<<<<<<<
+ *     counts = counts.astype(DTYPE)
+ * 
+ */
+  __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_atleast_2d); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_t_2 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
+    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
+    if (likely(__pyx_t_2)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+      __Pyx_INCREF(__pyx_t_2);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_3, function);
+    }
+  }
+  if (!__pyx_t_2) {
+    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, ((PyObject *)__pyx_v_counts)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+  } else {
+    __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = NULL;
+    __Pyx_INCREF(((PyObject *)__pyx_v_counts));
+    __Pyx_GIVEREF(((PyObject *)__pyx_v_counts));
+    PyTuple_SET_ITEM(__pyx_t_4, 0+1, ((PyObject *)__pyx_v_counts));
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  }
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF_SET(__pyx_v_counts, ((PyArrayObject *)__pyx_t_1));
+  __pyx_t_1 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":180
+ *     # allow counts to be a vector
+ *     counts = np.atleast_2d(counts)
+ *     counts = counts.astype(DTYPE)             # <<<<<<<<<<<<<<
+ * 
+ *     # determine observed IDs. It may be possible to unroll these calls to
+ */
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_counts), __pyx_n_s_astype); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __pyx_t_2 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_3))) {
+    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
+    if (likely(__pyx_t_2)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+      __Pyx_INCREF(__pyx_t_2);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_3, function);
+    }
+  }
+  if (!__pyx_t_2) {
+    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    __Pyx_GOTREF(__pyx_t_1);
+  } else {
+    __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL;
+    __Pyx_GIVEREF(__pyx_t_4);
+    PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4);
+    __pyx_t_4 = 0;
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  }
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF_SET(__pyx_v_counts, ((PyArrayObject *)__pyx_t_1));
+  __pyx_t_1 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":184
+ *     # determine observed IDs. It may be possible to unroll these calls to
+ *     # squeeze a little more performance
+ *     observed_indices = counts.sum(0).nonzero()[0]             # <<<<<<<<<<<<<<
+ *     observed_ids = tip_ids[observed_indices]
+ *     observed_ids_set = set(observed_ids)
+ */
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_counts), __pyx_n_s_sum); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_nonzero); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  __pyx_t_5 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_3))) {
+    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3);
+    if (likely(__pyx_t_5)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+      __Pyx_INCREF(__pyx_t_5);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_3, function);
+    }
+  }
+  if (__pyx_t_5) {
+    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  } else {
+    __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = ((PyArrayObject *)__pyx_t_3);
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_observed_indices.rcbuffer->pybuffer);
+    __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_observed_indices.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
+    if (unlikely(__pyx_t_7 < 0)) {
+      PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_observed_indices.rcbuffer->pybuffer, (PyObject*)__pyx_v_observed_indices, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
+        __Pyx_RaiseBufferFallbackError();
+      } else {
+        PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
+      }
+    }
+    __pyx_pybuffernd_observed_indices.diminfo[0].strides = __pyx_pybuffernd_observed_indices.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_observed_indices.diminfo[0].shape = __pyx_pybuffernd_observed_indices.rcbuffer->pybuffer.shape[0];
+    if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_6 = 0;
+  __pyx_v_observed_indices = ((PyArrayObject *)__pyx_t_3);
+  __pyx_t_3 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":185
+ *     # squeeze a little more performance
+ *     observed_indices = counts.sum(0).nonzero()[0]
+ *     observed_ids = tip_ids[observed_indices]             # <<<<<<<<<<<<<<
+ *     observed_ids_set = set(observed_ids)
+ * 
+ */
+  __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_tip_ids), ((PyObject *)__pyx_v_observed_indices)); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  __Pyx_GOTREF(__pyx_t_3);
+  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_v_observed_ids = ((PyArrayObject *)__pyx_t_3);
+  __pyx_t_3 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":186
+ *     observed_indices = counts.sum(0).nonzero()[0]
+ *     observed_ids = tip_ids[observed_indices]
+ *     observed_ids_set = set(observed_ids)             # <<<<<<<<<<<<<<
+ * 
+ *     # construct mappings of the observed to their positions in the node array
+ */
+  __pyx_t_3 = PySet_New(((PyObject *)__pyx_v_observed_ids)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_v_observed_ids_set = ((PyObject*)__pyx_t_3);
+  __pyx_t_3 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":189
+ * 
+ *     # construct mappings of the observed to their positions in the node array
+ *     node_lookup = {}             # <<<<<<<<<<<<<<
+ *     for i in range(nodes.shape[0]):
+ *         n = nodes[i]
+ */
+  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_v_node_lookup = ((PyObject*)__pyx_t_3);
+  __pyx_t_3 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":190
+ *     # construct mappings of the observed to their positions in the node array
+ *     node_lookup = {}
+ *     for i in range(nodes.shape[0]):             # <<<<<<<<<<<<<<
+ *         n = nodes[i]
+ *         if n in observed_ids_set:
+ */
+  __pyx_t_11 = (__pyx_v_nodes->dimensions[0]);
+  for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
+    __pyx_v_i = __pyx_t_12;
+
+    /* "skbio/diversity/_phylogenetic.pyx":191
+ *     node_lookup = {}
+ *     for i in range(nodes.shape[0]):
+ *         n = nodes[i]             # <<<<<<<<<<<<<<
+ *         if n in observed_ids_set:
+ *             node_lookup[n] = i
+ */
+    __pyx_t_3 = __Pyx_GetItemInt(((PyObject *)__pyx_v_nodes), __pyx_v_i, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __Pyx_GOTREF(__pyx_t_3);
+    __Pyx_XDECREF_SET(__pyx_v_n, __pyx_t_3);
+    __pyx_t_3 = 0;
+
+    /* "skbio/diversity/_phylogenetic.pyx":192
+ *     for i in range(nodes.shape[0]):
+ *         n = nodes[i]
+ *         if n in observed_ids_set:             # <<<<<<<<<<<<<<
+ *             node_lookup[n] = i
+ * 
+ */
+    __pyx_t_13 = (__Pyx_PySequence_ContainsTF(__pyx_v_n, __pyx_v_observed_ids_set, Py_EQ)); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_14 = (__pyx_t_13 != 0);
+    if (__pyx_t_14) {
+
+      /* "skbio/diversity/_phylogenetic.pyx":193
+ *         n = nodes[i]
+ *         if n in observed_ids_set:
+ *             node_lookup[n] = i             # <<<<<<<<<<<<<<
+ * 
+ *     # determine the positions of the observed IDs in nodes
+ */
+      __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_i); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      if (unlikely(PyDict_SetItem(__pyx_v_node_lookup, __pyx_v_n, __pyx_t_3) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+      /* "skbio/diversity/_phylogenetic.pyx":192
+ *     for i in range(nodes.shape[0]):
+ *         n = nodes[i]
+ *         if n in observed_ids_set:             # <<<<<<<<<<<<<<
+ *             node_lookup[n] = i
+ * 
+ */
+    }
+  }
+
+  /* "skbio/diversity/_phylogenetic.pyx":196
+ * 
+ *     # determine the positions of the observed IDs in nodes
+ *     otus_in_nodes = np.zeros(observed_ids.shape[0], dtype=DTYPE)             # <<<<<<<<<<<<<<
+ *     for i in range(observed_ids.shape[0]):
+ *         n = observed_ids[i]
+ */
+  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t((__pyx_v_observed_ids->dimensions[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __Pyx_GIVEREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+  __pyx_t_3 = 0;
+  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = ((PyArrayObject *)__pyx_t_4);
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_otus_in_nodes.rcbuffer->pybuffer);
+    __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_otus_in_nodes.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
+    if (unlikely(__pyx_t_7 < 0)) {
+      PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_otus_in_nodes.rcbuffer->pybuffer, (PyObject*)__pyx_v_otus_in_nodes, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8);
+        __Pyx_RaiseBufferFallbackError();
+      } else {
+        PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8);
+      }
+    }
+    __pyx_pybuffernd_otus_in_nodes.diminfo[0].strides = __pyx_pybuffernd_otus_in_nodes.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_otus_in_nodes.diminfo[0].shape = __pyx_pybuffernd_otus_in_nodes.rcbuffer->pybuffer.shape[0];
+    if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_6 = 0;
+  __pyx_v_otus_in_nodes = ((PyArrayObject *)__pyx_t_4);
+  __pyx_t_4 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":197
+ *     # determine the positions of the observed IDs in nodes
+ *     otus_in_nodes = np.zeros(observed_ids.shape[0], dtype=DTYPE)
+ *     for i in range(observed_ids.shape[0]):             # <<<<<<<<<<<<<<
+ *         n = observed_ids[i]
+ *         otus_in_nodes[i] = node_lookup[n]
+ */
+  __pyx_t_11 = (__pyx_v_observed_ids->dimensions[0]);
+  for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
+    __pyx_v_i = __pyx_t_12;
+
+    /* "skbio/diversity/_phylogenetic.pyx":198
+ *     otus_in_nodes = np.zeros(observed_ids.shape[0], dtype=DTYPE)
+ *     for i in range(observed_ids.shape[0]):
+ *         n = observed_ids[i]             # <<<<<<<<<<<<<<
+ *         otus_in_nodes[i] = node_lookup[n]
+ * 
+ */
+    __pyx_t_4 = __Pyx_GetItemInt(((PyObject *)__pyx_v_observed_ids), __pyx_v_i, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __Pyx_GOTREF(__pyx_t_4);
+    __Pyx_XDECREF_SET(__pyx_v_n, __pyx_t_4);
+    __pyx_t_4 = 0;
+
+    /* "skbio/diversity/_phylogenetic.pyx":199
+ *     for i in range(observed_ids.shape[0]):
+ *         n = observed_ids[i]
+ *         otus_in_nodes[i] = node_lookup[n]             # <<<<<<<<<<<<<<
+ * 
+ *     # count_array has a row per node (not tip) and a column per env.
+ */
+    __pyx_t_4 = __Pyx_PyDict_GetItem(__pyx_v_node_lookup, __pyx_v_n); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __Pyx_GOTREF(__pyx_t_4);
+    __pyx_t_15 = __Pyx_PyInt_As_npy_int64(__pyx_t_4); if (unlikely((__pyx_t_15 == (npy_int64)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    __pyx_t_16 = __pyx_v_i;
+    *__Pyx_BufPtrStrided1d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_otus_in_nodes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_otus_in_nodes.diminfo[0].strides) = __pyx_t_15;
+  }
+
+  /* "skbio/diversity/_phylogenetic.pyx":202
+ * 
+ *     # count_array has a row per node (not tip) and a column per env.
+ *     n_count_vectors = counts.shape[0]             # <<<<<<<<<<<<<<
+ *     count_array = np.zeros((nodes.shape[0], n_count_vectors), dtype=DTYPE)
+ * 
+ */
+  __pyx_v_n_count_vectors = (__pyx_v_counts->dimensions[0]);
+
+  /* "skbio/diversity/_phylogenetic.pyx":203
+ *     # count_array has a row per node (not tip) and a column per env.
+ *     n_count_vectors = counts.shape[0]
+ *     count_array = np.zeros((nodes.shape[0], n_count_vectors), dtype=DTYPE)             # <<<<<<<<<<<<<<
+ * 
+ *     # populate the counts array with the counts of each observation in each
+ */
+  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t((__pyx_v_nodes->dimensions[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __pyx_t_5 = __Pyx_PyInt_From_npy_int64(__pyx_v_n_count_vectors); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_4);
+  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
+  __Pyx_GIVEREF(__pyx_t_5);
+  PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_5);
+  __pyx_t_4 = 0;
+  __pyx_t_5 = 0;
+  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_17 = ((PyArrayObject *)__pyx_t_4);
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_count_array.rcbuffer->pybuffer);
+    __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_count_array.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack);
+    if (unlikely(__pyx_t_7 < 0)) {
+      PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_count_array.rcbuffer->pybuffer, (PyObject*)__pyx_v_count_array, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
+        __Pyx_RaiseBufferFallbackError();
+      } else {
+        PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
+      }
+    }
+    __pyx_pybuffernd_count_array.diminfo[0].strides = __pyx_pybuffernd_count_array.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_count_array.diminfo[0].shape = __pyx_pybuffernd_count_array.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_count_array.diminfo[1].strides = __pyx_pybuffernd_count_array.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_count_array.diminfo[1].shape = __pyx_pybuffernd_count_array.rcbuffer->pybuffer.shape[1];
+    if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_17 = 0;
+  __pyx_v_count_array = ((PyArrayObject *)__pyx_t_4);
+  __pyx_t_4 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":207
+ *     # populate the counts array with the counts of each observation in each
+ *     # env
+ *     counts_t = counts.transpose()             # <<<<<<<<<<<<<<
+ *     n_count_otus = otus_in_nodes.shape[0]
+ *     for i in range(n_count_otus):
+ */
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_counts), __pyx_n_s_transpose); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_5 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_1))) {
+    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_1);
+    if (likely(__pyx_t_5)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
+      __Pyx_INCREF(__pyx_t_5);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_1, function);
+    }
+  }
+  if (__pyx_t_5) {
+    __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  } else {
+    __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_17 = ((PyArrayObject *)__pyx_t_4);
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_counts_t.rcbuffer->pybuffer);
+    __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_counts_t.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
+    if (unlikely(__pyx_t_7 < 0)) {
+      PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_counts_t.rcbuffer->pybuffer, (PyObject*)__pyx_v_counts_t, &__Pyx_TypeInfo_nn___pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8);
+        __Pyx_RaiseBufferFallbackError();
+      } else {
+        PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8);
+      }
+    }
+    __pyx_pybuffernd_counts_t.diminfo[0].strides = __pyx_pybuffernd_counts_t.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_counts_t.diminfo[0].shape = __pyx_pybuffernd_counts_t.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_counts_t.diminfo[1].strides = __pyx_pybuffernd_counts_t.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_counts_t.diminfo[1].shape = __pyx_pybuffernd_counts_t.rcbuffer->pybuffer.shape[1];
+    if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_17 = 0;
+  __pyx_v_counts_t = ((PyArrayObject *)__pyx_t_4);
+  __pyx_t_4 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":208
+ *     # env
+ *     counts_t = counts.transpose()
+ *     n_count_otus = otus_in_nodes.shape[0]             # <<<<<<<<<<<<<<
+ *     for i in range(n_count_otus):
+ *         for j in range(n_count_vectors):
+ */
+  __pyx_v_n_count_otus = (__pyx_v_otus_in_nodes->dimensions[0]);
+
+  /* "skbio/diversity/_phylogenetic.pyx":209
+ *     counts_t = counts.transpose()
+ *     n_count_otus = otus_in_nodes.shape[0]
+ *     for i in range(n_count_otus):             # <<<<<<<<<<<<<<
+ *         for j in range(n_count_vectors):
+ *             count_array[otus_in_nodes[i], j] = counts_t[observed_indices[i], j]
+ */
+  __pyx_t_15 = __pyx_v_n_count_otus;
+  for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_15; __pyx_t_12+=1) {
+    __pyx_v_i = __pyx_t_12;
+
+    /* "skbio/diversity/_phylogenetic.pyx":210
+ *     n_count_otus = otus_in_nodes.shape[0]
+ *     for i in range(n_count_otus):
+ *         for j in range(n_count_vectors):             # <<<<<<<<<<<<<<
+ *             count_array[otus_in_nodes[i], j] = counts_t[observed_indices[i], j]
+ * 
+ */
+    __pyx_t_18 = __pyx_v_n_count_vectors;
+    for (__pyx_t_19 = 0; __pyx_t_19 < __pyx_t_18; __pyx_t_19+=1) {
+      __pyx_v_j = __pyx_t_19;
+
+      /* "skbio/diversity/_phylogenetic.pyx":211
+ *     for i in range(n_count_otus):
+ *         for j in range(n_count_vectors):
+ *             count_array[otus_in_nodes[i], j] = counts_t[observed_indices[i], j]             # <<<<<<<<<<<<<<
+ * 
+ *     _traverse_reduce(indexed['child_index'], count_array)
+ */
+      __pyx_t_20 = __pyx_v_i;
+      __pyx_t_21 = (*__Pyx_BufPtrStrided1d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_observed_indices.rcbuffer->pybuffer.buf, __pyx_t_20, __pyx_pybuffernd_observed_indices.diminfo[0].strides));
+      __pyx_t_22 = __pyx_v_j;
+      __pyx_t_23 = __pyx_v_i;
+      __pyx_t_24 = (*__Pyx_BufPtrStrided1d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_otus_in_nodes.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_otus_in_nodes.diminfo[0].strides));
+      __pyx_t_25 = __pyx_v_j;
+      *__Pyx_BufPtrStrided2d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_count_array.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_count_array.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_count_array.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(__pyx_t_5skbio_9diversity_13_phylogenetic_DTYPE_t *, __pyx_pybuffernd_counts_t.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_counts_t.diminfo[0].strides, __pyx_t_22, __pyx_pybuffernd_counts_t.diminfo[1] [...]
+    }
+  }
+
+  /* "skbio/diversity/_phylogenetic.pyx":213
+ *             count_array[otus_in_nodes[i], j] = counts_t[observed_indices[i], j]
+ * 
+ *     _traverse_reduce(indexed['child_index'], count_array)             # <<<<<<<<<<<<<<
+ * 
+ *     return count_array
+ */
+  if (unlikely(__pyx_v_indexed == Py_None)) {
+    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_4 = __Pyx_PyDict_GetItem(__pyx_v_indexed, __pyx_n_s_child_index); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  __Pyx_GOTREF(__pyx_t_4);
+  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __pyx_f_5skbio_9diversity_13_phylogenetic__traverse_reduce(((PyArrayObject *)__pyx_t_4), ((PyArrayObject *)__pyx_v_count_array)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":215
+ *     _traverse_reduce(indexed['child_index'], count_array)
+ * 
+ *     return count_array             # <<<<<<<<<<<<<<
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(((PyObject *)__pyx_v_count_array));
+  __pyx_r = ((PyObject *)__pyx_v_count_array);
+  goto __pyx_L0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":143
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * def _nodes_by_counts(np.ndarray counts,             # <<<<<<<<<<<<<<
+ *                      np.ndarray tip_ids,
+ *                      dict indexed):
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_5);
+  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_count_array.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_counts_t.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_observed_indices.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_otus_in_nodes.rcbuffer->pybuffer);
+  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+  __Pyx_AddTraceback("skbio.diversity._phylogenetic._nodes_by_counts", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  goto __pyx_L2;
+  __pyx_L0:;
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_count_array.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_counts_t.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_observed_indices.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_otus_in_nodes.rcbuffer->pybuffer);
+  __pyx_L2:;
+  __Pyx_XDECREF((PyObject *)__pyx_v_nodes);
+  __Pyx_XDECREF((PyObject *)__pyx_v_observed_ids);
+  __Pyx_XDECREF((PyObject *)__pyx_v_count_array);
+  __Pyx_XDECREF((PyObject *)__pyx_v_counts_t);
+  __Pyx_XDECREF((PyObject *)__pyx_v_observed_indices);
+  __Pyx_XDECREF((PyObject *)__pyx_v_otus_in_nodes);
+  __Pyx_XDECREF(__pyx_v_observed_ids_set);
+  __Pyx_XDECREF(__pyx_v_n);
+  __Pyx_XDECREF(__pyx_v_node_lookup);
+  __Pyx_XDECREF((PyObject *)__pyx_v_counts);
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
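Taken together, the implementation above is a line-for-line expansion of the .pyx fragments quoted in its comments. As a reading aid, a pure-NumPy sketch of the same logic follows; names mirror the quoted source, DTYPE is assumed to be the module's integer dtype alias (npy_int64 in the generated code), and traverse_reduce stands in for the _traverse_reduce helper generated earlier in this file, which accumulates each node's child rows of the count array into the node's own row:

    import numpy as np

    DTYPE = np.int64  # assumption: the dtype alias used by the .pyx module

    def nodes_by_counts_sketch(counts, tip_ids, indexed, traverse_reduce):
        nodes = indexed['name']

        # allow counts to be a vector
        counts = np.atleast_2d(counts).astype(DTYPE)

        # tips with any observations across all environments
        observed_indices = counts.sum(0).nonzero()[0]
        observed_ids = tip_ids[observed_indices]
        observed_ids_set = set(observed_ids)

        # map each observed tip ID to its position in the node array
        node_lookup = {}
        for i in range(nodes.shape[0]):
            n = nodes[i]
            if n in observed_ids_set:
                node_lookup[n] = i

        otus_in_nodes = np.zeros(observed_ids.shape[0], dtype=DTYPE)
        for i in range(observed_ids.shape[0]):
            otus_in_nodes[i] = node_lookup[observed_ids[i]]

        # count_array has a row per node (not tip) and a column per environment
        n_count_vectors = counts.shape[0]
        count_array = np.zeros((nodes.shape[0], n_count_vectors), dtype=DTYPE)

        counts_t = counts.transpose()
        for i in range(otus_in_nodes.shape[0]):
            for j in range(n_count_vectors):
                count_array[otus_in_nodes[i], j] = counts_t[observed_indices[i], j]

        # fold tip counts up the tree (tips-to-root accumulation), then return
        traverse_reduce(indexed['child_index'], count_array)
        return count_array
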
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":197
+ *         # experimental exception made for __getbuffer__ and __releasebuffer__
+ *         # -- the details of this may change.
+ *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
+ *             # This implementation of getbuffer is geared towards Cython
+ *             # requirements, and does not yet fullfill the PEP.
+ */
+
+/* Python wrapper */
+static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
+static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
+  __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
 }
 
 static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
@@ -1739,7 +2749,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_GIVEREF(__pyx_v_info->obj);
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":203
  *             # of flags
  * 
  *             if info == NULL: return             # <<<<<<<<<<<<<<
@@ -1752,7 +2762,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L0;
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":206
  * 
  *             cdef int copy_shape, i, ndim
  *             cdef int endian_detector = 1             # <<<<<<<<<<<<<<
@@ -1761,7 +2771,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_endian_detector = 1;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":207
  *             cdef int copy_shape, i, ndim
  *             cdef int endian_detector = 1
  *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
@@ -1770,7 +2780,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":209
  *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
  * 
  *             ndim = PyArray_NDIM(self)             # <<<<<<<<<<<<<<
@@ -1779,7 +2789,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":211
  *             ndim = PyArray_NDIM(self)
  * 
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
@@ -1789,7 +2799,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":212
  * 
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
  *                 copy_shape = 1             # <<<<<<<<<<<<<<
@@ -1797,22 +2807,30 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  *                 copy_shape = 0
  */
     __pyx_v_copy_shape = 1;
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":211
+ *             ndim = PyArray_NDIM(self)
+ * 
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
+ *                 copy_shape = 1
+ *             else:
+ */
     goto __pyx_L4;
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":214
  *                 copy_shape = 1
  *             else:
  *                 copy_shape = 0             # <<<<<<<<<<<<<<
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  */
+  /*else*/ {
     __pyx_v_copy_shape = 0;
   }
   __pyx_L4:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":216
  *                 copy_shape = 0
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
@@ -1826,7 +2844,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L6_bool_binop_done;
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":217
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):             # <<<<<<<<<<<<<<
@@ -1836,23 +2854,39 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
   __pyx_t_1 = __pyx_t_2;
   __pyx_L6_bool_binop_done:;
+
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":216
+ *                 copy_shape = 0
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ */
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":218
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  */
-    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_Raise(__pyx_t_3, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":216
+ *                 copy_shape = 0
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ */
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":220
  *                 raise ValueError(u"ndarray is not C contiguous")
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
@@ -1866,7 +2900,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L9_bool_binop_done;
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":221
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):             # <<<<<<<<<<<<<<
@@ -1876,23 +2910,39 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
   __pyx_t_1 = __pyx_t_2;
   __pyx_L9_bool_binop_done:;
+
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":220
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ */
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":222
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
  * 
  *             info.buf = PyArray_DATA(self)
  */
-    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_Raise(__pyx_t_3, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":220
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ */
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":224
  *                 raise ValueError(u"ndarray is not Fortran contiguous")
  * 
  *             info.buf = PyArray_DATA(self)             # <<<<<<<<<<<<<<
@@ -1901,7 +2951,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":225
  * 
  *             info.buf = PyArray_DATA(self)
  *             info.ndim = ndim             # <<<<<<<<<<<<<<
@@ -1910,7 +2960,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->ndim = __pyx_v_ndim;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":226
  *             info.buf = PyArray_DATA(self)
  *             info.ndim = ndim
  *             if copy_shape:             # <<<<<<<<<<<<<<
@@ -1920,7 +2970,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = (__pyx_v_copy_shape != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":229
  *                 # Allocate new buffer for strides and shape info.
  *                 # This is allocated as one block, strides first.
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)             # <<<<<<<<<<<<<<
@@ -1929,7 +2979,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":230
  *                 # This is allocated as one block, strides first.
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
  *                 info.shape = info.strides + ndim             # <<<<<<<<<<<<<<
@@ -1938,7 +2988,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":231
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
  *                 info.shape = info.strides + ndim
  *                 for i in range(ndim):             # <<<<<<<<<<<<<<
@@ -1949,7 +2999,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
       __pyx_v_i = __pyx_t_5;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":232
  *                 info.shape = info.strides + ndim
  *                 for i in range(ndim):
  *                     info.strides[i] = PyArray_STRIDES(self)[i]             # <<<<<<<<<<<<<<
@@ -1958,7 +3008,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
       (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":233
  *                 for i in range(ndim):
  *                     info.strides[i] = PyArray_STRIDES(self)[i]
  *                     info.shape[i] = PyArray_DIMS(self)[i]             # <<<<<<<<<<<<<<
@@ -1967,20 +3017,28 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
       (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
     }
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":226
+ *             info.buf = PyArray_DATA(self)
+ *             info.ndim = ndim
+ *             if copy_shape:             # <<<<<<<<<<<<<<
+ *                 # Allocate new buffer for strides and shape info.
+ *                 # This is allocated as one block, strides first.
+ */
     goto __pyx_L11;
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":235
  *                     info.shape[i] = PyArray_DIMS(self)[i]
  *             else:
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)             # <<<<<<<<<<<<<<
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL
  */
+  /*else*/ {
     __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":236
  *             else:
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)             # <<<<<<<<<<<<<<
@@ -1991,7 +3049,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   __pyx_L11:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":237
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL             # <<<<<<<<<<<<<<
@@ -2000,7 +3058,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->suboffsets = NULL;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":238
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL
  *             info.itemsize = PyArray_ITEMSIZE(self)             # <<<<<<<<<<<<<<
@@ -2009,7 +3067,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":239
  *             info.suboffsets = NULL
  *             info.itemsize = PyArray_ITEMSIZE(self)
  *             info.readonly = not PyArray_ISWRITEABLE(self)             # <<<<<<<<<<<<<<
@@ -2018,28 +3076,28 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":242
  * 
  *             cdef int t
  *             cdef char* f = NULL             # <<<<<<<<<<<<<<
  *             cdef dtype descr = self.descr
- *             cdef list stack
+ *             cdef int offset
  */
   __pyx_v_f = NULL;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":243
  *             cdef int t
  *             cdef char* f = NULL
  *             cdef dtype descr = self.descr             # <<<<<<<<<<<<<<
- *             cdef list stack
  *             cdef int offset
+ * 
  */
   __pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
   __Pyx_INCREF(__pyx_t_3);
   __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
   __pyx_t_3 = 0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":247
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":246
  *             cdef int offset
  * 
  *             cdef bint hasfields = PyDataType_HASFIELDS(descr)             # <<<<<<<<<<<<<<
@@ -2048,7 +3106,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":249
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":248
  *             cdef bint hasfields = PyDataType_HASFIELDS(descr)
  * 
  *             if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
@@ -2066,7 +3124,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_L15_bool_binop_done:;
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":250
  *             if not hasfields and not copy_shape:
  *                 # do not call releasebuffer
  *                 info.obj = None             # <<<<<<<<<<<<<<
@@ -2078,17 +3136,25 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_GOTREF(__pyx_v_info->obj);
     __Pyx_DECREF(__pyx_v_info->obj);
     __pyx_v_info->obj = Py_None;
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":248
+ *             cdef bint hasfields = PyDataType_HASFIELDS(descr)
+ * 
+ *             if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
+ *                 # do not call releasebuffer
+ *                 info.obj = None
+ */
     goto __pyx_L14;
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":253
  *             else:
  *                 # need to call releasebuffer
  *                 info.obj = self             # <<<<<<<<<<<<<<
  * 
  *             if not hasfields:
  */
+  /*else*/ {
     __Pyx_INCREF(((PyObject *)__pyx_v_self));
     __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
     __Pyx_GOTREF(__pyx_v_info->obj);
@@ -2097,7 +3163,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   __pyx_L14:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":255
  *                 info.obj = self
  * 
  *             if not hasfields:             # <<<<<<<<<<<<<<
@@ -2107,7 +3173,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":256
  * 
  *             if not hasfields:
  *                 t = descr.type_num             # <<<<<<<<<<<<<<
@@ -2117,7 +3183,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __pyx_t_4 = __pyx_v_descr->type_num;
     __pyx_v_t = __pyx_t_4;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":257
  *             if not hasfields:
  *                 t = descr.type_num
  *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
@@ -2137,7 +3203,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     }
     __pyx_L20_next_or:;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":258
  *                 t = descr.type_num
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
@@ -2153,43 +3219,51 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
     __pyx_t_1 = __pyx_t_2;
     __pyx_L19_bool_binop_done:;
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ *             if not hasfields:
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")
+ */
     if (__pyx_t_1) {
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":259
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    }
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
- *                 elif t == NPY_CDOUBLE:     f = "Zd"
- *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
- *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
- *                 else:
- *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ *             if not hasfields:
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")
  */
-    switch (__pyx_v_t) {
+    }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":260
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"             # <<<<<<<<<<<<<<
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"
  */
+    switch (__pyx_v_t) {
       case NPY_BYTE:
       __pyx_v_f = __pyx_k_b;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":261
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"             # <<<<<<<<<<<<<<
@@ -2200,7 +3274,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_B;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":262
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"             # <<<<<<<<<<<<<<
@@ -2211,7 +3285,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_h;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":263
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"
  *                 elif t == NPY_USHORT:      f = "H"             # <<<<<<<<<<<<<<
@@ -2222,7 +3296,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_H;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":264
  *                 elif t == NPY_SHORT:       f = "h"
  *                 elif t == NPY_USHORT:      f = "H"
  *                 elif t == NPY_INT:         f = "i"             # <<<<<<<<<<<<<<
@@ -2233,7 +3307,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_i;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":265
  *                 elif t == NPY_USHORT:      f = "H"
  *                 elif t == NPY_INT:         f = "i"
  *                 elif t == NPY_UINT:        f = "I"             # <<<<<<<<<<<<<<
@@ -2244,7 +3318,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_I;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":266
  *                 elif t == NPY_INT:         f = "i"
  *                 elif t == NPY_UINT:        f = "I"
  *                 elif t == NPY_LONG:        f = "l"             # <<<<<<<<<<<<<<
@@ -2255,7 +3329,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_l;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":267
  *                 elif t == NPY_UINT:        f = "I"
  *                 elif t == NPY_LONG:        f = "l"
  *                 elif t == NPY_ULONG:       f = "L"             # <<<<<<<<<<<<<<
@@ -2266,7 +3340,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_L;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":268
  *                 elif t == NPY_LONG:        f = "l"
  *                 elif t == NPY_ULONG:       f = "L"
  *                 elif t == NPY_LONGLONG:    f = "q"             # <<<<<<<<<<<<<<
@@ -2277,7 +3351,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_q;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":269
  *                 elif t == NPY_ULONG:       f = "L"
  *                 elif t == NPY_LONGLONG:    f = "q"
  *                 elif t == NPY_ULONGLONG:   f = "Q"             # <<<<<<<<<<<<<<
@@ -2288,7 +3362,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Q;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":270
  *                 elif t == NPY_LONGLONG:    f = "q"
  *                 elif t == NPY_ULONGLONG:   f = "Q"
  *                 elif t == NPY_FLOAT:       f = "f"             # <<<<<<<<<<<<<<
@@ -2299,7 +3373,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_f;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":271
  *                 elif t == NPY_ULONGLONG:   f = "Q"
  *                 elif t == NPY_FLOAT:       f = "f"
  *                 elif t == NPY_DOUBLE:      f = "d"             # <<<<<<<<<<<<<<
@@ -2310,7 +3384,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_d;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":272
  *                 elif t == NPY_FLOAT:       f = "f"
  *                 elif t == NPY_DOUBLE:      f = "d"
  *                 elif t == NPY_LONGDOUBLE:  f = "g"             # <<<<<<<<<<<<<<
@@ -2321,7 +3395,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_g;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":273
  *                 elif t == NPY_DOUBLE:      f = "d"
  *                 elif t == NPY_LONGDOUBLE:  f = "g"
  *                 elif t == NPY_CFLOAT:      f = "Zf"             # <<<<<<<<<<<<<<
@@ -2332,7 +3406,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zf;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":274
  *                 elif t == NPY_LONGDOUBLE:  f = "g"
  *                 elif t == NPY_CFLOAT:      f = "Zf"
  *                 elif t == NPY_CDOUBLE:     f = "Zd"             # <<<<<<<<<<<<<<
@@ -2343,7 +3417,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zd;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":275
  *                 elif t == NPY_CFLOAT:      f = "Zf"
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"             # <<<<<<<<<<<<<<
@@ -2354,7 +3428,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zg;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":276
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
  *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
@@ -2366,33 +3440,33 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       break;
       default:
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":278
  *                 elif t == NPY_OBJECT:      f = "O"
  *                 else:
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
  *                 info.format = f
  *                 return
  */
-      __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
       __Pyx_GIVEREF(__pyx_t_6);
+      PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
       __pyx_t_6 = 0;
-      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       __Pyx_Raise(__pyx_t_6, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       break;
     }
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":279
  *                 else:
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *                 info.format = f             # <<<<<<<<<<<<<<
@@ -2401,7 +3475,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->format = __pyx_v_f;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":280
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *                 info.format = f
  *                 return             # <<<<<<<<<<<<<<
@@ -2410,19 +3484,27 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_r = 0;
     goto __pyx_L0;
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":255
+ *                 info.obj = self
+ * 
+ *             if not hasfields:             # <<<<<<<<<<<<<<
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ */
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":282
  *                 return
  *             else:
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)             # <<<<<<<<<<<<<<
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0
  */
-    __pyx_v_info->format = ((char *)malloc(255));
+  /*else*/ {
+    __pyx_v_info->format = ((char *)malloc(0xFF));
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":283
  *             else:
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
  *                 info.format[0] = c'^' # Native data types, manual alignment             # <<<<<<<<<<<<<<
@@ -2431,7 +3513,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     (__pyx_v_info->format[0]) = '^';
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":284
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0             # <<<<<<<<<<<<<<
@@ -2440,17 +3522,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_offset = 0;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":285
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0
  *                 f = _util_dtypestring(descr, info.format + 1,             # <<<<<<<<<<<<<<
  *                                       info.format + _buffer_format_string_len,
  *                                       &offset)
  */
-    __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __pyx_v_f = __pyx_t_7;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":288
  *                                       info.format + _buffer_format_string_len,
  *                                       &offset)
  *                 f[0] = c'\0' # Terminate format string             # <<<<<<<<<<<<<<
@@ -2460,7 +3542,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     (__pyx_v_f[0]) = '\x00';
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":197
  *         # experimental exception made for __getbuffer__ and __releasebuffer__
  *         # -- the details of this may change.
  *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
@@ -2492,7 +3574,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":290
  *                 f[0] = c'\0' # Terminate format string
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
@@ -2516,7 +3598,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("__releasebuffer__", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":291
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):
  *             if PyArray_HASFIELDS(self):             # <<<<<<<<<<<<<<
@@ -2526,7 +3608,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":292
  *         def __releasebuffer__(ndarray self, Py_buffer* info):
  *             if PyArray_HASFIELDS(self):
  *                 stdlib.free(info.format)             # <<<<<<<<<<<<<<
@@ -2534,11 +3616,17 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
  *                 stdlib.free(info.strides)
  */
     free(__pyx_v_info->format);
-    goto __pyx_L3;
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":291
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):
+ *             if PyArray_HASFIELDS(self):             # <<<<<<<<<<<<<<
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
   }
-  __pyx_L3:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":293
  *             if PyArray_HASFIELDS(self):
  *                 stdlib.free(info.format)
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
@@ -2548,7 +3636,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":295
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":294
  *                 stdlib.free(info.format)
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
  *                 stdlib.free(info.strides)             # <<<<<<<<<<<<<<
@@ -2556,11 +3644,17 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
  * 
  */
     free(__pyx_v_info->strides);
-    goto __pyx_L4;
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":293
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
+ *                 stdlib.free(info.strides)
+ *                 # info.shape was stored after info.strides in the same block
+ */
   }
-  __pyx_L4:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":290
  *                 f[0] = c'\0' # Terminate format string
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
@@ -2572,7 +3666,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __Pyx_RefNannyFinishContext();
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":770
  * ctypedef npy_cdouble     complex_t
  * 
  * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
@@ -2589,7 +3683,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":771
  * 
  * cdef inline object PyArray_MultiIterNew1(a):
  *     return PyArray_MultiIterNew(1, <void*>a)             # <<<<<<<<<<<<<<
@@ -2597,13 +3691,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
  * cdef inline object PyArray_MultiIterNew2(a, b):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":770
  * ctypedef npy_cdouble     complex_t
  * 
  * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
@@ -2622,7 +3716,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":773
  *     return PyArray_MultiIterNew(1, <void*>a)
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
@@ -2639,7 +3733,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":774
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)             # <<<<<<<<<<<<<<
@@ -2647,13 +3741,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
  * cdef inline object PyArray_MultiIterNew3(a, b, c):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":773
  *     return PyArray_MultiIterNew(1, <void*>a)
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
@@ -2672,7 +3766,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":776
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
@@ -2689,7 +3783,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":777
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)             # <<<<<<<<<<<<<<
@@ -2697,13 +3791,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":776
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
@@ -2722,7 +3816,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":779
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
@@ -2739,7 +3833,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":780
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)             # <<<<<<<<<<<<<<
@@ -2747,13 +3841,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":779
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
@@ -2772,7 +3866,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":782
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
@@ -2789,7 +3883,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":784
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":783
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)             # <<<<<<<<<<<<<<
@@ -2797,13 +3891,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 784; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 783; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":782
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
@@ -2822,7 +3916,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":785
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
  * 
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
@@ -2854,17 +3948,17 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_util_dtypestring", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":793
- *     cdef int delta_offset
- *     cdef tuple i
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":790
+ * 
+ *     cdef dtype child
  *     cdef int endian_detector = 1             # <<<<<<<<<<<<<<
  *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
  *     cdef tuple fields
  */
   __pyx_v_endian_detector = 1;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
- *     cdef tuple i
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":791
+ *     cdef dtype child
  *     cdef int endian_detector = 1
  *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
  *     cdef tuple fields
@@ -2872,7 +3966,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":794
  *     cdef tuple fields
  * 
  *     for childname in descr.names:             # <<<<<<<<<<<<<<
@@ -2881,20 +3975,21 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   if (unlikely(__pyx_v_descr->names == Py_None)) {
     PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
-    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
   for (;;) {
     if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
     #if CYTHON_COMPILING_IN_CPYTHON
-    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     #else
-    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
     #endif
     __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
     __pyx_t_3 = 0;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":795
  * 
  *     for childname in descr.names:
  *         fields = descr.fields[childname]             # <<<<<<<<<<<<<<
@@ -2903,15 +3998,15 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
     if (unlikely(__pyx_v_descr->fields == Py_None)) {
       PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
-    __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
     __Pyx_GOTREF(__pyx_t_3);
-    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
     __pyx_t_3 = 0;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":796
  *     for childname in descr.names:
  *         fields = descr.fields[childname]
  *         child, new_offset = fields             # <<<<<<<<<<<<<<
@@ -2928,7 +4023,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       if (unlikely(size != 2)) {
         if (size > 2) __Pyx_RaiseTooManyValuesError(2);
         else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       #if CYTHON_COMPILING_IN_CPYTHON
       __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); 
@@ -2936,52 +4031,60 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       __Pyx_INCREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_t_4);
       #else
-      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       #endif
     } else {
-      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
-    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
     __pyx_t_3 = 0;
     __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
     __pyx_t_4 = 0;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":798
  *         child, new_offset = fields
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
  * 
  */
-    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
     if (__pyx_t_6) {
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":799
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":798
+ *         child, new_offset = fields
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ */
     }
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":804
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":801
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
  * 
  *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
@@ -3001,7 +4104,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     }
     __pyx_L8_next_or:;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":805
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":802
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
@@ -3017,23 +4120,39 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
     __pyx_t_6 = __pyx_t_7;
     __pyx_L7_bool_binop_done:;
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":801
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")
+ */
     if (__pyx_t_6) {
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":803
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *             # One could encode it in the format string and have Cython
  *             # complain instead, BUT: < and > in format strings also imply
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":801
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")
+ */
     }
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":813
  * 
  *         # Output padding bytes
  *         while offset[0] < new_offset:             # <<<<<<<<<<<<<<
@@ -3041,24 +4160,24 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  *             f += 1
  */
     while (1) {
-      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (!__pyx_t_6) break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":817
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":814
  *         # Output padding bytes
  *         while offset[0] < new_offset:
  *             f[0] = 120 # "x"; pad byte             # <<<<<<<<<<<<<<
  *             f += 1
  *             offset[0] += 1
  */
-      (__pyx_v_f[0]) = 120;
+      (__pyx_v_f[0]) = 0x78;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":815
  *         while offset[0] < new_offset:
  *             f[0] = 120 # "x"; pad byte
  *             f += 1             # <<<<<<<<<<<<<<
@@ -3067,7 +4186,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
       __pyx_v_f = (__pyx_v_f + 1);
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":819
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":816
  *             f[0] = 120 # "x"; pad byte
  *             f += 1
  *             offset[0] += 1             # <<<<<<<<<<<<<<
@@ -3078,7 +4197,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
     }
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":818
  *             offset[0] += 1
  * 
  *         offset[0] += child.itemsize             # <<<<<<<<<<<<<<
@@ -3088,7 +4207,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_8 = 0;
     (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":820
  *         offset[0] += child.itemsize
  * 
  *         if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
@@ -3098,19 +4217,19 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
     if (__pyx_t_6) {
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":824
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":821
  * 
  *         if not PyDataType_HASFIELDS(child):
  *             t = child.type_num             # <<<<<<<<<<<<<<
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")
  */
-      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
       __pyx_t_4 = 0;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":825
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":822
  *         if not PyDataType_HASFIELDS(child):
  *             t = child.type_num
  *             if end - f < 5:             # <<<<<<<<<<<<<<
@@ -3120,357 +4239,365 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
       if (__pyx_t_6) {
 
-        /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+        /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":823
  *             t = child.type_num
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  */
-        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
         __Pyx_Raise(__pyx_t_4, 0, 0, 0);
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+        /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":822
+ *         if not PyDataType_HASFIELDS(child):
+ *             t = child.type_num
+ *             if end - f < 5:             # <<<<<<<<<<<<<<
+ *                 raise RuntimeError(u"Format string allocated too short.")
+ * 
+ */
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":826
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"             # <<<<<<<<<<<<<<
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 98;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":827
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"             # <<<<<<<<<<<<<<
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 66;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":828
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"             # <<<<<<<<<<<<<<
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 104;
+        (__pyx_v_f[0]) = 0x68;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":829
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"             # <<<<<<<<<<<<<<
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 72;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":830
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"             # <<<<<<<<<<<<<<
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 105;
+        (__pyx_v_f[0]) = 0x69;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":831
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 73;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":832
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"             # <<<<<<<<<<<<<<
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 108;
+        (__pyx_v_f[0]) = 0x6C;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":833
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 76;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":834
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"             # <<<<<<<<<<<<<<
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 113;
+        (__pyx_v_f[0]) = 0x71;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":835
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"             # <<<<<<<<<<<<<<
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 81;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":836
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"             # <<<<<<<<<<<<<<
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 102;
+        (__pyx_v_f[0]) = 0x66;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":837
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 100;
+        (__pyx_v_f[0]) = 0x64;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":838
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"             # <<<<<<<<<<<<<<
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 103;
+        (__pyx_v_f[0]) = 0x67;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":839
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf             # <<<<<<<<<<<<<<
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
-        (__pyx_v_f[1]) = 102;
+        (__pyx_v_f[1]) = 0x66;
         __pyx_v_f = (__pyx_v_f + 1);
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":843
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":840
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd             # <<<<<<<<<<<<<<
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
-        (__pyx_v_f[1]) = 100;
+        (__pyx_v_f[1]) = 0x64;
         __pyx_v_f = (__pyx_v_f + 1);
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":841
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg             # <<<<<<<<<<<<<<
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  *             else:
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
-        (__pyx_v_f[1]) = 103;
+        (__pyx_v_f[1]) = 0x67;
         __pyx_v_f = (__pyx_v_f + 1);
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":842
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"             # <<<<<<<<<<<<<<
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 79;
         goto __pyx_L15;
       }
-      /*else*/ {
 
-        /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":847
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":844
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
  *             f += 1
  *         else:
  */
-        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      /*else*/ {
+        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
-        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
-        PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
         __Pyx_GIVEREF(__pyx_t_3);
+        PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
         __pyx_t_3 = 0;
-        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
         __Pyx_Raise(__pyx_t_3, 0, 0, 0);
         __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       __pyx_L15:;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":848
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":845
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *             f += 1             # <<<<<<<<<<<<<<
@@ -3478,23 +4605,31 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  *             # Cython ignores struct boundary information ("T{...}"),
  */
       __pyx_v_f = (__pyx_v_f + 1);
+
+      /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":820
+ *         offset[0] += child.itemsize
+ * 
+ *         if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
+ *             t = child.type_num
+ *             if end - f < 5:
+ */
       goto __pyx_L13;
     }
-    /*else*/ {
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":852
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":849
  *             # Cython ignores struct boundary information ("T{...}"),
  *             # so don't output it
  *             f = _util_dtypestring(child, f, end, offset)             # <<<<<<<<<<<<<<
  *     return f
  * 
  */
-      __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    /*else*/ {
+      __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __pyx_v_f = __pyx_t_9;
     }
     __pyx_L13:;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":794
  *     cdef tuple fields
  * 
  *     for childname in descr.names:             # <<<<<<<<<<<<<<
@@ -3504,7 +4639,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":853
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":850
  *             # so don't output it
  *             f = _util_dtypestring(child, f, end, offset)
  *     return f             # <<<<<<<<<<<<<<
@@ -3514,7 +4649,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   __pyx_r = __pyx_v_f;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":785
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
  * 
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
@@ -3539,7 +4674,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":966
  * 
  * 
  * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
@@ -3554,7 +4689,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   int __pyx_t_2;
   __Pyx_RefNannySetupContext("set_array_base", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":968
  * cdef inline void set_array_base(ndarray arr, object base):
  *      cdef PyObject* baseptr
  *      if base is None:             # <<<<<<<<<<<<<<
@@ -3565,7 +4700,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   __pyx_t_2 = (__pyx_t_1 != 0);
   if (__pyx_t_2) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":969
  *      cdef PyObject* baseptr
  *      if base is None:
  *          baseptr = NULL             # <<<<<<<<<<<<<<
@@ -3573,20 +4708,28 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  *          Py_INCREF(base) # important to do this before decref below!
  */
     __pyx_v_baseptr = NULL;
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":968
+ * cdef inline void set_array_base(ndarray arr, object base):
+ *      cdef PyObject* baseptr
+ *      if base is None:             # <<<<<<<<<<<<<<
+ *          baseptr = NULL
+ *      else:
+ */
     goto __pyx_L3;
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":971
  *          baseptr = NULL
  *      else:
  *          Py_INCREF(base) # important to do this before decref below!             # <<<<<<<<<<<<<<
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)
  */
+  /*else*/ {
     Py_INCREF(__pyx_v_base);
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":975
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":972
  *      else:
  *          Py_INCREF(base) # important to do this before decref below!
  *          baseptr = <PyObject*>base             # <<<<<<<<<<<<<<
@@ -3597,7 +4740,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   }
   __pyx_L3:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":973
  *          Py_INCREF(base) # important to do this before decref below!
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)             # <<<<<<<<<<<<<<
@@ -3606,7 +4749,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
   Py_XDECREF(__pyx_v_arr->base);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":974
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)
  *      arr.base = baseptr             # <<<<<<<<<<<<<<
@@ -3615,7 +4758,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
   __pyx_v_arr->base = __pyx_v_baseptr;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":966
  * 
  * 
  * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
@@ -3627,7 +4770,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   __Pyx_RefNannyFinishContext();
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
+/* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -3641,7 +4784,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("get_array_base", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":977
  * 
  * cdef inline object get_array_base(ndarray arr):
  *     if arr.base is NULL:             # <<<<<<<<<<<<<<
@@ -3651,7 +4794,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":981
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":978
  * cdef inline object get_array_base(ndarray arr):
  *     if arr.base is NULL:
  *         return None             # <<<<<<<<<<<<<<
@@ -3662,21 +4805,29 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
     __Pyx_INCREF(Py_None);
     __pyx_r = Py_None;
     goto __pyx_L0;
+
+    /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":977
+ * 
+ * cdef inline object get_array_base(ndarray arr):
+ *     if arr.base is NULL:             # <<<<<<<<<<<<<<
+ *         return None
+ *     else:
+ */
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":983
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":980
  *         return None
  *     else:
  *         return <object>arr.base             # <<<<<<<<<<<<<<
  */
+  /*else*/ {
     __Pyx_XDECREF(__pyx_r);
     __Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
     __pyx_r = ((PyObject *)__pyx_v_arr->base);
     goto __pyx_L0;
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -3702,7 +4853,7 @@ static struct PyModuleDef __pyx_moduledef = {
   #else
     PyModuleDef_HEAD_INIT,
   #endif
-    "__subsample",
+    "_phylogenetic",
     0, /* m_doc */
     -1, /* m_size */
     __pyx_methods /* m_methods */,
@@ -3714,45 +4865,70 @@ static struct PyModuleDef __pyx_moduledef = {
 #endif
 
 static __Pyx_StringTabEntry __pyx_string_tab[] = {
+  {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1},
   {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
   {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
   {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
   {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
+  {&__pyx_kp_s_Users_caporaso_Dropbox_code_sci, __pyx_k_Users_caporaso_Dropbox_code_sci, sizeof(__pyx_k_Users_caporaso_Dropbox_code_sci), 0, 0, 1, 0},
   {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
-  {&__pyx_n_s_cnt, __pyx_k_cnt, sizeof(__pyx_k_cnt), 0, 0, 1, 1},
+  {&__pyx_n_s_a, __pyx_k_a, sizeof(__pyx_k_a), 0, 0, 1, 1},
+  {&__pyx_n_s_astype, __pyx_k_astype, sizeof(__pyx_k_astype), 0, 0, 1, 1},
+  {&__pyx_n_s_atleast_2d, __pyx_k_atleast_2d, sizeof(__pyx_k_atleast_2d), 0, 0, 1, 1},
+  {&__pyx_n_s_child_index, __pyx_k_child_index, sizeof(__pyx_k_child_index), 0, 0, 1, 1},
+  {&__pyx_n_s_copy, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1},
+  {&__pyx_n_s_count_array, __pyx_k_count_array, sizeof(__pyx_k_count_array), 0, 0, 1, 1},
   {&__pyx_n_s_counts, __pyx_k_counts, sizeof(__pyx_k_counts), 0, 0, 1, 1},
-  {&__pyx_n_s_counts_sum, __pyx_k_counts_sum, sizeof(__pyx_k_counts_sum), 0, 0, 1, 1},
+  {&__pyx_n_s_counts_t, __pyx_k_counts_t, sizeof(__pyx_k_counts_t), 0, 0, 1, 1},
+  {&__pyx_n_s_double, __pyx_k_double, sizeof(__pyx_k_double), 0, 0, 1, 1},
   {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
-  {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1},
-  {&__pyx_kp_s_home_evan_biocore_scikit_bio_sk, __pyx_k_home_evan_biocore_scikit_bio_sk, sizeof(__pyx_k_home_evan_biocore_scikit_bio_sk), 0, 0, 1, 0},
   {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
-  {&__pyx_n_s_idx, __pyx_k_idx, sizeof(__pyx_k_idx), 0, 0, 1, 1},
+  {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
   {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
+  {&__pyx_n_s_include_self, __pyx_k_include_self, sizeof(__pyx_k_include_self), 0, 0, 1, 1},
+  {&__pyx_n_s_indexed, __pyx_k_indexed, sizeof(__pyx_k_indexed), 0, 0, 1, 1},
+  {&__pyx_n_s_int64, __pyx_k_int64, sizeof(__pyx_k_int64), 0, 0, 1, 1},
   {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
   {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
+  {&__pyx_n_s_mask, __pyx_k_mask, sizeof(__pyx_k_mask), 0, 0, 1, 1},
   {&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1},
+  {&__pyx_n_s_n_count_otus, __pyx_k_n_count_otus, sizeof(__pyx_k_n_count_otus), 0, 0, 1, 1},
+  {&__pyx_n_s_n_count_vectors, __pyx_k_n_count_vectors, sizeof(__pyx_k_n_count_vectors), 0, 0, 1, 1},
+  {&__pyx_n_s_n_rows, __pyx_k_n_rows, sizeof(__pyx_k_n_rows), 0, 0, 1, 1},
+  {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
   {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
   {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
+  {&__pyx_n_s_node_lookup, __pyx_k_node_lookup, sizeof(__pyx_k_node_lookup), 0, 0, 1, 1},
+  {&__pyx_n_s_nodes, __pyx_k_nodes, sizeof(__pyx_k_nodes), 0, 0, 1, 1},
+  {&__pyx_n_s_nodes_by_counts, __pyx_k_nodes_by_counts, sizeof(__pyx_k_nodes_by_counts), 0, 0, 1, 1},
+  {&__pyx_n_s_nonzero, __pyx_k_nonzero, sizeof(__pyx_k_nonzero), 0, 0, 1, 1},
   {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
   {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
-  {&__pyx_n_s_permutation, __pyx_k_permutation, sizeof(__pyx_k_permutation), 0, 0, 1, 1},
-  {&__pyx_n_s_permuted, __pyx_k_permuted, sizeof(__pyx_k_permuted), 0, 0, 1, 1},
-  {&__pyx_n_s_random, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1},
+  {&__pyx_n_s_observed_ids, __pyx_k_observed_ids, sizeof(__pyx_k_observed_ids), 0, 0, 1, 1},
+  {&__pyx_n_s_observed_ids_set, __pyx_k_observed_ids_set, sizeof(__pyx_k_observed_ids_set), 0, 0, 1, 1},
+  {&__pyx_n_s_observed_indices, __pyx_k_observed_indices, sizeof(__pyx_k_observed_indices), 0, 0, 1, 1},
+  {&__pyx_n_s_otus_in_nodes, __pyx_k_otus_in_nodes, sizeof(__pyx_k_otus_in_nodes), 0, 0, 1, 1},
+  {&__pyx_n_s_p_i, __pyx_k_p_i, sizeof(__pyx_k_p_i), 0, 0, 1, 1},
+  {&__pyx_n_s_parent, __pyx_k_parent, sizeof(__pyx_k_parent), 0, 0, 1, 1},
+  {&__pyx_n_s_preorder, __pyx_k_preorder, sizeof(__pyx_k_preorder), 0, 0, 1, 1},
   {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
-  {&__pyx_n_s_result, __pyx_k_result, sizeof(__pyx_k_result), 0, 0, 1, 1},
-  {&__pyx_n_s_skbio_stats___subsample, __pyx_k_skbio_stats___subsample, sizeof(__pyx_k_skbio_stats___subsample), 0, 0, 1, 1},
-  {&__pyx_n_s_subsample_counts_without_replac, __pyx_k_subsample_counts_without_replac, sizeof(__pyx_k_subsample_counts_without_replac), 0, 0, 1, 1},
+  {&__pyx_n_s_skbio_diversity__phylogenetic, __pyx_k_skbio_diversity__phylogenetic, sizeof(__pyx_k_skbio_diversity__phylogenetic), 0, 0, 1, 1},
+  {&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1},
+  {&__pyx_n_s_t, __pyx_k_t, sizeof(__pyx_k_t), 0, 0, 1, 1},
   {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
+  {&__pyx_n_s_tip_distances, __pyx_k_tip_distances, sizeof(__pyx_k_tip_distances), 0, 0, 1, 1},
+  {&__pyx_n_s_tip_ds, __pyx_k_tip_ds, sizeof(__pyx_k_tip_ds), 0, 0, 1, 1},
+  {&__pyx_n_s_tip_ids, __pyx_k_tip_ids, sizeof(__pyx_k_tip_ids), 0, 0, 1, 1},
+  {&__pyx_n_s_tip_indices, __pyx_k_tip_indices, sizeof(__pyx_k_tip_indices), 0, 0, 1, 1},
+  {&__pyx_n_s_transpose, __pyx_k_transpose, sizeof(__pyx_k_transpose), 0, 0, 1, 1},
   {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
-  {&__pyx_n_s_unpacked, __pyx_k_unpacked, sizeof(__pyx_k_unpacked), 0, 0, 1, 1},
-  {&__pyx_n_s_unpacked_idx, __pyx_k_unpacked_idx, sizeof(__pyx_k_unpacked_idx), 0, 0, 1, 1},
-  {&__pyx_n_s_zeros_like, __pyx_k_zeros_like, sizeof(__pyx_k_zeros_like), 0, 0, 1, 1},
+  {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
   {0, 0, 0, 0, 0, 0, 0}
 };
 static int __Pyx_InitCachedBuiltins(void) {
-  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   return 0;
   __pyx_L1_error:;
   return -1;
@@ -3762,83 +4938,106 @@ static int __Pyx_InitCachedConstants(void) {
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+  /* "skbio/diversity/_phylogenetic.pyx":184
+ *     # determine observed IDs. It may be possible to unroll these calls to
+ *     # squeeze a little more performance
+ *     observed_indices = counts.sum(0).nonzero()[0]             # <<<<<<<<<<<<<<
+ *     observed_ids = tip_ids[observed_indices]
+ *     observed_ids_set = set(observed_ids)
+ */
+  __pyx_tuple_ = PyTuple_Pack(1, __pyx_int_0); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple_);
+  __Pyx_GIVEREF(__pyx_tuple_);
+
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":218
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  */
-  __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_tuple_);
-  __Pyx_GIVEREF(__pyx_tuple_);
+  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__2);
+  __Pyx_GIVEREF(__pyx_tuple__2);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":222
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
  * 
  *             info.buf = PyArray_DATA(self)
  */
-  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_tuple__2);
-  __Pyx_GIVEREF(__pyx_tuple__2);
+  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__3);
+  __Pyx_GIVEREF(__pyx_tuple__3);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":259
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  */
-  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_tuple__3);
-  __Pyx_GIVEREF(__pyx_tuple__3);
+  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__4);
+  __Pyx_GIVEREF(__pyx_tuple__4);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":799
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  */
-  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_tuple__4);
-  __Pyx_GIVEREF(__pyx_tuple__4);
+  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__5);
+  __Pyx_GIVEREF(__pyx_tuple__5);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":803
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *             # One could encode it in the format string and have Cython
  *             # complain instead, BUT: < and > in format strings also imply
  */
-  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_tuple__5);
-  __Pyx_GIVEREF(__pyx_tuple__5);
+  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__6);
+  __Pyx_GIVEREF(__pyx_tuple__6);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":823
  *             t = child.type_num
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  */
-  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_tuple__6);
-  __Pyx_GIVEREF(__pyx_tuple__6);
-
-  /* "skbio/stats/__subsample.pyx":15
- * 
- * 
- * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
- *     cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
- *     cdef:
- */
-  __pyx_tuple__7 = PyTuple_Pack(11, __pyx_n_s_counts, __pyx_n_s_n, __pyx_n_s_counts_sum, __pyx_n_s_result, __pyx_n_s_permuted, __pyx_n_s_unpacked, __pyx_n_s_cnt, __pyx_n_s_unpacked_idx, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_idx); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__7);
   __Pyx_GIVEREF(__pyx_tuple__7);
-  __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(3, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_evan_biocore_scikit_bio_sk, __pyx_n_s_subsample_counts_without_replac, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+  /* "skbio/diversity/_phylogenetic.pyx":19
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * def _tip_distances(np.ndarray[np.double_t, ndim=1] a, object t,             # <<<<<<<<<<<<<<
+ *                    np.ndarray[DTYPE_t, ndim=1] tip_indices):
+ *     """Sets each tip to its distance from the root
+ */
+  __pyx_tuple__8 = PyTuple_Pack(9, __pyx_n_s_a, __pyx_n_s_t, __pyx_n_s_tip_indices, __pyx_n_s_n, __pyx_n_s_i, __pyx_n_s_p_i, __pyx_n_s_n_rows, __pyx_n_s_mask, __pyx_n_s_tip_ds); if (unlikely(!__pyx_tuple__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__8);
+  __Pyx_GIVEREF(__pyx_tuple__8);
+  __pyx_codeobj__9 = (PyObject*)__Pyx_PyCode_New(3, 0, 9, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__8, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_caporaso_Dropbox_code_sci, __pyx_n_s_tip_distances, 19, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+  /* "skbio/diversity/_phylogenetic.pyx":143
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * def _nodes_by_counts(np.ndarray counts,             # <<<<<<<<<<<<<<
+ *                      np.ndarray tip_ids,
+ *                      dict indexed):
+ */
+  __pyx_tuple__10 = PyTuple_Pack(16, __pyx_n_s_counts, __pyx_n_s_tip_ids, __pyx_n_s_indexed, __pyx_n_s_nodes, __pyx_n_s_observed_ids, __pyx_n_s_count_array, __pyx_n_s_counts_t, __pyx_n_s_observed_indices, __pyx_n_s_otus_in_nodes, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_observed_ids_set, __pyx_n_s_n, __pyx_n_s_node_lookup, __pyx_n_s_n_count_vectors, __pyx_n_s_n_count_otus); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__10);
+  __Pyx_GIVEREF(__pyx_tuple__10);
+  __pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(3, 0, 16, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_caporaso_Dropbox_code_sci, __pyx_n_s_nodes_by_counts, 143, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_RefNannyFinishContext();
   return 0;
   __pyx_L1_error:;
@@ -3848,20 +5047,22 @@ static int __Pyx_InitCachedConstants(void) {
 
 static int __Pyx_InitGlobals(void) {
   if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   return 0;
   __pyx_L1_error:;
   return -1;
 }
 
 #if PY_MAJOR_VERSION < 3
-PyMODINIT_FUNC init__subsample(void); /*proto*/
-PyMODINIT_FUNC init__subsample(void)
+PyMODINIT_FUNC init_phylogenetic(void); /*proto*/
+PyMODINIT_FUNC init_phylogenetic(void)
 #else
-PyMODINIT_FUNC PyInit___subsample(void); /*proto*/
-PyMODINIT_FUNC PyInit___subsample(void)
+PyMODINIT_FUNC PyInit__phylogenetic(void); /*proto*/
+PyMODINIT_FUNC PyInit__phylogenetic(void)
 #endif
 {
   PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
@@ -3875,19 +5076,25 @@ PyMODINIT_FUNC PyInit___subsample(void)
           Py_FatalError("failed to import 'refnanny' module");
   }
   #endif
-  __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit___subsample(void)", 0);
-  if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__phylogenetic(void)", 0);
+  if (__Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #ifdef __Pyx_CyFunction_USED
-  if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
   #ifdef __Pyx_FusedFunction_USED
   if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
+  #ifdef __Pyx_Coroutine_USED
+  if (__pyx_Coroutine_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
   #ifdef __Pyx_Generator_USED
   if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
+  #ifdef __Pyx_StopAsyncIteration_USED
+  if (__pyx_StopAsyncIteration_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
   /*--- Library function declarations ---*/
   /*--- Threads initialization code ---*/
   #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
@@ -3897,7 +5104,7 @@ PyMODINIT_FUNC PyInit___subsample(void)
   #endif
   /*--- Module creation code ---*/
   #if PY_MAJOR_VERSION < 3
-  __pyx_m = Py_InitModule4("__subsample", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+  __pyx_m = Py_InitModule4("_phylogenetic", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
   #else
   __pyx_m = PyModule_Create(&__pyx_moduledef);
   #endif
@@ -3910,25 +5117,25 @@ PyMODINIT_FUNC PyInit___subsample(void)
   #endif
   if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
   /*--- Initialize various global constants etc. ---*/
-  if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_InitGlobals() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
   if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
-  if (__pyx_module_is_main_skbio__stats____subsample) {
-    if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  if (__pyx_module_is_main_skbio__diversity___phylogenetic) {
+    if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   #if PY_MAJOR_VERSION >= 3
   {
     PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    if (!PyDict_GetItemString(modules, "skbio.stats.__subsample")) {
-      if (unlikely(PyDict_SetItemString(modules, "skbio.stats.__subsample", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!PyDict_GetItemString(modules, "skbio.diversity._phylogenetic")) {
+      if (unlikely(PyDict_SetItemString(modules, "skbio.diversity._phylogenetic", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
   }
   #endif
   /*--- Builtin init code ---*/
-  if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_InitCachedBuiltins() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Constants init code ---*/
-  if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_InitCachedConstants() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Global init code ---*/
   /*--- Variable export code ---*/
   /*--- Function export code ---*/
@@ -3945,46 +5152,76 @@ PyMODINIT_FUNC PyInit___subsample(void)
   __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 864; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Variable import code ---*/
   /*--- Function import code ---*/
   /*--- Execution code ---*/
+  #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+  if (__Pyx_patch_abc() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
 
-  /* "skbio/stats/__subsample.pyx":11
- * from __future__ import absolute_import, division, print_function
+  /* "skbio/diversity/_phylogenetic.pyx":9
+ * # ----------------------------------------------------------------------------
  * 
  * import numpy as np             # <<<<<<<<<<<<<<
- * cimport numpy as cnp
- * 
+ * cimport numpy as np
+ * cimport cython
  */
-  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "skbio/stats/__subsample.pyx":15
+  /* "skbio/diversity/_phylogenetic.pyx":13
+ * cimport cython
  * 
+ * DTYPE = np.int64             # <<<<<<<<<<<<<<
+ * ctypedef np.int64_t DTYPE_t
  * 
- * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
- *     cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
- *     cdef:
  */
-  __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement, NULL, __pyx_n_s_skbio_stats___subsample); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_subsample_counts_without_replac, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_int64); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":19
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * def _tip_distances(np.ndarray[np.double_t, ndim=1] a, object t,             # <<<<<<<<<<<<<<
+ *                    np.ndarray[DTYPE_t, ndim=1] tip_indices):
+ *     """Sets each tip to its distance from the root
+ */
+  __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_5skbio_9diversity_13_phylogenetic_1_tip_distances, NULL, __pyx_n_s_skbio_diversity__phylogenetic); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_tip_distances, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+  /* "skbio/diversity/_phylogenetic.pyx":143
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * def _nodes_by_counts(np.ndarray counts,             # <<<<<<<<<<<<<<
+ *                      np.ndarray tip_ids,
+ *                      dict indexed):
+ */
+  __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_5skbio_9diversity_13_phylogenetic_3_nodes_by_counts, NULL, __pyx_n_s_skbio_diversity__phylogenetic); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_nodes_by_counts, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/stats/__subsample.pyx":1
+  /* "skbio/diversity/_phylogenetic.pyx":1
  * # ----------------------------------------------------------------------------             # <<<<<<<<<<<<<<
  * # Copyright (c) 2013--, scikit-bio development team.
  * #
  */
-  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
+  /* "../../../../../.miniconda/envs/skbio/lib/python3.4/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -3997,13 +5234,14 @@ PyMODINIT_FUNC PyInit___subsample(void)
   goto __pyx_L0;
   __pyx_L1_error:;
   __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
   if (__pyx_m) {
     if (__pyx_d) {
-      __Pyx_AddTraceback("init skbio.stats.__subsample", __pyx_clineno, __pyx_lineno, __pyx_filename);
+      __Pyx_AddTraceback("init skbio.diversity._phylogenetic", __pyx_clineno, __pyx_lineno, __pyx_filename);
     }
     Py_DECREF(__pyx_m); __pyx_m = 0;
   } else if (!PyErr_Occurred()) {
-    PyErr_SetString(PyExc_ImportError, "init skbio.stats.__subsample");
+    PyErr_SetString(PyExc_ImportError, "init skbio.diversity._phylogenetic");
   }
   __pyx_L0:;
   __Pyx_RefNannyFinishContext();
@@ -4758,23 +5996,6 @@ static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
   __Pyx_ReleaseBuffer(info);
 }
 
-static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
-    PyObject *result;
-#if CYTHON_COMPILING_IN_CPYTHON
-    result = PyDict_GetItem(__pyx_d, name);
-    if (likely(result)) {
-        Py_INCREF(result);
-    } else {
-#else
-    result = PyObject_GetItem(__pyx_d, name);
-    if (!result) {
-        PyErr_Clear();
-#endif
-        result = __Pyx_GetBuiltinName(name);
-    }
-    return result;
-}
-
 #if CYTHON_COMPILING_IN_CPYTHON
 static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
     PyObject *result;
@@ -4794,28 +6015,6 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg
 }
 #endif
 
-static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
-    if (unlikely(!type)) {
-        PyErr_SetString(PyExc_SystemError, "Missing type object");
-        return 0;
-    }
-    if (likely(PyObject_TypeCheck(obj, type)))
-        return 1;
-    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
-                 Py_TYPE(obj)->tp_name, type->tp_name);
-    return 0;
-}
-
-static void __Pyx_RaiseBufferFallbackError(void) {
-  PyErr_SetString(PyExc_ValueError,
-     "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
-}
-
-static void __Pyx_RaiseBufferIndexError(int axis) {
-  PyErr_Format(PyExc_IndexError,
-     "Out of bounds on buffer access (axis %d)", axis);
-}
-
 #if CYTHON_COMPILING_IN_CPYTHON
 static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
     PyObject *self, *result;
@@ -4860,106 +6059,62 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObjec
 }
 #else
 static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
-    PyObject* args = PyTuple_Pack(1, arg);
-    return (likely(args)) ? __Pyx_PyObject_Call(func, args, NULL) : NULL;
+    PyObject *result;
+    PyObject *args = PyTuple_Pack(1, arg);
+    if (unlikely(!args)) return NULL;
+    result = __Pyx_PyObject_Call(func, args, NULL);
+    Py_DECREF(args);
+    return result;
 }
 #endif
 
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
-        PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
-        PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
-        int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
 #if CYTHON_COMPILING_IN_CPYTHON
-    PyMappingMethods* mp;
-#if PY_MAJOR_VERSION < 3
-    PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
-    if (likely(ms && ms->sq_slice)) {
-        if (!has_cstart) {
-            if (_py_start && (*_py_start != Py_None)) {
-                cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
-                if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
-            } else
-                cstart = 0;
-        }
-        if (!has_cstop) {
-            if (_py_stop && (*_py_stop != Py_None)) {
-                cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
-                if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
-            } else
-                cstop = PY_SSIZE_T_MAX;
-        }
-        if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
-            Py_ssize_t l = ms->sq_length(obj);
-            if (likely(l >= 0)) {
-                if (cstop < 0) {
-                    cstop += l;
-                    if (cstop < 0) cstop = 0;
-                }
-                if (cstart < 0) {
-                    cstart += l;
-                    if (cstart < 0) cstart = 0;
-                }
-            } else {
-                if (PyErr_ExceptionMatches(PyExc_OverflowError))
-                    PyErr_Clear();
-                else
-                    goto bad;
-            }
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
+#ifdef __Pyx_CyFunction_USED
+    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
+#else
+    if (likely(PyCFunction_Check(func))) {
+#endif
+        if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
+            return __Pyx_PyObject_CallMethO(func, NULL);
         }
-        return ms->sq_slice(obj, cstart, cstop);
     }
+    return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
+}
 #endif
-    mp = Py_TYPE(obj)->tp_as_mapping;
-    if (likely(mp && mp->mp_subscript))
-#endif
-    {
-        PyObject* result;
-        PyObject *py_slice, *py_start, *py_stop;
-        if (_py_slice) {
-            py_slice = *_py_slice;
-        } else {
-            PyObject* owned_start = NULL;
-            PyObject* owned_stop = NULL;
-            if (_py_start) {
-                py_start = *_py_start;
-            } else {
-                if (has_cstart) {
-                    owned_start = py_start = PyInt_FromSsize_t(cstart);
-                    if (unlikely(!py_start)) goto bad;
-                } else
-                    py_start = Py_None;
-            }
-            if (_py_stop) {
-                py_stop = *_py_stop;
-            } else {
-                if (has_cstop) {
-                    owned_stop = py_stop = PyInt_FromSsize_t(cstop);
-                    if (unlikely(!py_stop)) {
-                        Py_XDECREF(owned_start);
-                        goto bad;
-                    }
-                } else
-                    py_stop = Py_None;
-            }
-            py_slice = PySlice_New(py_start, py_stop, Py_None);
-            Py_XDECREF(owned_start);
-            Py_XDECREF(owned_stop);
-            if (unlikely(!py_slice)) goto bad;
-        }
+
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
+    if (unlikely(!type)) {
+        PyErr_SetString(PyExc_SystemError, "Missing type object");
+        return 0;
+    }
+    if (likely(PyObject_TypeCheck(obj, type)))
+        return 1;
+    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
+                 Py_TYPE(obj)->tp_name, type->tp_name);
+    return 0;
+}
+
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
+    PyObject *result;
 #if CYTHON_COMPILING_IN_CPYTHON
-        result = mp->mp_subscript(obj, py_slice);
+    result = PyDict_GetItem(__pyx_d, name);
+    if (likely(result)) {
+        Py_INCREF(result);
+    } else {
 #else
-        result = PyObject_GetItem(obj, py_slice);
+    result = PyObject_GetItem(__pyx_d, name);
+    if (!result) {
+        PyErr_Clear();
 #endif
-        if (!_py_slice) {
-            Py_DECREF(py_slice);
-        }
-        return result;
+        result = __Pyx_GetBuiltinName(name);
     }
-    PyErr_Format(PyExc_TypeError,
-        "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
-bad:
-    return NULL;
+    return result;
+}
+
+static void __Pyx_RaiseBufferFallbackError(void) {
+  PyErr_SetString(PyExc_ValueError,
+     "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
 }
 
 static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
@@ -4993,6 +6148,87 @@ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyOb
 #endif
 }
 
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
+    PyObject *r;
+    if (!j) return NULL;
+    r = PyObject_GetItem(o, j);
+    Py_DECREF(j);
+    return r;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+                                                              CYTHON_NCP_UNUSED int wraparound,
+                                                              CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
+    if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
+        PyObject *r = PyList_GET_ITEM(o, i);
+        Py_INCREF(r);
+        return r;
+    }
+    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+    return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+                                                              CYTHON_NCP_UNUSED int wraparound,
+                                                              CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
+    if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
+        PyObject *r = PyTuple_GET_ITEM(o, i);
+        Py_INCREF(r);
+        return r;
+    }
+    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+    return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
+                                                     CYTHON_NCP_UNUSED int wraparound,
+                                                     CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    if (is_list || PyList_CheckExact(o)) {
+        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
+        if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
+            PyObject *r = PyList_GET_ITEM(o, n);
+            Py_INCREF(r);
+            return r;
+        }
+    }
+    else if (PyTuple_CheckExact(o)) {
+        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
+        if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
+            PyObject *r = PyTuple_GET_ITEM(o, n);
+            Py_INCREF(r);
+            return r;
+        }
+    } else {
+        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
+        if (likely(m && m->sq_item)) {
+            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
+                Py_ssize_t l = m->sq_length(o);
+                if (likely(l >= 0)) {
+                    i += l;
+                } else {
+                    if (PyErr_ExceptionMatches(PyExc_OverflowError))
+                        PyErr_Clear();
+                    else
+                        return NULL;
+                }
+            }
+            return m->sq_item(o, i);
+        }
+    }
+#else
+    if (is_list || PySequence_Check(o)) {
+        return PySequence_GetItem(o, i);
+    }
+#endif
+    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
 #if PY_MAJOR_VERSION < 3
 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                         CYTHON_UNUSED PyObject *cause) {
@@ -5067,10 +6303,13 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
         if (value && PyExceptionInstance_Check(value)) {
             instance_class = (PyObject*) Py_TYPE(value);
             if (instance_class != type) {
-                if (PyObject_IsSubclass(instance_class, type)) {
-                    type = instance_class;
-                } else {
+                int is_subclass = PyObject_IsSubclass(instance_class, type);
+                if (!is_subclass) {
                     instance_class = NULL;
+                } else if (unlikely(is_subclass == -1)) {
+                    goto bad;
+                } else {
+                    type = instance_class;
                 }
             }
         }
@@ -5130,7 +6369,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
     if (tb) {
 #if CYTHON_COMPILING_IN_PYPY
         PyObject *tmp_type, *tmp_value, *tmp_tb;
-        PyErr_Fetch(tmp_type, tmp_value, tmp_tb);
+        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
         Py_INCREF(tb);
         PyErr_Restore(tmp_type, tmp_value, tb);
         Py_XDECREF(tmp_tb);
@@ -5165,13 +6404,86 @@ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
     PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
 }
 
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+    PyObject *empty_list = 0;
+    PyObject *module = 0;
+    PyObject *global_dict = 0;
+    PyObject *empty_dict = 0;
+    PyObject *list;
+    #if PY_VERSION_HEX < 0x03030000
+    PyObject *py_import;
+    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
+    if (!py_import)
+        goto bad;
+    #endif
+    if (from_list)
+        list = from_list;
+    else {
+        empty_list = PyList_New(0);
+        if (!empty_list)
+            goto bad;
+        list = empty_list;
+    }
+    global_dict = PyModule_GetDict(__pyx_m);
+    if (!global_dict)
+        goto bad;
+    empty_dict = PyDict_New();
+    if (!empty_dict)
+        goto bad;
+    {
+        #if PY_MAJOR_VERSION >= 3
+        if (level == -1) {
+            if (strchr(__Pyx_MODULE_NAME, '.')) {
+                #if PY_VERSION_HEX < 0x03030000
+                PyObject *py_level = PyInt_FromLong(1);
+                if (!py_level)
+                    goto bad;
+                module = PyObject_CallFunctionObjArgs(py_import,
+                    name, global_dict, empty_dict, list, py_level, NULL);
+                Py_DECREF(py_level);
+                #else
+                module = PyImport_ImportModuleLevelObject(
+                    name, global_dict, empty_dict, list, 1);
+                #endif
+                if (!module) {
+                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
+                        goto bad;
+                    PyErr_Clear();
+                }
+            }
+            level = 0;
+        }
+        #endif
+        if (!module) {
+            #if PY_VERSION_HEX < 0x03030000
+            PyObject *py_level = PyInt_FromLong(level);
+            if (!py_level)
+                goto bad;
+            module = PyObject_CallFunctionObjArgs(py_import,
+                name, global_dict, empty_dict, list, py_level, NULL);
+            Py_DECREF(py_level);
+            #else
+            module = PyImport_ImportModuleLevelObject(
+                name, global_dict, empty_dict, list, level);
+            #endif
+        }
+    }
+bad:
+    #if PY_VERSION_HEX < 0x03030000
+    Py_XDECREF(py_import);
+    #endif
+    Py_XDECREF(empty_list);
+    Py_XDECREF(empty_dict);
+    return module;
+}
+
 static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
     int start = 0, mid = 0, end = count - 1;
     if (end >= 0 && code_line > entries[end].code_line) {
         return count;
     }
     while (start < end) {
-        mid = (start + end) / 2;
+        mid = start + (end - start) / 2;
         if (code_line < entries[mid].code_line) {
             end = mid;
         } else if (code_line > entries[mid].code_line) {
@@ -5345,95 +6657,22 @@ static void __Pyx_ReleaseBuffer(Py_buffer *view) {
 #endif
 
 
-        static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
-    PyObject *empty_list = 0;
-    PyObject *module = 0;
-    PyObject *global_dict = 0;
-    PyObject *empty_dict = 0;
-    PyObject *list;
-    #if PY_VERSION_HEX < 0x03030000
-    PyObject *py_import;
-    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
-    if (!py_import)
-        goto bad;
-    #endif
-    if (from_list)
-        list = from_list;
-    else {
-        empty_list = PyList_New(0);
-        if (!empty_list)
-            goto bad;
-        list = empty_list;
-    }
-    global_dict = PyModule_GetDict(__pyx_m);
-    if (!global_dict)
-        goto bad;
-    empty_dict = PyDict_New();
-    if (!empty_dict)
-        goto bad;
-    {
-        #if PY_MAJOR_VERSION >= 3
-        if (level == -1) {
-            if (strchr(__Pyx_MODULE_NAME, '.')) {
-                #if PY_VERSION_HEX < 0x03030000
-                PyObject *py_level = PyInt_FromLong(1);
-                if (!py_level)
-                    goto bad;
-                module = PyObject_CallFunctionObjArgs(py_import,
-                    name, global_dict, empty_dict, list, py_level, NULL);
-                Py_DECREF(py_level);
-                #else
-                module = PyImport_ImportModuleLevelObject(
-                    name, global_dict, empty_dict, list, 1);
-                #endif
-                if (!module) {
-                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
-                        goto bad;
-                    PyErr_Clear();
-                }
-            }
-            level = 0;
-        }
-        #endif
-        if (!module) {
-            #if PY_VERSION_HEX < 0x03030000
-            PyObject *py_level = PyInt_FromLong(level);
-            if (!py_level)
-                goto bad;
-            module = PyObject_CallFunctionObjArgs(py_import,
-                name, global_dict, empty_dict, list, py_level, NULL);
-            Py_DECREF(py_level);
-            #else
-            module = PyImport_ImportModuleLevelObject(
-                name, global_dict, empty_dict, list, level);
-            #endif
-        }
-    }
-bad:
-    #if PY_VERSION_HEX < 0x03030000
-    Py_XDECREF(py_import);
-    #endif
-    Py_XDECREF(empty_list);
-    Py_XDECREF(empty_dict);
-    return module;
-}
-
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) {
-    const Py_intptr_t neg_one = (Py_intptr_t) -1, const_zero = 0;
+          static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) {
+    const Py_intptr_t neg_one = (Py_intptr_t) -1, const_zero = (Py_intptr_t) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
         if (sizeof(Py_intptr_t) < sizeof(long)) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(Py_intptr_t) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(Py_intptr_t) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(Py_intptr_t) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(Py_intptr_t) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -5445,21 +6684,21 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) {
 }
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value) {
-    const npy_int64 neg_one = (npy_int64) -1, const_zero = 0;
+    const npy_int64 neg_one = (npy_int64) -1, const_zero = (npy_int64) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
         if (sizeof(npy_int64) < sizeof(long)) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(npy_int64) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(npy_int64) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(npy_int64) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(npy_int64) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(npy_int64) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(npy_int64) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -5470,75 +6709,194 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value) {
     }
 }
 
-#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)       \
-    {                                                                     \
-        func_type value = func_value;                                     \
-        if (sizeof(target_type) < sizeof(func_type)) {                    \
-            if (unlikely(value != (func_type) (target_type) value)) {     \
-                func_type zero = 0;                                       \
-                if (is_unsigned && unlikely(value < zero))                \
-                    goto raise_neg_overflow;                              \
-                else                                                      \
-                    goto raise_overflow;                                  \
-            }                                                             \
-        }                                                                 \
-        return (target_type) value;                                       \
-    }
-
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
+    const long neg_one = (long) -1, const_zero = (long) 0;
+    const int is_unsigned = neg_one > const_zero;
+    if (is_unsigned) {
+        if (sizeof(long) < sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(long) <= sizeof(unsigned long)) {
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+        }
+    } else {
+        if (sizeof(long) <= sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
+        }
+    }
+    {
+        int one = 1; int little = (int)*(unsigned char *)&one;
+        unsigned char *bytes = (unsigned char *)&value;
+        return _PyLong_FromByteArray(bytes, sizeof(long),
+                                     little, !is_unsigned);
+    }
+}
+
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
+    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
+#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
+    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
+#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
+    {\
+        func_type value = func_value;\
+        if (sizeof(target_type) < sizeof(func_type)) {\
+            if (unlikely(value != (func_type) (target_type) value)) {\
+                func_type zero = 0;\
+                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
+                    return (target_type) -1;\
+                if (is_unsigned && unlikely(value < zero))\
+                    goto raise_neg_overflow;\
+                else\
+                    goto raise_overflow;\
+            }\
+        }\
+        return (target_type) value;\
+    }
+
+#if CYTHON_USE_PYLONG_INTERNALS
   #include "longintrepr.h"
- #endif
 #endif
 
-static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) {
-    const Py_intptr_t neg_one = (Py_intptr_t) -1, const_zero = 0;
+static CYTHON_INLINE npy_int64 __Pyx_PyInt_As_npy_int64(PyObject *x) {
+    const npy_int64 neg_one = (npy_int64) -1, const_zero = (npy_int64) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
-        if (sizeof(Py_intptr_t) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyInt_AS_LONG(x))
+        if (sizeof(npy_int64) < sizeof(long)) {
+            __PYX_VERIFY_RETURN_INT(npy_int64, long, PyInt_AS_LONG(x))
         } else {
             long val = PyInt_AS_LONG(x);
             if (is_unsigned && unlikely(val < 0)) {
                 goto raise_neg_overflow;
             }
-            return (Py_intptr_t) val;
+            return (npy_int64) val;
         }
     } else
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (npy_int64) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_int64, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(npy_int64) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int64, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int64) >= 2 * PyLong_SHIFT) {
+                            return (npy_int64) (((((npy_int64)digits[1]) << PyLong_SHIFT) | (npy_int64)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(npy_int64) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int64, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int64) >= 3 * PyLong_SHIFT) {
+                            return (npy_int64) (((((((npy_int64)digits[2]) << PyLong_SHIFT) | (npy_int64)digits[1]) << PyLong_SHIFT) | (npy_int64)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(npy_int64) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int64, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int64) >= 4 * PyLong_SHIFT) {
+                            return (npy_int64) (((((((((npy_int64)digits[3]) << PyLong_SHIFT) | (npy_int64)digits[2]) << PyLong_SHIFT) | (npy_int64)digits[1]) << PyLong_SHIFT) | (npy_int64)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
                 goto raise_neg_overflow;
             }
-            if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, PyLong_AsUnsignedLong(x))
-            } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long long, PyLong_AsUnsignedLongLong(x))
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (npy_int64) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
+            }
+#endif
+            if (sizeof(npy_int64) <= sizeof(unsigned long)) {
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int64, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(npy_int64) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int64, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(Py_intptr_t,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (npy_int64) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(npy_int64, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(npy_int64,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(npy_int64) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int64, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int64) - 1 > 2 * PyLong_SHIFT) {
+                            return (npy_int64) (((npy_int64)-1)*(((((npy_int64)digits[1]) << PyLong_SHIFT) | (npy_int64)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(npy_int64) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int64, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int64) - 1 > 2 * PyLong_SHIFT) {
+                            return (npy_int64) ((((((npy_int64)digits[1]) << PyLong_SHIFT) | (npy_int64)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(npy_int64) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int64, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int64) - 1 > 3 * PyLong_SHIFT) {
+                            return (npy_int64) (((npy_int64)-1)*(((((((npy_int64)digits[2]) << PyLong_SHIFT) | (npy_int64)digits[1]) << PyLong_SHIFT) | (npy_int64)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(npy_int64) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int64, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int64) - 1 > 3 * PyLong_SHIFT) {
+                            return (npy_int64) ((((((((npy_int64)digits[2]) << PyLong_SHIFT) | (npy_int64)digits[1]) << PyLong_SHIFT) | (npy_int64)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(npy_int64) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int64, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int64) - 1 > 4 * PyLong_SHIFT) {
+                            return (npy_int64) (((npy_int64)-1)*(((((((((npy_int64)digits[3]) << PyLong_SHIFT) | (npy_int64)digits[2]) << PyLong_SHIFT) | (npy_int64)digits[1]) << PyLong_SHIFT) | (npy_int64)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(npy_int64) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(npy_int64, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(npy_int64) - 1 > 4 * PyLong_SHIFT) {
+                            return (npy_int64) ((((((((((npy_int64)digits[3]) << PyLong_SHIFT) | (npy_int64)digits[2]) << PyLong_SHIFT) | (npy_int64)digits[1]) << PyLong_SHIFT) | (npy_int64)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
-            if (sizeof(Py_intptr_t) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyLong_AsLong(x))
-            } else if (sizeof(Py_intptr_t) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, long long, PyLong_AsLongLong(x))
+            if (sizeof(npy_int64) <= sizeof(long)) {
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int64, long, PyLong_AsLong(x))
+            } else if (sizeof(npy_int64) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(npy_int64, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -5546,7 +6904,7 @@ static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) {
             PyErr_SetString(PyExc_RuntimeError,
                             "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
 #else
-            Py_intptr_t val;
+            npy_int64 val;
             PyObject *v = __Pyx_PyNumber_Int(x);
  #if PY_MAJOR_VERSION < 3
             if (likely(v) && !PyLong_Check(v)) {
@@ -5566,24 +6924,24 @@ static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) {
                     return val;
             }
 #endif
-            return (Py_intptr_t) -1;
+            return (npy_int64) -1;
         }
     } else {
-        Py_intptr_t val;
+        npy_int64 val;
         PyObject *tmp = __Pyx_PyNumber_Int(x);
-        if (!tmp) return (Py_intptr_t) -1;
-        val = __Pyx_PyInt_As_Py_intptr_t(tmp);
+        if (!tmp) return (npy_int64) -1;
+        val = __Pyx_PyInt_As_npy_int64(tmp);
         Py_DECREF(tmp);
         return val;
     }
 raise_overflow:
     PyErr_SetString(PyExc_OverflowError,
-        "value too large to convert to Py_intptr_t");
-    return (Py_intptr_t) -1;
+        "value too large to convert to npy_int64");
+    return (npy_int64) -1;
 raise_neg_overflow:
     PyErr_SetString(PyExc_OverflowError,
-        "can't convert negative value to Py_intptr_t");
-    return (Py_intptr_t) -1;
+        "can't convert negative value to npy_int64");
+    return (npy_int64) -1;
 }
 
 #if CYTHON_CCOMPLEX
@@ -5827,21 +7185,21 @@ raise_neg_overflow:
 #endif
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
-    const int neg_one = (int) -1, const_zero = 0;
+    const int neg_one = (int) -1, const_zero = (int) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
         if (sizeof(int) < sizeof(long)) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(int) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(int) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(int) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(int) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -5853,7 +7211,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
 }
 
 static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
-    const int neg_one = (int) -1, const_zero = 0;
+    const int neg_one = (int) -1, const_zero = (int) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -5870,36 +7228,125 @@ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(int, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (int) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
+                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
+                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
+                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
                 goto raise_neg_overflow;
             }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (int) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
+            }
+#endif
             if (sizeof(int) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong(x))
-            } else if (sizeof(int) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (int) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(int) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong(x))
-            } else if (sizeof(int) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
+            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -5947,34 +7394,34 @@ raise_neg_overflow:
     return (int) -1;
 }
 
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
-    const long neg_one = (long) -1, const_zero = 0;
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
+    const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
-        if (sizeof(long) < sizeof(long)) {
+        if (sizeof(enum NPY_TYPES) < sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(long) <= sizeof(unsigned long)) {
+        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(long) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
-        if (sizeof(long) <= sizeof(long)) {
+        if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(long) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
         int one = 1; int little = (int)*(unsigned char *)&one;
         unsigned char *bytes = (unsigned char *)&value;
-        return _PyLong_FromByteArray(bytes, sizeof(long),
+        return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
                                      little, !is_unsigned);
     }
 }
 
 static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
-    const long neg_one = (long) -1, const_zero = 0;
+    const long neg_one = (long) -1, const_zero = (long) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -5991,36 +7438,125 @@ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(long, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (long) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
+                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
+                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
+                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
                 goto raise_neg_overflow;
             }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (long) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
+            }
+#endif
             if (sizeof(long) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong(x))
-            } else if (sizeof(long) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (long) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(long) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong(x))
-            } else if (sizeof(long) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
+            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -6202,7 +7738,7 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
     return __Pyx_PyObject_AsStringAndSize(o, &ignore);
 }
 static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
-#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
     if (
 #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
             __Pyx_sys_getdefaultencoding_not_ascii &&
@@ -6243,7 +7779,7 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_
 #endif
     } else
 #endif
-#if !CYTHON_COMPILING_IN_PYPY
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
     if (PyByteArray_Check(o)) {
         *length = PyByteArray_GET_SIZE(o);
         return PyByteArray_AS_STRING(o);
@@ -6273,7 +7809,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
 #else
   if (PyLong_Check(x))
 #endif
-    return Py_INCREF(x), x;
+    return __Pyx_NewRef(x);
   m = Py_TYPE(x)->tp_as_number;
 #if PY_MAJOR_VERSION < 3
   if (m && m->nb_int) {
@@ -6313,18 +7849,55 @@ static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
   Py_ssize_t ival;
   PyObject *x;
 #if PY_MAJOR_VERSION < 3
-  if (likely(PyInt_CheckExact(b)))
-      return PyInt_AS_LONG(b);
+  if (likely(PyInt_CheckExact(b))) {
+    if (sizeof(Py_ssize_t) >= sizeof(long))
+        return PyInt_AS_LONG(b);
+    else
+        return PyInt_AsSsize_t(b);
+  }
 #endif
   if (likely(PyLong_CheckExact(b))) {
-    #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
-     #if CYTHON_USE_PYLONG_INTERNALS
-       switch (Py_SIZE(b)) {
-       case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0];
-       case  0: return 0;
-       case  1: return ((PyLongObject*)b)->ob_digit[0];
-       }
-     #endif
+    #if CYTHON_USE_PYLONG_INTERNALS
+    const digit* digits = ((PyLongObject*)b)->ob_digit;
+    const Py_ssize_t size = Py_SIZE(b);
+    if (likely(__Pyx_sst_abs(size) <= 1)) {
+        ival = likely(size) ? digits[0] : 0;
+        if (size == -1) ival = -ival;
+        return ival;
+    } else {
+      switch (size) {
+         case 2:
+           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+             return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case -2:
+           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+             return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case 3:
+           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+             return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case -3:
+           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+             return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case 4:
+           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+             return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case -4:
+           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+             return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+      }
+    }
     #endif
     return PyLong_AsSsize_t(b);
   }
diff --git a/skbio/diversity/_phylogenetic.pyx b/skbio/diversity/_phylogenetic.pyx
new file mode 100644
index 0000000..91dfbae
--- /dev/null
+++ b/skbio/diversity/_phylogenetic.pyx
@@ -0,0 +1,215 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+DTYPE = np.int64
+ctypedef np.int64_t DTYPE_t
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def _tip_distances(np.ndarray[np.double_t, ndim=1] a, object t,
+                   np.ndarray[DTYPE_t, ndim=1] tip_indices):
+    """Sets each tip to its distance from the root
+
+    Parameters
+    ----------
+    a : np.ndarray of double
+        A matrix in which each row corresponds to a node in ``t``.
+    t : skbio.tree.TreeNode
+        The tree that corresponds to the rows in ``a``.
+    tip_indices : np.ndarray of int
+        The index positions in ``a`` of the tips in ``t``.
+
+    Returns
+    -------
+    np.ndarray of double
+        A matrix in which each row corresponds to a node in ``t``. Only the
+        rows that correspond to tips are nonzero, and the values in these rows
+        are the distance from that tip to the root of the tree.
+    """
+    cdef:
+        object n
+        Py_ssize_t i, p_i, n_rows
+        np.ndarray[np.double_t, ndim=1] mask
+        np.ndarray[np.double_t, ndim=1] tip_ds = a.copy()
+
+    # preorder reduction over the tree to gather distances at the tips
+    n_rows = tip_ds.shape[0]
+    for n in t.preorder(include_self=False):
+        i = n.id
+        p_i = n.parent.id
+
+        tip_ds[i] += tip_ds[p_i]
+
+    # construct a mask that represents the locations of the tips
+    mask = np.zeros(n_rows, dtype=np.double)
+    for i in range(tip_indices.shape[0]):
+        mask[tip_indices[i]] = 1.0
+
+    # apply the mask such that tip_ds only includes values which correspond to
+    # the tips of the tree.
+    for i in range(n_rows):
+        tip_ds[i] *= mask[i]
+
+    return tip_ds
+
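The preorder loop above, followed by the masking, has a compact NumPy equivalent; the sketch below is illustrative only and assumes a TreeNode whose nodes already carry `id` attributes (as `assign_ids`/`to_array` provide):

    import numpy as np

    def tip_distances_sketch(a, t, tip_indices):
        # accumulate branch lengths from the root toward the tips (preorder),
        # then zero every row that does not correspond to a tip
        d = a.copy()
        for node in t.preorder(include_self=False):
            d[node.id] += d[node.parent.id]
        mask = np.zeros(d.shape[0])
        mask[tip_indices] = 1.0
        return d * mask
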
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef _traverse_reduce(np.ndarray[DTYPE_t, ndim=2] child_index,
+                      np.ndarray[DTYPE_t, ndim=2] a):
+    """Apply a[k] = sum[i:j]
+
+    Parameters
+    ----------
+    child_index: np.array of int
+        A matrix in which the first column corresponds to an index position in
+        ``a``, which represents a node in a tree. The second column is the
+        starting index in ``a`` for the node's children, and the third column
+        is the ending index in ``a`` for the node's children.
+    a : np.ndarray of int
+        A matrix of the environment data. Each row corresponds to a node in a
+        tree, and each column corresponds to an environment. On input, it is
+        assumed that only tips have counts.
+
+    Notes
+    -----
+    This is effectively a postorder reduction over the tree. For example,
+    given the following tree:
+
+                            /-A
+                  /E-------|
+                 |          \-B
+        -root----|
+                 |          /-C
+                  \F-------|
+                            \-D
+
+    And assuming counts for [A, B, C, D] in environment FOO of [1, 1, 1, 0] and
+    counts for environment BAR of [0, 1, 1, 1], the input counts matrix ``a``
+    would be:
+
+        [1 0  -> A
+         1 1  -> B
+         1 1  -> C
+         0 1  -> D
+         0 0  -> E
+         0 0  -> F
+         0 0] -> root
+
+    The method will perform the following reduction:
+
+        [1 0     [1 0     [1 0     [1 0
+         1 1      1 1      1 1      1 1
+         1 1      1 1      1 1      1 1
+         0 1  ->  0 1  ->  0 1  ->  0 1
+         0 0      2 1      2 1      2 1
+         0 0      0 0      1 2      1 2
+         0 0]     0 0]     0 0]     3 3]
+
+    The index positions of the above are encoded in ``child_index``, which
+    describes the node to aggregate into, and the start and stop index
+    positions of the node's immediate descendants.
+
+    This method operates in place on ``a``.
+    """
+    cdef:
+        Py_ssize_t i, j, k
+        DTYPE_t node, start, end
+        DTYPE_t n_envs = a.shape[1]
+
+    # possible GPGPU target
+    for i in range(child_index.shape[0]):
+        node = child_index[i, 0]
+        start = child_index[i, 1]
+        end = child_index[i, 2]
+
+        for j in range(start, end + 1):
+            for k in range(n_envs):
+                a[node, k] += a[j, k]
+
+
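The reduction walked through in the Notes can be reproduced by hand; the `child_index` literal below is an assumption built for the seven-node example tree (rows A, B, C, D, E, F, root at indices 0-6), not output captured from `to_array`:

    import numpy as np

    a = np.array([[1, 0], [1, 1], [1, 1], [0, 1],   # tips A, B, C, D
                  [0, 0], [0, 0], [0, 0]],          # E, F, root
                 dtype=np.int64)
    # one row per internal node: (node, first child, last child), postorder
    child_index = np.array([[4, 0, 1], [5, 2, 3], [6, 4, 5]], dtype=np.int64)

    for node, start, end in child_index:
        a[node] += a[start:end + 1].sum(axis=0)

    a   # the root row is now [3, 3], matching the walkthrough above
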
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def _nodes_by_counts(np.ndarray counts,
+                     np.ndarray tip_ids,
+                     dict indexed):
+    """Construct the count array, and the counts up the tree
+
+    Parameters
+    ----------
+    counts : np.array of int
+        A 1D or 2D array in which each row corresponds to the observed counts
+        in an environment. The columns are expected to be in order with
+        respect to `tip_ids`.
+    tip_ids : np.array of str
+        A vector of tip names that correspond to the columns in the `counts`
+        matrix.
+    indexed : dict
+        The result of `index_tree`.
+
+    Returns
+    -------
+    np.array of int
+        The observed counts of every node plus the counts of its descendants.
+
+    """
+    cdef:
+        np.ndarray nodes, observed_ids
+        np.ndarray[DTYPE_t, ndim=2] count_array, counts_t
+        np.ndarray[DTYPE_t, ndim=1] observed_indices, otus_in_nodes
+        Py_ssize_t i, j
+        set observed_ids_set
+        object n
+        dict node_lookup
+        DTYPE_t n_count_vectors, n_count_otus
+
+    nodes = indexed['name']
+
+    # allow counts to be a vector
+    counts = np.atleast_2d(counts)
+    counts = counts.astype(DTYPE)
+
+    # determine observed IDs. It may be possible to unroll these calls to
+    # squeeze a little more performance
+    observed_indices = counts.sum(0).nonzero()[0]
+    observed_ids = tip_ids[observed_indices]
+    observed_ids_set = set(observed_ids)
+
+    # construct mappings of the observed to their positions in the node array
+    node_lookup = {}
+    for i in range(nodes.shape[0]):
+        n = nodes[i]
+        if n in observed_ids_set:
+            node_lookup[n] = i
+
+    # determine the positions of the observed IDs in nodes
+    otus_in_nodes = np.zeros(observed_ids.shape[0], dtype=DTYPE)
+    for i in range(observed_ids.shape[0]):
+        n = observed_ids[i]
+        otus_in_nodes[i] = node_lookup[n]
+
+    # count_array has a row per node (not tip) and a column per env.
+    n_count_vectors = counts.shape[0]
+    count_array = np.zeros((nodes.shape[0], n_count_vectors), dtype=DTYPE)
+
+    # populate the counts array with the counts of each observation in each
+    # env
+    counts_t = counts.transpose()
+    n_count_otus = otus_in_nodes.shape[0]
+    for i in range(n_count_otus):
+        for j in range(n_count_vectors):
+            count_array[otus_in_nodes[i], j] = counts_t[observed_indices[i], j]
+
+    _traverse_reduce(indexed['child_index'], count_array)
+
+    return count_array
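A plausible end-to-end call for the same example tree, hedged as a sketch (the newick string and counts are illustrative; `to_array` is assumed to supply the 'name' and 'child_index' entries used above):

    import numpy as np
    from io import StringIO
    from skbio import TreeNode
    from skbio.diversity._phylogenetic import _nodes_by_counts

    tree = TreeNode.read(StringIO(u"((A:1,B:1)E:1,(C:1,D:1)F:1)root;"))
    indexed = tree.to_array(nan_length_value=0.0)

    counts = np.array([[1, 1, 1, 0],    # environment FOO
                       [0, 1, 1, 1]])   # environment BAR
    tip_ids = np.array(['A', 'B', 'C', 'D'])

    # one row per node in the indexed tree, one column per environment;
    # internal nodes end up holding the summed counts of their descendants
    count_array = _nodes_by_counts(counts, tip_ids, indexed)
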
diff --git a/skbio/diversity/_util.py b/skbio/diversity/_util.py
new file mode 100644
index 0000000..e54c2a8
--- /dev/null
+++ b/skbio/diversity/_util.py
@@ -0,0 +1,143 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import collections
+
+import numpy as np
+
+from skbio.tree import DuplicateNodeError, MissingNodeError
+from skbio.diversity._phylogenetic import _nodes_by_counts
+
+
+def _validate_counts_vector(counts, suppress_cast=False):
+    """Validate and convert input to an acceptable counts vector type.
+
+    Note: may not always return a copy of `counts`!
+
+    """
+    counts = np.asarray(counts)
+
+    if not suppress_cast:
+        counts = counts.astype(int, casting='safe', copy=False)
+
+    if counts.ndim != 1:
+        raise ValueError("Only 1-D vectors are supported.")
+    elif (counts < 0).any():
+        raise ValueError("Counts vector cannot contain negative values.")
+
+    return counts
+
+
+def _validate_counts_matrix(counts, ids=None, **kwargs):
+    results = []
+
+    # handle case of where counts is a single vector by making it a matrix.
+    # this has to be done before forcing counts into an ndarray because we
+    # don't yet know that all of the entries are of equal length
+    if len(counts) == 0 or not isinstance(counts[0], collections.Iterable):
+        counts = [counts]
+    counts = np.asarray(counts)
+    if counts.ndim > 2:
+        raise ValueError("Only 1-D and 2-D array-like objects can be provided "
+                         "as input. Provided object has %d dimensions." %
+                         counts.ndim)
+
+    if ids is not None and len(counts) != len(ids):
+        raise ValueError(
+            "Number of rows in ``counts`` must be equal to number of provided "
+            "``ids``.")
+
+    # py2-compatible mechanism for specifying a keyword argument when also
+    # passing *args derived from SO answer:
+    # http://stackoverflow.com/a/15302038/3424666
+    suppress_cast = kwargs.pop('suppress_cast', False)
+
+    lens = []
+    for v in counts:
+        results.append(_validate_counts_vector(v, suppress_cast))
+        lens.append(len(v))
+    if len(set(lens)) > 1:
+        raise ValueError("All rows in ``counts`` must be of equal length.")
+
+    return np.asarray(results)
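Toy calls that illustrate the two validators above (the comments restate the behaviour coded there, not captured output):

    _validate_counts_vector([0, 2, 1, 3])         # returns array([0, 2, 1, 3])
    _validate_counts_vector([0, -1, 2])           # ValueError: negative values

    _validate_counts_matrix([0, 2, 1, 3]).shape   # (1, 4): a vector becomes one row
    _validate_counts_matrix([[0, 2, 1], [1, 3]])  # ValueError: unequal row lengths
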
+
+
+def _validate_otu_ids_and_tree(counts, otu_ids, tree):
+
+    len_otu_ids = len(otu_ids)
+    set_otu_ids = set(otu_ids)
+    if len_otu_ids != len(set_otu_ids):
+        raise ValueError("``otu_ids`` cannot contain duplicated ids.")
+
+    if len(counts) != len_otu_ids:
+        raise ValueError("``otu_ids`` must be the same length as ``counts`` "
+                         "vector(s).")
+
+    if len(tree.root().children) == 0:
+        raise ValueError("``tree`` must contain more than just a root node.")
+
+    if len(tree.root().children) > 2:
+        # this is an imperfect check for whether the tree is rooted or not.
+        # can this be improved?
+        raise ValueError("``tree`` must be rooted.")
+
+    # all nodes (except the root node) have corresponding branch lengths
+    # all tip names in tree are unique
+    # all otu_ids correspond to tip names in tree
+    branch_lengths = []
+    tip_names = []
+    for e in tree.traverse():
+        if not e.is_root():
+            branch_lengths.append(e.length)
+        if e.is_tip():
+            tip_names.append(e.name)
+    set_tip_names = set(tip_names)
+    if len(tip_names) != len(set_tip_names):
+        raise DuplicateNodeError("All tip names must be unique.")
+    if np.array([l is None for l in branch_lengths]).any():
+        raise ValueError("All non-root nodes in ``tree`` must have a branch "
+                         "length.")
+    missing_tip_names = set_otu_ids - set_tip_names
+    if missing_tip_names != set():
+        n_missing_tip_names = len(missing_tip_names)
+        raise MissingNodeError("All ``otu_ids`` must be present as tip names "
+                               "in ``tree``. ``otu_ids`` not corresponding to "
+                               "tip names (n=%d): %s" %
+                               (n_missing_tip_names,
+                                " ".join(missing_tip_names)))
+
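A sketch of these checks against a small rooted tree; the newick string and ids are assumptions made for illustration:

    from io import StringIO
    from skbio import TreeNode

    tree = TreeNode.read(StringIO(u"((A:0.1,B:0.2)X:0.3,(C:0.4,D:0.5)Y:0.6)root;"))

    _validate_otu_ids_and_tree([1, 2], ['A', 'B'], tree)  # passes silently
    _validate_otu_ids_and_tree([1, 2], ['A', 'Z'], tree)  # MissingNodeError: 'Z' is not a tip
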
+
+def _vectorize_counts_and_tree(counts, otu_ids, tree):
+    """ Index tree and convert counts to np.array in corresponding order
+    """
+    tree_index = tree.to_array(nan_length_value=0.0)
+    otu_ids = np.asarray(otu_ids)
+    counts = np.atleast_2d(counts)
+    counts_by_node = _nodes_by_counts(counts, otu_ids, tree_index)
+    branch_lengths = tree_index['length']
+
+    # branch_lengths is just a reference to the array inside of tree_index,
+    # but it's used so much that it's convenient to just pull it out here.
+    return counts_by_node.T, tree_index, branch_lengths
+
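The three return values are typically combined downstream; as a sketch of the idea (reusing the toy `counts`, `tip_ids` and `tree` from the `_nodes_by_counts` example earlier, and not a claim about how `faith_pd` itself is written):

    counts_by_node, tree_index, branch_lengths = _vectorize_counts_and_tree(
        counts, tip_ids, tree)

    # total branch length leading to OTUs observed in the first count vector,
    # i.e. a Faith's PD-style quantity
    observed = counts_by_node[0] > 0
    pd = (branch_lengths * observed).sum()
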
+
+def _get_phylogenetic_kwargs(counts, **kwargs):
+    try:
+        otu_ids = kwargs.pop('otu_ids')
+    except KeyError:
+        raise ValueError("``otu_ids`` is required for phylogenetic diversity "
+                         "metrics.")
+    try:
+        tree = kwargs.pop('tree')
+    except KeyError:
+        raise ValueError("``tree`` is required for phylogenetic diversity "
+                         "metrics.")
+
+    return otu_ids, tree, kwargs
diff --git a/skbio/diversity/alpha/__init__.py b/skbio/diversity/alpha/__init__.py
index a196652..d37a936 100644
--- a/skbio/diversity/alpha/__init__.py
+++ b/skbio/diversity/alpha/__init__.py
@@ -4,55 +4,9 @@ Alpha diversity measures (:mod:`skbio.diversity.alpha`)
 
 .. currentmodule:: skbio.diversity.alpha
 
-This package provides implementations of various alpha diversity measures,
-including measures of richness, dominance, and evenness. Some functions
-generate confidence intervals (CIs). These functions have the suffix ``_ci``.
-
-All alpha diversity measures accept a vector of counts within a single sample,
-where each count is, for example, the number of observations of a particular
-Operational Taxonomic Unit, or OTU. We use the term "OTU" here very loosely, as
-these could be counts of any type of feature/observation (e.g., bacterial
-species). We'll refer to this vector as the *counts vector* or simply *counts*
-throughout the documentation.
-
-The counts vector must be one-dimensional and contain integers representing the
-number of individuals seen (or *counted*) for a particular OTU. Negative values
-are not allowed; the counts vector may only contain integers greater than or
-equal to zero.
-
-The counts vector is `array_like`: anything that can be converted into a 1-D
-numpy array is acceptable input. For example, you can provide a numpy array or
-a native Python list and the results should be identical.
-
-If the input to an alpha diversity measure does not meet the above
-requirements, the function will raise either a ``ValueError`` or a
-``TypeError``, depending on the condition that is violated.
-
-.. note:: There are different ways that samples are represented in the
-   ecological literature and in related software. The alpha diversity measures
-   provided here *always* assume that the input contains abundance data: each
-   count represents the number of individuals seen for a particular OTU in the
-   sample. For example, if you have two OTUs, where 3 individuals were observed
-   from one of the OTUs and only a single individual was observed from the
-   other, you could represent this data in the following forms (among others):
-
-   As a vector of counts. This is the expected type of input for the alpha
-   diversity measures in this module. There are 3 individuals from the OTU at
-   index 0, and 1 individual from the OTU at index 1:
-
-   >>> counts = [3, 1]
-
-   As a vector of indices. The OTU at index 0 is observed 3 times, while the
-   OTU at index 1 is observed 1 time:
-
-   >>> indices = [0, 0, 0, 1]
-
-   As a vector of frequencies. We have 1 OTU that is a singleton and 1 OTU that
-   is a tripleton. We do not have any 0-tons or doubletons:
-
-   >>> frequencies = [0, 1, 0, 1]
-
-   Always use the first representation (a counts vector) with this module.
+This package provides implementations of alpha diversity measures, including
+measures of richness, dominance, and evenness. Some functions generate
+confidence intervals (CIs). These functions have the suffix ``_ci``.
 
 Functions
 ---------
@@ -68,8 +22,8 @@ Functions
    dominance
    doubles
    enspie
-   equitability
    esty_ci
+   faith_pd
    fisher_alpha
    gini_index
    goods_coverage
@@ -84,6 +38,7 @@ Functions
    michaelis_menten_fit
    observed_otus
    osd
+   pielou_e
    robbins
    shannon
    simpson
@@ -91,43 +46,6 @@ Functions
    singles
    strong
 
-Examples
---------
-
->>> import numpy as np
-
-Assume we have the following abundance data for a sample, represented as a
-counts vector:
-
->>> counts = [1, 0, 0, 4, 1, 2, 3, 0]
-
-We can count the number of OTUs:
-
->>> observed_otus(counts)
-5
-
-Note that OTUs with counts of zero are ignored.
-
-In the previous example, we provided a Python list as input. We can also
-provide other types of input that are `array_like`:
-
->>> observed_otus((1, 0, 0, 4, 1, 2, 3, 0)) # tuple
-5
->>> observed_otus(np.array([1, 0, 0, 4, 1, 2, 3, 0])) # numpy array
-5
-
-All of the alpha diversity measures work in this manner.
-
-Other metrics include ``singles``, which tells us how many OTUs are observed
-exactly one time (i.e., are *singleton* OTUs), and ``doubles``, which tells us
-how many OTUs are observed exactly two times (i.e., are *doubleton* OTUs).
-Let's see how many singletons and doubletons there are in the sample:
-
->>> singles(counts)
-2
->>> doubles(counts)
-1
-
 """
 
 # ----------------------------------------------------------------------------
@@ -144,20 +62,23 @@ from skbio.util import TestRunner
 
 from ._ace import ace
 from ._chao1 import chao1, chao1_ci
+from ._faith_pd import faith_pd
 from ._base import (
-    berger_parker_d, brillouin_d, dominance, doubles, enspie, equitability,
-    esty_ci, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q, margalef,
-    mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit, observed_otus,
-    osd, robbins, shannon, simpson, simpson_e, singles, strong)
+    berger_parker_d, brillouin_d, dominance, doubles, enspie,
+    esty_ci, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q,
+    margalef, mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit,
+    observed_otus, osd, pielou_e, robbins, shannon, simpson, simpson_e,
+    singles, strong)
 from ._gini import gini_index
 from ._lladser import lladser_pe, lladser_ci
 
-__all__ = ['ace', 'chao1', 'chao1_ci', 'berger_parker_d', 'brillouin_d',
-           'dominance', 'doubles', 'enspie', 'equitability', 'esty_ci',
-           'fisher_alpha', 'goods_coverage', 'heip_e', 'kempton_taylor_q',
-           'margalef', 'mcintosh_d', 'mcintosh_e', 'menhinick',
-           'michaelis_menten_fit', 'observed_otus', 'osd', 'robbins',
-           'shannon', 'simpson', 'simpson_e', 'singles', 'strong',
-           'gini_index', 'lladser_pe', 'lladser_ci']
+
+__all__ = ['ace', 'chao1', 'chao1_ci', 'berger_parker_d',
+           'brillouin_d', 'dominance', 'doubles', 'enspie', 'esty_ci',
+           'faith_pd', 'fisher_alpha', 'gini_index', 'goods_coverage',
+           'heip_e', 'kempton_taylor_q', 'margalef', 'mcintosh_d',
+           'mcintosh_e', 'menhinick', 'michaelis_menten_fit', 'observed_otus',
+           'osd', 'pielou_e', 'robbins', 'shannon', 'simpson', 'simpson_e',
+           'singles', 'strong', 'lladser_pe', 'lladser_ci']
 
 test = TestRunner(__file__).test
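For orientation, the reorganised namespace keeps the flat import style; the counts vector below reuses the example from the module docstring that this diff removes:

    from skbio.diversity.alpha import faith_pd, observed_otus, pielou_e

    counts = [1, 0, 0, 4, 1, 2, 3, 0]
    observed_otus(counts)   # -> 5; OTUs with a count of zero are ignored
    pielou_e(counts)        # Pielou's evenness, new in this release
    # faith_pd additionally requires otu_ids and a tree
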
diff --git a/skbio/diversity/alpha/_ace.py b/skbio/diversity/alpha/_ace.py
index e50cfea..8184791 100644
--- a/skbio/diversity/alpha/_ace.py
+++ b/skbio/diversity/alpha/_ace.py
@@ -10,13 +10,37 @@ from __future__ import absolute_import, division, print_function
 
 import numpy as np
 
-from ._base import _validate
+from skbio.diversity._util import _validate_counts_vector
 from skbio.util._decorator import experimental
 
 
 @experimental(as_of="0.4.0")
 def ace(counts, rare_threshold=10):
-    """Calculate the ACE metric (Abundance-based Coverage Estimator).
+    r"""Calculate the ACE metric (Abundance-based Coverage Estimator).
+
+    The ACE metric is defined as:
+
+    .. math::
+
+       S_{ace}=S_{abund}+\frac{S_{rare}}{C_{ace}}+
+       \frac{F_1}{C_{ace}}\gamma^2_{ace}
+
+    where :math:`S_{abund}` is the number of abundant OTUs (with more than
+    `rare_threshold` individuals) when all samples are pooled,
+    :math:`S_{rare}` is the number of rare OTUs (with less than or equal to
+    `rare_threshold` individuals) when all samples are pooled, :math:`C_{ace}`
+    is the sample abundance coverage estimator, :math:`F_1` is the frequency of
+    singletons, and :math:`\gamma^2_{ace}` is the estimated coefficient of
+    variation for rare OTUs.
+
+    The estimated coefficient of variation is defined as (assuming
+    `rare_threshold` is 10, the default):
+
+    .. math::
+
+       \gamma^2_{ace}=\max\left[\frac{S_{rare}}{C_{ace}}
+       \frac{\sum^{10}_{i=1}{{i\left(i-1\right)}}F_i}
+       {\left(N_{rare}\right)\left(N_{rare}-1\right)} -1,0\right]
 
     Parameters
     ----------
@@ -62,7 +86,7 @@ def ace(counts, rare_threshold=10):
        10:227-246.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     freq_counts = np.bincount(counts)
     s_rare = _otus_rare(freq_counts, rare_threshold)
     singles = freq_counts[1]
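As a quick illustration of the ACE estimator documented above, a minimal usage sketch (the counts below are hypothetical; the returned estimate depends on the singleton and rare-OTU structure of the sample):

    from skbio.diversity.alpha import ace

    counts = [1, 1, 2, 3, 10, 25]   # hypothetical abundances
    estimate = ace(counts)          # rare_threshold=10 by default
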
diff --git a/skbio/diversity/alpha/_base.py b/skbio/diversity/alpha/_base.py
index afa288d..324df3c 100644
--- a/skbio/diversity/alpha/_base.py
+++ b/skbio/diversity/alpha/_base.py
@@ -14,37 +14,23 @@ from scipy.optimize import fmin_powell, minimize_scalar
 
 from skbio.stats import subsample_counts
 from skbio.util._decorator import experimental
-
-
-def _validate(counts, suppress_cast=False):
-    """Validate and convert input to an acceptable counts vector type.
-
-    Note: may not always return a copy of `counts`!
-
-    """
-    counts = np.asarray(counts)
-
-    if not suppress_cast:
-        counts = counts.astype(int, casting='safe', copy=False)
-
-    if counts.ndim != 1:
-        raise ValueError("Only 1-D vectors are supported.")
-    elif (counts < 0).any():
-        raise ValueError("Counts vector cannot contain negative values.")
-
-    return counts
+from skbio.diversity._util import _validate_counts_vector
 
 
 @experimental(as_of="0.4.0")
 def berger_parker_d(counts):
-    """Calculate Berger-Parker dominance.
+    r"""Calculate Berger-Parker dominance.
 
     Berger-Parker dominance is defined as the fraction of the sample that
-    belongs to the most abundant OTUs:
+    belongs to the most abundant OTU:
 
     .. math::
 
-       d = \\frac{N_{max}}{N}
+       d = \frac{N_{max}}{N}
+
+    where :math:`N_{max}` is defined as the number of individuals in the most
+    abundant OTU (or any of the most abundant OTUs in the case of ties), and
+    :math:`N` is defined as the total number of individuals in the sample.
 
     Parameters
     ----------
@@ -67,18 +53,23 @@ def berger_parker_d(counts):
     .. [2] http://www.pisces-conservation.com/sdrhelp/index.html
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return counts.max() / counts.sum()
 
 
 @experimental(as_of="0.4.0")
 def brillouin_d(counts):
-    """Calculate Brillouin index of alpha diversity, which is defined as:
+    r"""Calculate Brillouin index of alpha diversity.
+
+    This is calculated as follows:
 
     .. math::
 
-       HB = \\frac{\\ln N!-\\sum^5_{i=1}{\\ln n_i!}}{N}
+       HB = \frac{\ln N!-\sum^s_{i=1}{\ln n_i!}}{N}
 
+    where :math:`N` is defined as the total number of individuals in the
+    sample, :math:`s` is the number of OTUs, and :math:`n_i` is defined as the
+    number of individuals in the :math:`i^{\text{th}}` OTU.
 
     Parameters
     ----------
@@ -100,7 +91,7 @@ def brillouin_d(counts):
     .. [1] http://www.pisces-conservation.com/sdrhelp/index.html
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     nz = counts[counts.nonzero()]
     n = nz.sum()
     return (gammaln(n + 1) - gammaln(nz + 1).sum()) / n
@@ -108,13 +99,13 @@ def brillouin_d(counts):
 
 @experimental(as_of="0.4.0")
 def dominance(counts):
-    """Calculate dominance.
+    r"""Calculate dominance.
 
     Dominance is defined as
 
     .. math::
 
-       \\sum{p_i^2}
+       \sum{p_i^2}
 
     where :math:`p_i` is the proportion of the entire community that OTU
     :math:`i` represents.
@@ -145,7 +136,7 @@ def dominance(counts):
     .. [1] http://folk.uio.no/ohammer/past/diversity.html
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     freqs = counts / counts.sum()
     return (freqs * freqs).sum()
 
@@ -165,15 +156,22 @@ def doubles(counts):
         Doubleton count.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return (counts == 2).sum()
 
 
 @experimental(as_of="0.4.0")
 def enspie(counts):
-    """Calculate ENS_pie alpha diversity measure.
+    r"""Calculate ENS_pie alpha diversity measure.
 
-    ENS_pie is equivalent to ``1 / dominance``.
+    ENS_pie is equivalent to ``1 / dominance``:
+
+    .. math::
+
+       ENS_{pie} = \frac{1}{\sum_{i=1}^s{p_i^2}}
+
+    where :math:`s` is the number of OTUs and :math:`p_i` is the proportion of
+    the community represented by OTU :math:`i`.
 
     Parameters
     ----------
@@ -200,55 +198,19 @@ def enspie(counts):
        Ecology Letters, Volume 16, Issue Supplement s1, pgs 17-26.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return 1 / dominance(counts)
 
 
 @experimental(as_of="0.4.0")
-def equitability(counts, base=2):
-    """Calculate equitability (Shannon index corrected for number of OTUs).
-
-    Parameters
-    ----------
-    counts : 1-D array_like, int
-        Vector of counts.
-    base : scalar, optional
-        Logarithm base to use in the calculations.
-
-    Returns
-    -------
-    double
-        Measure of equitability.
-
-    See Also
-    --------
-    shannon
-
-    Notes
-    -----
-    The implementation here is based on the description given in the SDR-IV
-    online manual [1]_.
-
-    References
-    ----------
-    .. [1] http://www.pisces-conservation.com/sdrhelp/index.html
-
-    """
-    counts = _validate(counts)
-    numerator = shannon(counts, base)
-    denominator = np.log(observed_otus(counts)) / np.log(base)
-    return numerator / denominator
-
-
-@experimental(as_of="0.4.0")
 def esty_ci(counts):
-    """Calculate Esty's CI.
+    r"""Calculate Esty's CI.
 
     Esty's CI is defined as
 
     .. math::
 
-       F_1/N \\pm z\\sqrt{W}
+       F_1/N \pm z\sqrt{W}
 
     where :math:`F_1` is the number of singleton OTUs, :math:`N` is the total
     number of individuals (sum of abundances for all OTUs), and :math:`z` is a
@@ -259,7 +221,7 @@ def esty_ci(counts):
 
     .. math::
 
-       \\frac{F_1(N-F_1)+2NF_2}{N^3}
+       \frac{F_1(N-F_1)+2NF_2}{N^3}
 
     where :math:`F_2` is the number of doubleton OTUs.
 
@@ -284,7 +246,7 @@ def esty_ci(counts):
        estimator of the coverage of a random sample". Ann Statist 11: 905-912.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
 
     f1 = singles(counts)
     f2 = doubles(counts)
@@ -297,7 +259,17 @@ def esty_ci(counts):
 
 @experimental(as_of="0.4.0")
 def fisher_alpha(counts):
-    """Calculate Fisher's alpha.
+    r"""Calculate Fisher's alpha, a metric of diversity.
+
+    Fisher's alpha is estimated by solving the following equation for
+    :math:`\alpha`:
+
+    .. math::
+
+       S=\alpha\ln(1+\frac{N}{\alpha})
+
+    where :math:`S` is the number of OTUs and :math:`N` is the
+    total number of individuals in the sample.
 
     Parameters
     ----------
@@ -325,7 +297,7 @@ def fisher_alpha(counts):
     .. [1] http://www.pisces-conservation.com/sdrhelp/index.html
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     n = counts.sum()
     s = observed_otus(counts)
 
@@ -349,13 +321,13 @@ def fisher_alpha(counts):
 
 @experimental(as_of="0.4.0")
 def goods_coverage(counts):
-    """Calculate Good's coverage of counts.
+    r"""Calculate Good's coverage of counts.
 
     Good's coverage estimator is defined as
 
     .. math::
 
-       1-\\frac{F_1}{N}
+       1-\frac{F_1}{N}
 
     where :math:`F_1` is the number of singleton OTUs and :math:`N` is the
     total number of individuals (sum of abundances for all OTUs).
@@ -371,7 +343,7 @@ def goods_coverage(counts):
         Good's coverage estimator.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     f1 = singles(counts)
     N = counts.sum()
     return 1 - (f1 / N)
@@ -379,7 +351,16 @@ def goods_coverage(counts):
 
 @experimental(as_of="0.4.0")
 def heip_e(counts):
-    """Calculate Heip's evenness measure.
+    r"""Calculate Heip's evenness measure.
+
+    Heip's evenness is defined as:
+
+    .. math::
+
+       \frac{(e^H-1)}{(S-1)}
+
+    where :math:`H` is the Shannon-Wiener entropy of counts (using logarithm
+    base :math:`e`) and :math:`S` is the number of OTUs in the sample.
 
     Parameters
     ----------
@@ -391,6 +372,11 @@ def heip_e(counts):
     double
         Heip's evenness measure.
 
+    See Also
+    --------
+    shannon
+    pielou_e
+
     Notes
     -----
     The implementation here is based on the description in [1]_.
@@ -401,7 +387,7 @@ def heip_e(counts):
        UK., 54, 555-557.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return ((np.exp(shannon(counts, base=np.e)) - 1) /
             (observed_otus(counts) - 1))
 
@@ -448,7 +434,7 @@ def kempton_taylor_q(counts, lower_quantile=0.25, upper_quantile=0.75):
     .. [2] http://www.pisces-conservation.com/sdrhelp/index.html
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     n = len(counts)
     lower = int(np.ceil(n * lower_quantile))
     upper = int(n * upper_quantile)
@@ -459,15 +445,16 @@ def kempton_taylor_q(counts, lower_quantile=0.25, upper_quantile=0.75):
 
 @experimental(as_of="0.4.0")
 def margalef(counts):
-    """Calculate Margalef's richness index, which is defined as:
+    r"""Calculate Margalef's richness index.
 
-    .. math::
+    Margalef's D is defined as:
 
-       D = \\frac{(S - 1)}{\\ln N}
+    .. math::
 
-    where :math:`S` is the species number and :math:`N` is the
-    total number of individuals (sum of abundances for all OTUs).
+       D = \frac{(S - 1)}{\ln N}
 
+    where :math:`S` is the number of OTUs and :math:`N` is the total number of
+    individuals in the sample.
 
     Assumes log accumulation.
 
@@ -491,27 +478,29 @@ def margalef(counts):
        76-77.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return (observed_otus(counts) - 1) / np.log(counts.sum())
 
 
 @experimental(as_of="0.4.0")
 def mcintosh_d(counts):
-    """Calculate McIntosh dominance index D, which is defined as:
+    r"""Calculate McIntosh dominance index D.
+
+    McIntosh dominance index D is defined as:
 
     .. math::
 
-       D = \\frac{N - U}{N - \\sqrt{N}}
+       D = \frac{N - U}{N - \sqrt{N}}
 
-    where :math:`N` is the total number of individuals (sum of abundances for
-    all OTUs) and :math:`U` is given as:
+    where :math:`N` is the total number of individuals in the sample and
+    :math:`U` is defined as:
 
     .. math::
 
-        U = \\sqrt{\\sum{{n_i}^2}}
+       U = \sqrt{\sum{{n_i}^2}}
 
-    where :math:`n_i` is the sum of abundances for all OTUs in the
-    :math:`i_{th}` species.
+    where :math:`n_i` is the number of individuals in the :math:`i^{\text{th}}`
+    OTU.
 
     Parameters
     ----------
@@ -539,7 +528,7 @@ def mcintosh_d(counts):
     .. [2] http://www.pisces-conservation.com/sdrhelp/index.html
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     u = np.sqrt((counts * counts).sum())
     n = counts.sum()
     return (n - u) / (n - np.sqrt(n))
@@ -547,7 +536,17 @@ def mcintosh_d(counts):
 
 @experimental(as_of="0.4.0")
 def mcintosh_e(counts):
-    """Calculate McIntosh's evenness measure E.
+    r"""Calculate McIntosh's evenness measure E.
+
+    McIntosh evenness measure E is defined as:
+
+    .. math::
+
+       E = \frac{\sqrt{\sum{n_i^2}}}{\sqrt{(N-S+1)^2 + S - 1}}
+
+    where :math:`n_i` is the number of individuals in the :math:`i^{\text{th}}`
+    OTU, :math:`N` is the total number of individuals, and :math:`S` is the
+    number of OTUs in the sample.
 
     Parameters
     ----------
@@ -565,15 +564,16 @@ def mcintosh_e(counts):
 
     Notes
     -----
-    The implementation here is based on the description given in [1]_, *NOT*
+    The implementation here is based on the description given in [1]_, **NOT**
     the one in the SDR-IV online manual, which is wrong.
 
     References
     ----------
-    .. [1] Heip & Engels 1974 p 560.
+    .. [1] Heip & Engels (1974) Comparing Species Diversity and Evenness
+       Indices. p 560.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     numerator = np.sqrt((counts * counts).sum())
     n = counts.sum()
     s = observed_otus(counts)
@@ -583,7 +583,16 @@ def mcintosh_e(counts):
 
 @experimental(as_of="0.4.0")
 def menhinick(counts):
-    """Calculate Menhinick's richness index.
+    r"""Calculate Menhinick's richness index.
+
+    Menhinick's richness index is defined as:
+
+    .. math::
+
+       D_{Mn} = \frac{S}{\sqrt{N}}
+
+    where :math:`S` is the number of OTUs and :math:`N` is the total number of
+    individuals in the sample.
 
     Assumes square-root accumulation.
 
@@ -607,19 +616,19 @@ def menhinick(counts):
        76-77.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return observed_otus(counts) / np.sqrt(counts.sum())
 
 
 @experimental(as_of="0.4.0")
 def michaelis_menten_fit(counts, num_repeats=1, params_guess=None):
-    """Calculate Michaelis-Menten fit to rarefaction curve of observed OTUs.
+    r"""Calculate Michaelis-Menten fit to rarefaction curve of observed OTUs.
 
-    The Michaelis-Menten equation is defined as
+    The Michaelis-Menten equation is defined as:
 
     .. math::
 
-       S=\\frac{nS_{max}}{n+B}
+       S=\frac{nS_{max}}{n+B}
 
     where :math:`n` is the number of individuals and :math:`S` is the number of
     OTUs. This function estimates the :math:`S_{max}` parameter.
@@ -665,7 +674,7 @@ def michaelis_menten_fit(counts, num_repeats=1, params_guess=None):
        Michaelis-Menten equation. Biometrics 43, 793-803.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
 
     n_indiv = counts.sum()
     if params_guess is None:
@@ -705,7 +714,7 @@ def observed_otus(counts):
         Distinct OTU count.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return (counts != 0).sum()
 
 
@@ -735,19 +744,66 @@ def osd(counts):
     on these three measures.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return observed_otus(counts), singles(counts), doubles(counts)
 
 
+@experimental(as_of="0.4.1")
+def pielou_e(counts):
+    r"""Calculate Pielou's Evenness index J'.
+
+    Pielou's Evenness is defined as:
+
+    .. math::
+
+       J' = \frac{H}{\ln(S)}
+
+    where :math:`H` is the Shannon-Wiener entropy of counts and :math:`S` is
+    the number of OTUs in the sample.
+
+    Parameters
+    ----------
+    counts : 1-D array_like, int
+        Vector of counts.
+
+    Returns
+    -------
+    double
+        Pielou's Evenness.
+
+    See Also
+    --------
+    shannon
+    heip_e
+
+    Notes
+    -----
+    The implementation here is based on the description in Wikipedia [1]_.
+    It was first proposed by E. C. Pielou [2]_ and is similar to Heip's
+    evenness [3]_.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Species_evenness
+    .. [2] Pielou, E. C., 1966. The measurement of diversity in different types
+       of biological collections. Journal of Theoretical Biology, 13, 131-44.
+    .. [3] Heip, C. 1974. A new index measuring evenness. J. Mar. Biol. Ass.
+       UK., 54, 555-557.
+
+    """
+    counts = _validate_counts_vector(counts)
+    return shannon(counts, base=np.e) / np.log(observed_otus(counts))
+
+
 @experimental(as_of="0.4.0")
 def robbins(counts):
-    """Calculate Robbins' estimator for the probability of unobserved outcomes.
+    r"""Calculate Robbins' estimator for the probability of unobserved outcomes.
 
-    Robbins' estimator is defined as
+    Robbins' estimator is defined as:
 
     .. math::
 
-       \\frac{F_1}{n+1}
+       \frac{F_1}{n+1}
 
     where :math:`F_1` is the number of singleton OTUs.
 
@@ -771,13 +827,22 @@ def robbins(counts):
     .. [1] Robbins, H. E (1968). Ann. of Stats. Vol 36, pp. 256-257.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return singles(counts) / counts.sum()
 
 
 @experimental(as_of="0.4.0")
 def shannon(counts, base=2):
-    """Calculate Shannon entropy of counts (H), default in bits.
+    r"""Calculate Shannon entropy of counts, default in bits.
+
+    Shannon-Wiener diversity index is defined as:
+
+    .. math::
+
+       H = -\sum_{i=1}^s\left(p_i\log_2 p_i\right)
+
+    where :math:`s` is the number of OTUs and :math:`p_i` is the proportion of
+    the community represented by OTU :math:`i`.
 
     Parameters
     ----------
@@ -794,7 +859,7 @@ def shannon(counts, base=2):
     Notes
     -----
     The implementation here is based on the description given in the SDR-IV
-    online manual [1]_, except that the default logarithm base used here is 2
+    online manual [1]_ except that the default logarithm base used here is 2
     instead of :math:`e`.
 
     References
@@ -802,7 +867,7 @@ def shannon(counts, base=2):
     .. [1] http://www.pisces-conservation.com/sdrhelp/index.html
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     freqs = counts / counts.sum()
     nonzero_freqs = freqs[freqs.nonzero()]
     return -(nonzero_freqs * np.log(nonzero_freqs)).sum() / np.log(base)
@@ -810,9 +875,16 @@ def shannon(counts, base=2):
 
 @experimental(as_of="0.4.0")
 def simpson(counts):
-    """Calculate Simpson's index.
+    r"""Calculate Simpson's index.
+
+    Simpson's index is defined as ``1 - dominance``:
+
+    .. math::
+
+       1 - \sum{p_i^2}
 
-    Simpson's index is defined as 1 - dominance.
+    where :math:`p_i` is the proportion of the community represented by OTU
+    :math:`i`.
 
     Parameters
     ----------
@@ -839,19 +911,19 @@ def simpson(counts):
     .. [2] http://www.pisces-conservation.com/sdrhelp/index.html
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return 1 - dominance(counts)
 
 
 @experimental(as_of="0.4.0")
 def simpson_e(counts):
-    """Calculate Simpson's evenness measure E.
+    r"""Calculate Simpson's evenness measure E.
 
     Simpson's E is defined as
 
     .. math::
 
-       E=\\frac{1 / D}{S_{obs}}
+       E=\frac{1 / D}{S_{obs}}
 
     where :math:`D` is dominance and :math:`S_{obs}` is the number of observed
     OTUs.
@@ -881,7 +953,7 @@ def simpson_e(counts):
     .. [1] http://www.tiem.utk.edu/~gross/bioed/bealsmodules/simpsonDI.html
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return enspie(counts) / observed_otus(counts)
 
 
@@ -900,13 +972,26 @@ def singles(counts):
         Singleton count.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     return (counts == 1).sum()
 
 
 @experimental(as_of="0.4.0")
 def strong(counts):
-    """Calculate Strong's dominance index (Dw).
+    r"""Calculate Strong's dominance index.
+
+    Strong's dominance index is defined as:
+
+    .. math::
+
+       D_w = \max_i \left[ \frac{b_i}{N} - \frac{i}{S} \right]
+
+    where :math:`b_i` is the cumulative abundance of the :math:`i` most
+    abundant OTUs (i.e., OTU abundances ranked from largest to smallest and
+    summed cumulatively), :math:`N` is the total number of individuals in
+    the sample, and :math:`S` is the number of OTUs in the sample. The
+    expression in brackets is computed for all OTUs, and :math:`\max_i`
+    denotes the maximum value over all OTUs.
 
     Parameters
     ----------
@@ -930,7 +1015,7 @@ def strong(counts):
     .. [2] http://www.pisces-conservation.com/sdrhelp/index.html
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     n = counts.sum()
     s = observed_otus(counts)
     i = np.arange(1, len(counts) + 1)
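Several of the definitions above collapse to simple closed forms for a perfectly even sample, which gives a compact sanity check on the formulas in this file; a minimal sketch (the counts vector is hypothetical):

    import numpy as np
    from skbio.diversity.alpha import (dominance, enspie, heip_e, pielou_e,
                                       shannon, simpson)

    counts = [2, 2, 2, 2]           # four equally abundant OTUs, p_i = 0.25
    dominance(counts)               # sum(p_i**2) = 0.25
    simpson(counts)                 # 1 - dominance = 0.75
    enspie(counts)                  # 1 / dominance = 4.0
    h = shannon(counts, base=np.e)  # ln(4)
    pielou_e(counts)                # H / ln(S) = ln(4) / ln(4) = 1.0
    heip_e(counts)                  # (e**H - 1) / (S - 1) = 3 / 3 = 1.0
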
diff --git a/skbio/diversity/alpha/_chao1.py b/skbio/diversity/alpha/_chao1.py
index 4c3c93d..f85b520 100644
--- a/skbio/diversity/alpha/_chao1.py
+++ b/skbio/diversity/alpha/_chao1.py
@@ -10,13 +10,14 @@ from __future__ import absolute_import, division, print_function
 
 import numpy as np
 
-from ._base import _validate, osd
+from ._base import osd
+from skbio.diversity._util import _validate_counts_vector
 from skbio.util._decorator import experimental
 
 
 @experimental(as_of="0.4.0")
 def chao1(counts, bias_corrected=True):
-    """Calculate chao1 richness estimator.
+    r"""Calculate chao1 richness estimator.
 
     Uses the bias-corrected version unless `bias_corrected` is ``False`` *and*
     there are both singletons and doubletons.
@@ -46,7 +47,7 @@ def chao1(counts, bias_corrected=True):
 
     .. math::
 
-       chao1=S_{obs}+\\frac{F_1^2}{2F_2}
+       chao1=S_{obs}+\frac{F_1^2}{2F_2}
 
     where :math:`F_1` and :math:`F_2` are the count of singletons and
     doubletons, respectively.
@@ -55,7 +56,7 @@ def chao1(counts, bias_corrected=True):
 
     .. math::
 
-       chao1=S_{obs}+\\frac{F_1(F_1-1)}{2(F_2+1)}
+       chao1=S_{obs}+\frac{F_1(F_1-1)}{2(F_2+1)}
 
     References
     ----------
@@ -63,7 +64,7 @@ def chao1(counts, bias_corrected=True):
        a population. Scandinavian Journal of Statistics 11, 265-270.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     o, s, d = osd(counts)
 
     if not bias_corrected and s and d:
@@ -117,7 +118,7 @@ def chao1_ci(counts, bias_corrected=True, zscore=1.96):
     .. [1] http://viceroy.eeb.uconn.edu/estimates/
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     o, s, d = osd(counts)
     if s:
         chao = chao1(counts, bias_corrected)
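To make the two chao1 forms above concrete, a small worked example (the counts are hypothetical; the arithmetic follows the formulas in the docstring):

    from skbio.diversity.alpha import chao1, osd

    counts = [5, 4, 3, 2, 2, 1, 1, 1]    # hypothetical abundances
    osd(counts)                          # (8, 3, 2): S_obs, F_1, F_2
    chao1(counts)                        # bias-corrected: 8 + 3*2/(2*(2+1)) = 9.0
    chao1(counts, bias_corrected=False)  # classic form: 8 + 3**2/(2*2) = 10.25
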
diff --git a/skbio/diversity/alpha/_faith_pd.py b/skbio/diversity/alpha/_faith_pd.py
new file mode 100644
index 0000000..f868600
--- /dev/null
+++ b/skbio/diversity/alpha/_faith_pd.py
@@ -0,0 +1,143 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from skbio.util._decorator import experimental
+from skbio.diversity._util import (_validate_counts_vector,
+                                   _validate_otu_ids_and_tree,
+                                   _vectorize_counts_and_tree)
+
+
+def _faith_pd(counts_by_node, branch_lengths):
+    return (branch_lengths * (counts_by_node > 0)).sum()
+
+
+@experimental(as_of="0.4.1")
+def faith_pd(counts, otu_ids, tree, validate=True):
+    """ Compute Faith's phylogenetic diversity metric (PD)
+
+    Parameters
+    ----------
+    counts : 1-D array_like, int
+        Vector of counts/abundances of OTUs for one sample.
+    otu_ids: list, np.array
+        Vector of OTU ids corresponding to tip names in ``tree``. Must be the
+        same length as ``counts``.
+    tree: skbio.TreeNode
+        Tree relating the OTUs in otu_ids. The set of tip names in the tree can
+        be a superset of ``otu_ids``, but not a subset.
+    validate: bool, optional
+        If `False`, validation of the input won't be performed. This step can
+        be slow, so if validation is run elsewhere it can be disabled here.
+        However, invalid input data can lead to invalid results or error
+        messages that are hard to interpret, so this step should not be
+        bypassed unless you are certain that your input data are valid. See
+        :mod:`skbio.diversity` for the description of what validation entails
+        so you can determine if you can safely disable validation.
+
+    Returns
+    -------
+    float
+        The phylogenetic diversity (PD) of the sample.
+
+    Raises
+    ------
+    ValueError, MissingNodeError, DuplicateNodeError
+        If validation fails. Exact error will depend on what was invalid.
+
+    See Also
+    --------
+    skbio.diversity
+    skbio.diversity.alpha_diversity
+
+    Notes
+    -----
+    Faith's phylogenetic diversity, often referred to as PD, was originally
+    described in [1]_.
+
+    If computing Faith's PD for multiple samples, using
+    ``skbio.diversity.alpha_diversity`` will be much faster than calling this
+    function individually on each sample.
+
+    This implementation differs from that in PyCogent (and therefore QIIME
+    versions less than 2.0.0) by imposing a few additional restrictions on the
+    inputs. First, the input tree must be rooted. In PyCogent, if an unrooted
+    tree was provided that had a single trifurcating node (a newick convention
+    for unrooted trees) that node was considered the root of the tree. Next,
+    all OTU IDs must be tips in the tree. PyCogent would silently ignore OTU
+    IDs that were not present the tree. To reproduce Faith PD results from
+    PyCogent with scikit-bio, ensure that your PyCogent Faith PD calculations
+    are performed on a rooted tree and that all OTU IDs are present in the
+    tree.
+
+    This implementation of Faith's PD is based on the array-based
+    implementation of UniFrac described in [2]_.
+
+    References
+    ----------
+    .. [1] Faith, D. P. Conservation evaluation and phylogenetic diversity.
+       Biol. Conserv. (1992).
+
+    .. [2] Hamady M, Lozupone C, Knight R. Fast UniFrac: facilitating high-
+       throughput phylogenetic analyses of microbial communities including
+       analysis of pyrosequencing and PhyloChip data.  ISME J. 4(1):17-27
+       (2010).
+
+    Examples
+    --------
+    Assume we have the following abundance data for a sample ``u``,
+    represented as a counts vector. These counts represent the
+    number of times specific Operational Taxonomic Units, or OTUs, were
+    observed in the sample.
+
+    >>> u_counts = [1, 0, 0, 4, 1, 2, 3, 0]
+
+    Because Faith PD is a phylogenetic diversity metric, we need to know which
+    OTU each count corresponds to, which we'll provide as ``otu_ids``.
+
+    >>> otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5', 'OTU6', 'OTU7',
+    ...            'OTU8']
+
+    We also need a phylogenetic tree that relates the OTUs to one another.
+
+    >>> from io import StringIO
+    >>> from skbio import TreeNode
+    >>> tree = TreeNode.read(StringIO(
+    ...                      u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
+    ...                      u'(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
+    ...                      u',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
+
+    We can then compute the Faith PD of the sample.
+
+    >>> from skbio.diversity.alpha import faith_pd
+    >>> pd = faith_pd(u_counts, otu_ids, tree)
+    >>> print(round(pd, 2))
+    6.95
+
+    """
+    counts_by_node, branch_lengths = _setup_faith_pd(
+        counts, otu_ids, tree, validate, single_sample=True)
+
+    return _faith_pd(counts_by_node, branch_lengths)
+
+
+def _setup_faith_pd(counts, otu_ids, tree, validate, single_sample):
+    if validate:
+        if single_sample:
+            # only validate counts when operating in single-sample mode;
+            # they will have already been validated otherwise
+            counts = _validate_counts_vector(counts)
+            _validate_otu_ids_and_tree(counts, otu_ids, tree)
+        else:
+            _validate_otu_ids_and_tree(counts[0], otu_ids, tree)
+
+    counts_by_node, tree_index, branch_lengths = \
+        _vectorize_counts_and_tree(counts, otu_ids, tree)
+
+    return counts_by_node, branch_lengths
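The `_faith_pd` helper above reduces to a single vectorized operation once counts have been projected onto tree nodes; a minimal sketch with hypothetical per-node arrays (these are made-up values, not the actual output of `_vectorize_counts_and_tree`):

    import numpy as np

    # hypothetical: five tree nodes, three of which lead to observed OTUs
    branch_lengths = np.array([0.5, 0.5, 1.0, 0.75, 1.25])
    counts_by_node = np.array([1, 0, 4, 0, 6])

    # Faith's PD: total length of the branches leading to observed OTUs
    pd = (branch_lengths * (counts_by_node > 0)).sum()  # 0.5 + 1.0 + 1.25 = 2.75
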
diff --git a/skbio/diversity/alpha/_gini.py b/skbio/diversity/alpha/_gini.py
index 160e253..6a39edc 100644
--- a/skbio/diversity/alpha/_gini.py
+++ b/skbio/diversity/alpha/_gini.py
@@ -10,19 +10,19 @@ from __future__ import absolute_import, division, print_function
 
 import numpy as np
 
-from ._base import _validate
+from skbio.diversity._util import _validate_counts_vector
 from skbio.util._decorator import experimental
 
 
 @experimental(as_of="0.4.0")
 def gini_index(data, method='rectangles'):
-    """Calculate the Gini index.
+    r"""Calculate the Gini index.
 
     The Gini index is defined as
 
     .. math::
 
-       G=\\frac{A}{A+B}
+       G=\frac{A}{A+B}
 
     where :math:`A` is the area between :math:`y=x` and the Lorenz curve and
     :math:`B` is the area under the Lorenz curve. Simplifies to :math:`1-2B`
@@ -61,13 +61,13 @@ def gini_index(data, method='rectangles'):
 
     .. math::
 
-       dx\\sum_{i=1}^n h_i
+       dx\sum_{i=1}^n h_i
 
     The formula for ``method='trapezoids'`` is
 
     .. math::
 
-       dx(\\frac{h_0+h_n}{2}+\sum_{i=1}^{n-1} h_i)
+       dx(\frac{h_0+h_n}{2}+\sum_{i=1}^{n-1} h_i)
 
     References
     ----------
@@ -77,7 +77,7 @@ def gini_index(data, method='rectangles'):
 
     """
     # Suppress cast to int because this method supports ints and floats.
-    data = _validate(data, suppress_cast=True)
+    data = _validate_counts_vector(data, suppress_cast=True)
     lorenz_points = _lorenz_curve(data)
     B = _lorenz_curve_integrator(lorenz_points, method)
     return 1 - 2 * B
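The trapezoid rule referenced in the gini_index docstring is easy to write out by hand; a minimal sketch over hypothetical, evenly spaced Lorenz-curve heights (this is the formula from the docstring, not the private `_lorenz_curve_integrator` itself):

    import numpy as np

    heights = np.array([0.0, 0.1, 0.3, 0.6, 1.0])  # hypothetical Lorenz-curve y-values
    dx = 1.0 / (len(heights) - 1)                  # evenly spaced x-values

    # B = dx * ((h_0 + h_n) / 2 + sum of interior heights); G = 1 - 2B
    B = dx * ((heights[0] + heights[-1]) / 2 + heights[1:-1].sum())
    gini = 1 - 2 * B                               # 1 - 2 * 0.375 = 0.25
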
diff --git a/skbio/diversity/alpha/_lladser.py b/skbio/diversity/alpha/_lladser.py
index 61d6c02..d59d7a3 100644
--- a/skbio/diversity/alpha/_lladser.py
+++ b/skbio/diversity/alpha/_lladser.py
@@ -10,7 +10,7 @@ from __future__ import absolute_import, division, print_function
 
 import numpy as np
 
-from ._base import _validate
+from skbio.diversity._util import _validate_counts_vector
 from skbio.util._decorator import experimental
 
 
@@ -49,7 +49,7 @@ def lladser_pe(counts, r=10):
        2011.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     sample = _expand_counts(counts)
     np.random.shuffle(sample)
 
@@ -103,7 +103,7 @@ def lladser_ci(counts, r, alpha=0.95, f=10, ci_type='ULCL'):
        2011.
 
     """
-    counts = _validate(counts)
+    counts = _validate_counts_vector(counts)
     sample = _expand_counts(counts)
     np.random.shuffle(sample)
 
diff --git a/skbio/diversity/alpha/tests/data/qiime-191-tt/README.md b/skbio/diversity/alpha/tests/data/qiime-191-tt/README.md
new file mode 100644
index 0000000..c633019
--- /dev/null
+++ b/skbio/diversity/alpha/tests/data/qiime-191-tt/README.md
@@ -0,0 +1 @@
+Files in this directory are the QIIME 1.9.1 "tiny test" files. For detail on how these were created, see skbio/diversity/beta/tests/data/qiime-191-tt/README.md.
diff --git a/skbio/diversity/alpha/tests/data/qiime-191-tt/faith-pd.txt b/skbio/diversity/alpha/tests/data/qiime-191-tt/faith-pd.txt
new file mode 100644
index 0000000..305658f
--- /dev/null
+++ b/skbio/diversity/alpha/tests/data/qiime-191-tt/faith-pd.txt
@@ -0,0 +1,9 @@
+	PD_whole_tree
+f2	0.64607
+f1	0.47803
+f3	0.47803
+f4	0.306525
+p2	1.4239
+p1	0.604145
+t1	0.6574
+t2	0.6574
diff --git a/skbio/diversity/alpha/tests/data/qiime-191-tt/otu-table.tsv b/skbio/diversity/alpha/tests/data/qiime-191-tt/otu-table.tsv
new file mode 100644
index 0000000..4aff943
--- /dev/null
+++ b/skbio/diversity/alpha/tests/data/qiime-191-tt/otu-table.tsv
@@ -0,0 +1,12 @@
+# Constructed from biom file
+#OTU ID	f2	f1	f3	f4	p2	p1	t1	t2
+295053	20	18	18	22	4	0	0	0
+42684	0	0	0	0	1	0	0	0
+None11	1	0	0	0	1	1	0	0
+None7	0	0	0	0	1	0	0	0
+None5	0	0	0	0	1	0	0	0
+None4	0	0	0	0	1	1	0	0
+None3	0	0	0	0	1	0	2	3
+879972	0	0	0	0	9	20	1	4
+None9	0	0	0	0	3	0	19	15
+None8	1	4	4	0	0	0	0	0
diff --git a/skbio/diversity/alpha/tests/data/qiime-191-tt/tree.nwk b/skbio/diversity/alpha/tests/data/qiime-191-tt/tree.nwk
new file mode 100644
index 0000000..199b8ca
--- /dev/null
+++ b/skbio/diversity/alpha/tests/data/qiime-191-tt/tree.nwk
@@ -0,0 +1 @@
+(((879972:0.05039,None3:0.00778)0.980:0.15948,((None11:0.07161,None4:0.06965)0.917:0.09643,(295053:0.06096,42684:0.15599)0.910:0.08898)0.899:0.09227)0.958:0.064315,(None8:0.09606,(None7:0.10435,(None5:0.02626,None9:0.00014)1.000:0.25335)0.753:0.0465):0.075445)root;
diff --git a/skbio/diversity/alpha/tests/test_base.py b/skbio/diversity/alpha/tests/test_base.py
index a5a3593..582eafa 100644
--- a/skbio/diversity/alpha/tests/test_base.py
+++ b/skbio/diversity/alpha/tests/test_base.py
@@ -9,72 +9,32 @@
 from __future__ import absolute_import, division, print_function
 
 from unittest import TestCase, main
+from io import StringIO
 
 import numpy as np
 import numpy.testing as npt
 
+from skbio import TreeNode
 from skbio.diversity.alpha import (
-    berger_parker_d, brillouin_d, dominance, doubles, enspie, equitability,
-    esty_ci, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q, margalef,
-    mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit, observed_otus,
-    osd, robbins, shannon, simpson, simpson_e, singles, strong)
-from skbio.diversity.alpha._base import _validate
+    berger_parker_d, brillouin_d, dominance, doubles, enspie,
+    esty_ci, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q,
+    margalef, mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit,
+    observed_otus, osd, pielou_e, robbins, shannon, simpson, simpson_e,
+    singles, strong)
 
 
 class BaseTests(TestCase):
     def setUp(self):
         self.counts = np.array([0, 1, 1, 4, 2, 5, 2, 4, 1, 2])
-
-    def test_validate(self):
-        # python list
-        obs = _validate([0, 2, 1, 3])
-        npt.assert_array_equal(obs, np.array([0, 2, 1, 3]))
-        self.assertEqual(obs.dtype, int)
-
-        # numpy array (no copy made)
-        data = np.array([0, 2, 1, 3])
-        obs = _validate(data)
-        npt.assert_array_equal(obs, data)
-        self.assertEqual(obs.dtype, int)
-        self.assertTrue(obs is data)
-
-        # single element
-        obs = _validate([42])
-        npt.assert_array_equal(obs, np.array([42]))
-        self.assertEqual(obs.dtype, int)
-        self.assertEqual(obs.shape, (1,))
-
-        # suppress casting to int
-        obs = _validate([42.2, 42.1, 0], suppress_cast=True)
-        npt.assert_array_equal(obs, np.array([42.2, 42.1, 0]))
-        self.assertEqual(obs.dtype, float)
-
-        # all zeros
-        obs = _validate([0, 0, 0])
-        npt.assert_array_equal(obs, np.array([0, 0, 0]))
-        self.assertEqual(obs.dtype, int)
-
-        # all zeros (single value)
-        obs = _validate([0])
-        npt.assert_array_equal(obs, np.array([0]))
-        self.assertEqual(obs.dtype, int)
-
-    def test_validate_invalid_input(self):
-        # wrong dtype
-        with self.assertRaises(TypeError):
-            _validate([0, 2, 1.2, 3])
-
-        # wrong number of dimensions (2-D)
-        with self.assertRaises(ValueError):
-            _validate([[0, 2, 1, 3], [4, 5, 6, 7]])
-
-        # wrong number of dimensions (scalar)
-        with self.assertRaises(ValueError):
-            _validate(1)
-
-        # negative values
-        with self.assertRaises(ValueError):
-            _validate([0, 0, 2, -1, 3])
+        self.sids1 = list('ABCD')
+        self.oids1 = ['OTU%d' % i for i in range(1, 6)]
+        self.t1 = TreeNode.read(StringIO(
+            u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
+            u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
+        self.t1_w_extra_tips = TreeNode.read(
+           StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                    u'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
+                    u')root;'))
 
     def test_berger_parker_d(self):
         self.assertEqual(berger_parker_d(np.array([5])), 1)
@@ -114,10 +74,6 @@ class BaseTests(TestCase):
         exp = 1 / dominance(arr)
         self.assertAlmostEqual(enspie(arr), exp)
 
-    def test_equitability(self):
-        self.assertAlmostEqual(equitability(np.array([5, 5])), 1)
-        self.assertAlmostEqual(equitability(np.array([1, 1, 1, 1, 0])), 1)
-
     def test_esty_ci(self):
         def _diversity(indices, f):
             """Calculate diversity index for each window of size 1.
@@ -251,6 +207,26 @@ class BaseTests(TestCase):
     def test_osd(self):
         self.assertEqual(osd(self.counts), (9, 3, 3))
 
+    def test_pielou_e(self):
+        # Calculate "by hand".
+        arr = np.array([1, 2, 3, 1])
+        h = shannon(arr, np.e)
+        s = 4
+        expected = h / np.log(s)
+        self.assertAlmostEqual(pielou_e(arr), expected)
+
+        self.assertAlmostEqual(pielou_e(self.counts), 0.92485490560)
+
+        self.assertEqual(pielou_e([1, 1]), 1.0)
+        self.assertEqual(pielou_e([1, 1, 1, 1]), 1.0)
+        self.assertEqual(pielou_e([1, 1, 1, 1, 0, 0]), 1.0)
+
+        # Examples from
+        # http://ww2.mdsg.umd.edu/interactive_lessons/biofilm/diverse.htm#3
+        self.assertAlmostEqual(pielou_e([1, 1, 196, 1, 1]), 0.078, 3)
+        self.assertTrue(np.isnan(pielou_e([0, 0, 200, 0, 0])))
+        self.assertTrue(np.isnan(pielou_e([0, 0, 0, 0, 0])))
+
     def test_robbins(self):
         self.assertEqual(robbins(np.array([1, 2, 3, 0, 1])), 2 / 7)
 
diff --git a/skbio/diversity/alpha/tests/test_faith_pd.py b/skbio/diversity/alpha/tests/test_faith_pd.py
new file mode 100644
index 0000000..485428e
--- /dev/null
+++ b/skbio/diversity/alpha/tests/test_faith_pd.py
@@ -0,0 +1,209 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from unittest import TestCase, main
+from io import StringIO
+import os
+
+import numpy as np
+import pandas as pd
+
+from skbio import TreeNode
+from skbio.util import get_data_path
+from skbio.tree import DuplicateNodeError, MissingNodeError
+from skbio.diversity.alpha import faith_pd
+
+
+class FaithPDTests(TestCase):
+
+    def setUp(self):
+        self.counts = np.array([0, 1, 1, 4, 2, 5, 2, 4, 1, 2])
+        self.b1 = np.array([[1, 3, 0, 1, 0],
+                            [0, 2, 0, 4, 4],
+                            [0, 0, 6, 2, 1],
+                            [0, 0, 1, 1, 1]])
+        self.sids1 = list('ABCD')
+        self.oids1 = ['OTU%d' % i for i in range(1, 6)]
+        self.t1 = TreeNode.read(StringIO(
+            u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
+            u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
+        self.t1_w_extra_tips = TreeNode.read(
+           StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                    u'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
+                    u')root;'))
+
+    def test_faith_pd_none_observed(self):
+        actual = faith_pd(np.array([], dtype=int),
+                          np.array([], dtype=int),
+                          self.t1)
+        expected = 0.0
+        self.assertAlmostEqual(actual, expected)
+        actual = faith_pd([0, 0, 0, 0, 0], self.oids1, self.t1)
+        expected = 0.0
+        self.assertAlmostEqual(actual, expected)
+
+    def test_faith_pd_all_observed(self):
+        actual = faith_pd([1, 1, 1, 1, 1], self.oids1, self.t1)
+        expected = sum(n.length for n in self.t1.traverse()
+                       if n.length is not None)
+        self.assertAlmostEqual(actual, expected)
+
+        actual = faith_pd([1, 2, 3, 4, 5], self.oids1, self.t1)
+        expected = sum(n.length for n in self.t1.traverse()
+                       if n.length is not None)
+        self.assertAlmostEqual(actual, expected)
+
+    def test_faith_pd(self):
+        # expected results derived from QIIME 1.9.1, which is a completely
+        # different implementation from skbio's initial phylogenetic
+        # diversity implementation
+        actual = faith_pd(self.b1[0], self.oids1, self.t1)
+        expected = 4.5
+        self.assertAlmostEqual(actual, expected)
+        actual = faith_pd(self.b1[1], self.oids1, self.t1)
+        expected = 4.75
+        self.assertAlmostEqual(actual, expected)
+        actual = faith_pd(self.b1[2], self.oids1, self.t1)
+        expected = 4.75
+        self.assertAlmostEqual(actual, expected)
+        actual = faith_pd(self.b1[3], self.oids1, self.t1)
+        expected = 4.75
+        self.assertAlmostEqual(actual, expected)
+
+    def test_faith_pd_extra_tips(self):
+        # results are the same despite unobserved tips present in the tree
+        actual = faith_pd(self.b1[0], self.oids1, self.t1_w_extra_tips)
+        expected = faith_pd(self.b1[0], self.oids1, self.t1)
+        self.assertAlmostEqual(actual, expected)
+        actual = faith_pd(self.b1[1], self.oids1, self.t1_w_extra_tips)
+        expected = faith_pd(self.b1[1], self.oids1, self.t1)
+        self.assertAlmostEqual(actual, expected)
+        actual = faith_pd(self.b1[2], self.oids1, self.t1_w_extra_tips)
+        expected = faith_pd(self.b1[2], self.oids1, self.t1)
+        self.assertAlmostEqual(actual, expected)
+        actual = faith_pd(self.b1[3], self.oids1, self.t1_w_extra_tips)
+        expected = faith_pd(self.b1[3], self.oids1, self.t1)
+        self.assertAlmostEqual(actual, expected)
+
+    def test_faith_pd_minimal(self):
+        # two tips
+        tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
+        actual = faith_pd([1, 0], ['OTU1', 'OTU2'], tree)
+        expected = 0.25
+        self.assertEqual(actual, expected)
+
+    def test_faith_pd_qiime_tiny_test(self):
+        # the following table and tree are derived from the QIIME 1.9.1
+        # "tiny-test" data
+        tt_table_fp = get_data_path(
+            os.path.join('qiime-191-tt', 'otu-table.tsv'), 'data')
+        tt_tree_fp = get_data_path(
+            os.path.join('qiime-191-tt', 'tree.nwk'), 'data')
+
+        self.q_table = pd.read_csv(tt_table_fp, sep='\t', skiprows=1,
+                                   index_col=0)
+        self.q_tree = TreeNode.read(tt_tree_fp)
+
+        expected_fp = get_data_path(
+            os.path.join('qiime-191-tt', 'faith-pd.txt'), 'data')
+        expected = pd.read_csv(expected_fp, sep='\t', index_col=0)
+        for sid in self.q_table.columns:
+            actual = faith_pd(self.q_table[sid],
+                              otu_ids=self.q_table.index,
+                              tree=self.q_tree)
+            self.assertAlmostEqual(actual, expected['PD_whole_tree'][sid])
+
+    def test_faith_pd_root_not_observed(self):
+        # expected values computed by hand
+        tree = TreeNode.read(
+            StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
+                     u'root;'))
+        otu_ids = ['OTU%d' % i for i in range(1, 5)]
+        # root node not observed, but branch between (OTU1, OTU2) and root
+        # is considered observed
+        actual = faith_pd([1, 1, 0, 0], otu_ids, tree)
+        expected = 0.6
+        self.assertAlmostEqual(actual, expected)
+
+        # root node not observed, but branch between (OTU3, OTU4) and root
+        # is considered observed
+        actual = faith_pd([0, 0, 1, 1], otu_ids, tree)
+        expected = 2.3
+        self.assertAlmostEqual(actual, expected)
+
+    def test_faith_pd_invalid_input(self):
+        # tree has duplicated tip ids
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(DuplicateNodeError, faith_pd, counts, otu_ids,
+                          t)
+
+        # unrooted tree as input
+        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
+                                   u'OTU4:0.7);'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
+
+        # otu_ids has duplicated ids
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU2']
+        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
+
+        # len of vectors not equal
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2']
+        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
+
+        # negative counts
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, -3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
+
+        # tree with no branch lengths
+        t = TreeNode.read(
+            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
+
+        # tree missing some branch lengths
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
+
+        # otu_ids not present in tree
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU42']
+        self.assertRaises(MissingNodeError, faith_pd, counts, otu_ids, t)
+
+if __name__ == "__main__":
+    main()
diff --git a/skbio/diversity/beta/__init__.py b/skbio/diversity/beta/__init__.py
index 610c705..9fb38f4 100644
--- a/skbio/diversity/beta/__init__.py
+++ b/skbio/diversity/beta/__init__.py
@@ -4,15 +4,10 @@ Beta diversity measures (:mod:`skbio.diversity.beta`)
 
 .. currentmodule:: skbio.diversity.beta
 
-This package contains helper functions for working with scipy's pairwise
-distance (``pdist``) functions in scikit-bio, and will eventually be expanded
-to contain pairwise distance/dissimilarity methods that are not implemented
-(or planned to be implemented) in scipy.
-
-The functions in this package currently support applying ``pdist`` functions
-to all pairs of samples in a sample by observation count or abundance matrix
-and returning an ``skbio.DistanceMatrix`` object. This application is
-illustrated below for a few different forms of input.
+This package provides implementations of beta diversity measures for computing
+sample dissimilarity. Users of this package should also explore
+``scipy.spatial.distance.pdist``, as it contains implementations of additional
+beta diversity metrics with interfaces similar to those provided here.
 
 Functions
 ---------
@@ -20,153 +15,8 @@ Functions
 .. autosummary::
    :toctree: generated/
 
-    pw_distances
-    pw_distances_from_table
-
-Examples
---------
-Create a table containing 7 OTUs and 6 samples:
-
-.. plot::
-   :context:
-
-   >>> from skbio.diversity.beta import pw_distances
-   >>> import numpy as np
-   >>> data = [[23, 64, 14, 0, 0, 3, 1],
-   ...         [0, 3, 35, 42, 0, 12, 1],
-   ...         [0, 5, 5, 0, 40, 40, 0],
-   ...         [44, 35, 9, 0, 1, 0, 0],
-   ...         [0, 2, 8, 0, 35, 45, 1],
-   ...         [0, 0, 25, 35, 0, 19, 0]]
-   >>> ids = list('ABCDEF')
-
-   Compute Bray-Curtis distances between all pairs of samples and return a
-   ``DistanceMatrix`` object:
-
-   >>> bc_dm = pw_distances(data, ids, "braycurtis")
-   >>> print(bc_dm)
-   6x6 distance matrix
-   IDs:
-   'A', 'B', 'C', 'D', 'E', 'F'
-   Data:
-   [[ 0.          0.78787879  0.86666667  0.30927835  0.85714286  0.81521739]
-    [ 0.78787879  0.          0.78142077  0.86813187  0.75        0.1627907 ]
-    [ 0.86666667  0.78142077  0.          0.87709497  0.09392265  0.71597633]
-    [ 0.30927835  0.86813187  0.87709497  0.          0.87777778  0.89285714]
-    [ 0.85714286  0.75        0.09392265  0.87777778  0.          0.68235294]
-    [ 0.81521739  0.1627907   0.71597633  0.89285714  0.68235294  0.        ]]
-
-   Compute Jaccard distances between all pairs of samples and return a
-   ``DistanceMatrix`` object:
-
-   >>> j_dm = pw_distances(data, ids, "jaccard")
-   >>> print(j_dm)
-   6x6 distance matrix
-   IDs:
-   'A', 'B', 'C', 'D', 'E', 'F'
-   Data:
-   [[ 0.          0.83333333  1.          1.          0.83333333  1.        ]
-    [ 0.83333333  0.          1.          1.          0.83333333  1.        ]
-    [ 1.          1.          0.          1.          1.          1.        ]
-    [ 1.          1.          1.          0.          1.          1.        ]
-    [ 0.83333333  0.83333333  1.          1.          0.          1.        ]
-    [ 1.          1.          1.          1.          1.          0.        ]]
-
-   Determine if the resulting distance matrices are significantly correlated
-   by computing the Mantel correlation between them. Then determine if the
-   p-value is significant based on an alpha of 0.05:
-
-   >>> from skbio.stats.distance import mantel
-   >>> r, p_value, n = mantel(j_dm, bc_dm)
-   >>> print(r)
-   -0.209362157621
-   >>> print(p_value < 0.05)
-   False
-
-   Compute PCoA for both distance matrices, and then find the Procrustes
-   M-squared value that results from comparing the coordinate matrices.
-
-   >>> from skbio.stats.ordination import PCoA
-   >>> bc_pc = PCoA(bc_dm).scores()
-   >>> j_pc = PCoA(j_dm).scores()
-   >>> from skbio.stats.spatial import procrustes
-   >>> print(procrustes(bc_pc.site, j_pc.site)[2])
-   0.466134984787
-
-   All of this only gets interesting in the context of sample metadata, so
-   let's define some:
-
-   >>> import pandas as pd
-   >>> sample_md = {
-   ...    'A': {'body_site': 'gut', 'subject': 's1'},
-   ...    'B': {'body_site': 'skin', 'subject': 's1'},
-   ...    'C': {'body_site': 'tongue', 'subject': 's1'},
-   ...    'D': {'body_site': 'gut', 'subject': 's2'},
-   ...    'E': {'body_site': 'tongue', 'subject': 's2'},
-   ...    'F': {'body_site': 'skin', 'subject': 's2'}}
-   >>> sample_md = pd.DataFrame.from_dict(sample_md, orient='index')
-   >>> sample_md
-     subject body_site
-   A      s1       gut
-   B      s1      skin
-   C      s1    tongue
-   D      s2       gut
-   E      s2    tongue
-   F      s2      skin
-
-   Now let's plot our PCoA results, coloring each sample by the subject it
-   was taken from:
-
-   >>> fig = bc_pc.plot(sample_md, 'subject',
-   ...                  axis_labels=('PC 1', 'PC 2', 'PC 3'),
-   ...                  title='Samples colored by subject', cmap='jet', s=50)
-
-.. plot::
-   :context:
-
-   We don't see any clustering/grouping of samples. If we were to instead color
-   the samples by the body site they were taken from, we see that the samples
-   form three separate groups:
-
-   >>> import matplotlib.pyplot as plt
-   >>> plt.close('all') # not necessary for normal use
-   >>> fig = bc_pc.plot(sample_md, 'body_site',
-   ...                  axis_labels=('PC 1', 'PC 2', 'PC 3'),
-   ...                  title='Samples colored by body site', cmap='jet', s=50)
-
-Ordination techniques, such as PCoA, are useful for exploratory analysis. The
-next step is to quantify the strength of the grouping/clustering that we see in
-ordination plots. There are many statistical methods available to accomplish
-this; many operate on distance matrices. Let's use ANOSIM to quantify the
-strength of the clustering we see in the ordination plots above, using our
-Bray-Curtis distance matrix and sample metadata.
-
-First test the grouping of samples by subject:
-
->>> from skbio.stats.distance import anosim
->>> results = anosim(bc_dm, sample_md, column='subject', permutations=999)
->>> results['test statistic']
--0.4074074074074075
->>> results['p-value'] < 0.1
-False
-
-The negative value of ANOSIM's R statistic indicates anti-clustering and the
-p-value is insignificant at an alpha of 0.1.
-
-Now let's test the grouping of samples by body site:
-
->>> results = anosim(bc_dm, sample_md, column='body_site', permutations=999)
->>> results['test statistic']
-1.0
->>> results['p-value'] < 0.1
-True
-
-The R statistic of 1.0 indicates strong separation of samples based on body
-site. The p-value is significant at an alpha of 0.1.
-
-References
-----------
-.. [1] http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
+    unweighted_unifrac
+    weighted_unifrac
 
 """
 
@@ -182,8 +32,8 @@ from __future__ import absolute_import, division, print_function
 
 from skbio.util import TestRunner
 
-from ._base import pw_distances, pw_distances_from_table
+from ._unifrac import unweighted_unifrac, weighted_unifrac
 
-__all__ = ["pw_distances", "pw_distances_from_table"]
+__all__ = ["unweighted_unifrac", "weighted_unifrac"]
 
 test = TestRunner(__file__).test
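A minimal usage sketch for the functions now exported here (the counts, OTU ids, and tree are hypothetical; the call signature follows the `unweighted_unifrac` definition later in this patch):

    from io import StringIO
    from skbio import TreeNode
    from skbio.diversity.beta import unweighted_unifrac

    tree = TreeNode.read(StringIO(
        u'((OTU1:0.5,OTU2:0.5):0.5,(OTU3:1.0,OTU4:1.0):0.5)root;'))
    otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4']
    u_counts = [1, 0, 2, 0]
    v_counts = [0, 3, 2, 0]
    d = unweighted_unifrac(u_counts, v_counts, otu_ids, tree)
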
diff --git a/skbio/diversity/beta/_base.py b/skbio/diversity/beta/_base.py
deleted file mode 100644
index 5922503..0000000
--- a/skbio/diversity/beta/_base.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-
-import numpy as np
-from scipy.spatial.distance import pdist, squareform
-
-from skbio.stats.distance import DistanceMatrix
-from skbio.util._decorator import experimental, deprecated
-
-
-@experimental(as_of="0.4.0")
-def pw_distances(counts, ids=None, metric="braycurtis"):
-    """Compute distances between all pairs of columns in a counts matrix
-
-    Parameters
-    ----------
-    counts : 2D array_like of ints or floats
-        Matrix containing count/abundance data where each row contains counts
-        of observations in a given sample.
-    ids : iterable of strs, optional
-        Identifiers for each sample in ``counts``.
-    metric : str, optional
-        The name of the pairwise distance function to use when generating
-        pairwise distances. See the scipy ``pdist`` docs, linked under *See
-        Also*, for available metrics.
-
-    Returns
-    -------
-    skbio.DistanceMatrix
-        Distances between all pairs of samples (i.e., rows). The number of
-        row and columns will be equal to the number of rows in ``counts``.
-
-    Raises
-    ------
-    ValueError
-        If ``len(ids) != len(counts)``.
-
-    See Also
-    --------
-    scipy.spatial.distance.pdist
-    pw_distances_from_table
-
-    """
-    num_samples = len(counts)
-    if ids is not None and num_samples != len(ids):
-        raise ValueError(
-            "Number of rows in counts must be equal to number of provided "
-            "ids.")
-
-    distances = pdist(counts, metric)
-    return DistanceMatrix(
-        squareform(distances, force='tomatrix', checks=False), ids)
-
-pw_distances_from_table_deprecation_reason = (
-    "In the future, pw_distance will take a biom.table.Table object "
-    "and this function will be removed. You will need to update your "
-    "code to call pw_distances at that time.")
-
-
-@deprecated(as_of="0.4.0", until="0.4.1",
-            reason=pw_distances_from_table_deprecation_reason)
-def pw_distances_from_table(table, metric="braycurtis"):
-    """Compute distances between all pairs of samples in table
-
-    Parameters
-    ----------
-    table : biom.table.Table
-        ``Table`` containing count/abundance data of observations across
-        samples.
-    metric : str, optional
-        The name of the pairwise distance function to use when generating
-        pairwise distances. See the scipy ``pdist`` docs, linked under *See
-        Also*, for available metrics.
-
-    Returns
-    -------
-    skbio.DistanceMatrix
-        Distances between all pairs of samples. The number of row and columns
-        will be equal to the number of samples in ``table``.
-
-    See Also
-    --------
-    scipy.spatial.distance.pdist
-    biom.table.Table
-    pw_distances
-
-    """
-    sample_ids = table.ids(axis="sample")
-    num_samples = len(sample_ids)
-
-    # initialize the result object
-    dm = np.zeros((num_samples, num_samples))
-    for i, sid1 in enumerate(sample_ids):
-        v1 = table.data(sid1)
-        for j, sid2 in enumerate(sample_ids[:i]):
-            v2 = table.data(sid2)
-            dm[i, j] = dm[j, i] = pdist([v1, v2], metric)
-    return DistanceMatrix(dm, sample_ids)
diff --git a/skbio/diversity/beta/_unifrac.py b/skbio/diversity/beta/_unifrac.py
new file mode 100644
index 0000000..852fddf
--- /dev/null
+++ b/skbio/diversity/beta/_unifrac.py
@@ -0,0 +1,579 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import functools
+
+import numpy as np
+
+from skbio.util._decorator import experimental
+from skbio.diversity._util import (_validate_counts_matrix,
+                                   _validate_otu_ids_and_tree,
+                                   _vectorize_counts_and_tree)
+from skbio.diversity._phylogenetic import _tip_distances
+
+
+# The default value indicating whether normalization should be applied
+# for weighted UniFrac. This is used in two locations, so set in a single
+# variable to avoid the code base becoming out of sync in the event of a
+# change in this default value.
+_normalize_weighted_unifrac_by_default = False
+
+
+ at experimental(as_of="0.4.1")
+def unweighted_unifrac(u_counts, v_counts, otu_ids, tree, validate=True):
+    """ Compute unweighted UniFrac
+
+    Parameters
+    ----------
+    u_counts, v_counts: list, np.array
+        Vectors of counts/abundances of OTUs for two samples. Must be equal
+        length.
+    otu_ids: list, np.array
+        Vector of OTU ids corresponding to tip names in ``tree``. Must be the
+        same length as ``u_counts`` and ``v_counts``.
+    tree: skbio.TreeNode
+        Tree relating the OTUs in otu_ids. The set of tip names in the tree can
+        be a superset of ``otu_ids``, but not a subset.
+    validate: bool, optional
+        If `False`, validation of the input won't be performed. This step can
+        be slow, so if validation is run elsewhere it can be disabled here.
+        However, invalid input data can lead to invalid results or error
+        messages that are hard to interpret, so this step should not be
+        bypassed unless you're certain that your input data are valid. See
+        :mod:`skbio.diversity` for the description of what validation entails
+        so you can determine if you can safely disable validation.
+
+    Returns
+    -------
+    float
+        The unweighted UniFrac distance between the two samples.
+
+    Raises
+    ------
+    ValueError, MissingNodeError, DuplicateNodeError
+        If validation fails. Exact error will depend on what was invalid.
+
+    See Also
+    --------
+    weighted_unifrac
+    skbio.diversity
+    skbio.diversity.beta_diversity
+
+    Notes
+    -----
+    Unweighted UniFrac was originally described in [1]_. A discussion of
+    unweighted (qualitative) versus weighted (quantitative) diversity metrics
+    is presented in [2]_. A deeper mathematical discussion of this metric is
+    presented in [3]_.
+
+    If computing unweighted UniFrac for multiple pairs of samples, using
+    ``skbio.diversity.beta_diversity`` will be much faster than calling this
+    function individually on each pair of samples.
+
+    This implementation differs from that in PyCogent (and therefore QIIME
+    versions less than 2.0.0) by imposing a few additional restrictions on the
+    inputs. First, the input tree must be rooted. In PyCogent, if an unrooted
+    tree was provided that had a single trifurcating node (a newick convention
+    for unrooted trees), that node was considered the root of the tree. Next,
+    all OTU IDs must be tips in the tree. PyCogent would silently ignore OTU
+    IDs that were not present in the tree. To reproduce UniFrac results from
+    PyCogent with scikit-bio, ensure that your PyCogent UniFrac calculations
+    are performed on a rooted tree and that all OTU IDs are present in the
+    tree.
+
+    This implementation of unweighted UniFrac is the array-based implementation
+    described in [4]_.
+
+    References
+    ----------
+    .. [1] Lozupone, C. & Knight, R. UniFrac: a new phylogenetic method for
+       comparing microbial communities. Appl. Environ. Microbiol. 71, 8228-8235
+       (2005).
+
+    .. [2] Lozupone, C. A., Hamady, M., Kelley, S. T. & Knight, R. Quantitative
+       and qualitative beta diversity measures lead to different insights into
+       factors that structure microbial communities. Appl. Environ. Microbiol.
+       73, 1576-1585 (2007).
+
+    .. [3] Lozupone, C., Lladser, M. E., Knights, D., Stombaugh, J. & Knight,
+       R. UniFrac: an effective distance metric for microbial community
+       comparison. ISME J. 5, 169-172 (2011).
+
+    .. [4] Hamady M, Lozupone C, Knight R. Fast UniFrac: facilitating high-
+       throughput phylogenetic analyses of microbial communities including
+       analysis of pyrosequencing and PhyloChip data.  ISME J. 4(1):17-27
+       (2010).
+
+    Examples
+    --------
+    Assume we have the following abundance data for two samples, ``u`` and
+    ``v``, represented as a pair of counts vectors. These counts represent the
+    number of times specific Operational Taxonomic Units, or OTUs, were
+    observed in each of the samples.
+
+    >>> u_counts = [1, 0, 0, 4, 1, 2, 3, 0]
+    >>> v_counts = [0, 1, 1, 6, 0, 1, 0, 0]
+
+    Because UniFrac is a phylogenetic diversity metric, we need to know which
+    OTU each count corresponds to, which we'll provide as ``otu_ids``.
+
+    >>> otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5', 'OTU6', 'OTU7',
+    ...            'OTU8']
+
+    We also need a phylogenetic tree that relates the OTUs to one another.
+
+    >>> from io import StringIO
+    >>> from skbio import TreeNode
+    >>> tree = TreeNode.read(StringIO(
+    ...                      u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
+    ...                      u'(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
+    ...                      u',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
+
+    We can then compute the unweighted UniFrac distance between the samples.
+
+    >>> from skbio.diversity.beta import unweighted_unifrac
+    >>> uu = unweighted_unifrac(u_counts, v_counts, otu_ids, tree)
+    >>> print(round(uu, 2))
+    0.37
+
+    """
+    u_node_counts, v_node_counts, _, _, tree_index =\
+        _setup_pairwise_unifrac(u_counts, v_counts, otu_ids, tree, validate,
+                                normalized=False, unweighted=True)
+    return _unweighted_unifrac(u_node_counts, v_node_counts,
+                               tree_index['length'])
+
+
+ at experimental(as_of="0.4.1")
+def weighted_unifrac(u_counts, v_counts, otu_ids, tree,
+                     normalized=_normalize_weighted_unifrac_by_default,
+                     validate=True):
+    """ Compute weighted UniFrac with or without branch length normalization
+
+    Parameters
+    ----------
+    u_counts, v_counts: list, np.array
+        Vectors of counts/abundances of OTUs for two samples. Must be equal
+        length.
+    otu_ids: list, np.array
+        Vector of OTU ids corresponding to tip names in ``tree``. Must be the
+        same length as ``u_counts`` and ``v_counts``.
+    tree: skbio.TreeNode
+        Tree relating the OTUs in otu_ids. The set of tip names in the tree can
+        be a superset of ``otu_ids``, but not a subset.
+    normalized: boolean, optional
+        If ``True``, apply branch length normalization, which is described in
+        [1]_. Resulting distances will then be in the range ``[0, 1]``.
+    validate: bool, optional
+        If `False`, validation of the input won't be performed. This step can
+        be slow, so if validation is run elsewhere it can be disabled here.
+        However, invalid input data can lead to invalid results or error
+        messages that are hard to interpret, so this step should not be
+        bypassed unless you're certain that your input data are valid. See
+        :mod:`skbio.diversity` for the description of what validation entails
+        so you can determine if you can safely disable validation.
+
+    Returns
+    -------
+    float
+        The weighted UniFrac distance between the two samples.
+
+    Raises
+    ------
+    ValueError, MissingNodeError, DuplicateNodeError
+        If validation fails. Exact error will depend on what was invalid.
+
+    See Also
+    --------
+    unweighted_unifrac
+    skbio.diversity
+    skbio.diversity.beta_diversity
+
+    Notes
+    -----
+    Weighted UniFrac was originally described in [1]_, which includes a
+    discussion of unweighted (qualitative) versus weighted (quantitative)
+    diversity metrics. A deeper mathematical discussion of this metric is
+    presented in [2]_.
+
+    If computing weighted UniFrac for multiple pairs of samples, using
+    ``skbio.diversity.beta_diversity`` will be much faster than calling this
+    function individually on each pair of samples.
+
+    This implementation differs from that in PyCogent (and therefore QIIME
+    versions less than 2.0.0) by imposing a few additional restrictions on the
+    inputs. First, the input tree must be rooted. In PyCogent, if an unrooted
+    tree was provided that had a single trifurcating node (a newick convention
+    for unrooted trees), that node was considered the root of the tree. Next,
+    all OTU IDs must be tips in the tree. PyCogent would silently ignore OTU
+    IDs that were not present in the tree. To reproduce UniFrac results from
+    PyCogent with scikit-bio, ensure that your PyCogent UniFrac calculations
+    are performed on a rooted tree and that all OTU IDs are present in the
+    tree.
+
+    This implementation of weighted UniFrac is the array-based implementation
+    described in [3]_.
+
+    References
+    ----------
+    .. [1] Lozupone, C. A., Hamady, M., Kelley, S. T. & Knight, R. Quantitative
+       and qualitative beta diversity measures lead to different insights into
+       factors that structure microbial communities. Appl. Environ. Microbiol.
+       73, 1576-1585 (2007).
+
+    .. [2] Lozupone, C., Lladser, M. E., Knights, D., Stombaugh, J. & Knight,
+       R. UniFrac: an effective distance metric for microbial community
+       comparison. ISME J. 5, 169-172 (2011).
+
+    .. [3] Hamady M, Lozupone C, Knight R. Fast UniFrac: facilitating high-
+       throughput phylogenetic analyses of microbial communities including
+       analysis of pyrosequencing and PhyloChip data.  ISME J. 4(1):17-27
+       (2010).
+
+    Examples
+    --------
+    Assume we have the following abundance data for two samples, ``u`` and
+    ``v``, represented as a pair of counts vectors. These counts represent the
+    number of times specific Operational Taxonomic Units, or OTUs, were
+    observed in each of the samples.
+
+    >>> u_counts = [1, 0, 0, 4, 1, 2, 3, 0]
+    >>> v_counts = [0, 1, 1, 6, 0, 1, 0, 0]
+
+    Because UniFrac is a phylogenetic diversity metric, we need to know which
+    OTU each count corresponds to, which we'll provide as ``otu_ids``.
+
+    >>> otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5', 'OTU6', 'OTU7',
+    ...            'OTU8']
+
+    We also need a phylogenetic tree that relates the OTUs to one another.
+
+    >>> from io import StringIO
+    >>> from skbio import TreeNode
+    >>> tree = TreeNode.read(StringIO(
+    ...                      u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
+    ...                      u'(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
+    ...                      u',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
+
+    Compute the weighted UniFrac distance between the samples.
+
+    >>> from skbio.diversity.beta import weighted_unifrac
+    >>> wu = weighted_unifrac(u_counts, v_counts, otu_ids, tree)
+    >>> print(round(wu, 2))
+    1.54
+
+    Compute the weighted UniFrac distance between the samples, this time
+    applying branch length normalization so that the value falls in the range
+    ``[0.0, 1.0]``.
+
+    >>> wu = weighted_unifrac(u_counts, v_counts, otu_ids, tree,
+    ...                       normalized=True)
+    >>> print(round(wu, 2))
+    0.33
+
+    """
+    u_node_counts, v_node_counts, u_total_count, v_total_count, tree_index =\
+        _setup_pairwise_unifrac(u_counts, v_counts, otu_ids, tree, validate,
+                                normalized=normalized, unweighted=False)
+    branch_lengths = tree_index['length']
+
+    if normalized:
+        tip_indices = _get_tip_indices(tree_index)
+        node_to_root_distances = _tip_distances(branch_lengths, tree,
+                                                tip_indices)
+        return _weighted_unifrac_normalized(u_node_counts, v_node_counts,
+                                            u_total_count, v_total_count,
+                                            branch_lengths,
+                                            node_to_root_distances)
+    else:
+        return _weighted_unifrac(u_node_counts, v_node_counts,
+                                 u_total_count, v_total_count,
+                                 branch_lengths)[0]
+
+
+def _validate(u_counts, v_counts, otu_ids, tree):
+    _validate_counts_matrix([u_counts, v_counts], suppress_cast=True)
+    _validate_otu_ids_and_tree(counts=u_counts, otu_ids=otu_ids, tree=tree)
+
+
+def _setup_pairwise_unifrac(u_counts, v_counts, otu_ids, tree, validate,
+                            normalized, unweighted):
+
+    if validate:
+        _validate(u_counts, v_counts, otu_ids, tree)
+
+    # temporarily store u_counts and v_counts in a 2-D array as that's what
+    # _vectorize_counts_and_tree takes
+    u_counts = np.asarray(u_counts)
+    v_counts = np.asarray(v_counts)
+    counts = np.vstack([u_counts, v_counts])
+    counts_by_node, tree_index, branch_lengths = \
+        _vectorize_counts_and_tree(counts, otu_ids, tree)
+    # unpack counts vectors for single pairwise UniFrac calculation
+    u_node_counts = counts_by_node[0]
+    v_node_counts = counts_by_node[1]
+
+    u_total_count = u_counts.sum()
+    v_total_count = v_counts.sum()
+
+    return (u_node_counts, v_node_counts, u_total_count, v_total_count,
+            tree_index)
+
+
+def _unweighted_unifrac(u_node_counts, v_node_counts, branch_lengths):
+    """
+    Parameters
+    ----------
+    u_node_counts, v_node_counts : np.array
+        Vectors indicating presence (value greater than zero) and absence
+        (value equal to zero) of nodes in two samples, `u` and `v`. Order is
+        assumed to be the same as in `branch_lengths`.
+    branch_lengths : np.array
+        Vector of branch lengths of all nodes (tips and internal nodes) in
+        postorder representation of their tree.
+
+    Returns
+    -------
+    float
+        Unweighted UniFrac distance between samples.
+
+    Notes
+    -----
+    The count vectors passed here correspond to all nodes in the tree, not
+    just the tips.
+
+    """
+    unique_nodes = np.logical_xor(u_node_counts, v_node_counts)
+    observed_nodes = np.logical_or(u_node_counts, v_node_counts)
+    unique_branch_length = (branch_lengths * unique_nodes).sum()
+    observed_branch_length = (branch_lengths * observed_nodes).sum()
+    if observed_branch_length == 0.0:
+        # handle special case to avoid division by zero
+        return 0.0
+    return unique_branch_length / observed_branch_length
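+
+# A minimal worked example of the helper above, assuming hypothetical inputs:
+# with branch_lengths = [1.0, 2.0, 3.0], u_node_counts = [1, 0, 2], and
+# v_node_counts = [1, 3, 0], the XOR mask selects the last two nodes (each
+# present in only one of the two samples) and the OR mask selects all three,
+# so the distance is (2.0 + 3.0) / (1.0 + 2.0 + 3.0) = 5/6.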
+
+
+def _weighted_unifrac(u_node_counts, v_node_counts, u_total_count,
+                      v_total_count, branch_lengths):
+    """
+    Parameters
+    ----------
+    u_node_counts, v_node_counts : np.array
+        Vectors of counts/abundances of nodes in two samples, `u` and `v`.
+        Order is assumed to be the same as in `branch_lengths`.
+    u_total_count, v_total_count : int
+        The total number of observations (i.e., the sum of the tip counts) in
+        samples `u` and `v`, respectively. These could be computed internally,
+        but since this is a private function and the calling function has
+        already generated these values, this saves an iteration over each of
+        these vectors.
+    branch_lengths : np.array
+        Vector of branch lengths of all nodes (tips and internal nodes) in
+        postorder representation of their tree.
+
+    Returns
+    -------
+    float
+        Weighted UniFrac distance between samples.
+    np.array of float
+        Proportional abundance of each node in tree in sample `u`
+    np.array of float
+        Proportional abundance of each node in tree in sample `v`
+
+    """
+    if u_total_count > 0:
+        # convert to relative abundances if there are any counts
+        u_node_proportions = u_node_counts / u_total_count
+    else:
+        # otherwise, we'll just do the computation with u_node_counts, which
+        # is necessarily all zeros
+        u_node_proportions = u_node_counts
+
+    if v_total_count > 0:
+        v_node_proportions = v_node_counts / v_total_count
+    else:
+        v_node_proportions = v_node_counts
+
+    wu = (branch_lengths *
+          np.absolute(u_node_proportions - v_node_proportions)).sum()
+    return wu, u_node_proportions, v_node_proportions
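+
+# A minimal worked example of the helper above, assuming hypothetical inputs:
+# with branch_lengths = [1.0, 2.0, 3.0], u_node_counts = [1, 0, 2]
+# (u_total_count = 3), and v_node_counts = [1, 3, 0] (v_total_count = 4), the
+# returned distance (first element of the tuple) is
+# 1*|1/3 - 1/4| + 2*|0 - 3/4| + 3*|2/3 - 0| = 43/12, about 3.58.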
+
+
+def _weighted_unifrac_normalized(u_node_counts, v_node_counts, u_total_count,
+                                 v_total_count, branch_lengths,
+                                 node_to_root_distances):
+    """
+    Parameters
+    ----------
+    u_node_counts, v_node_counts : np.array
+        Vectors of counts/abundances of nodes in two samples, `u` and `v`.
+        Order is assumed to be the same as in `branch_lengths`.
+    u_total_count, v_total_count : int
+        The total number of observations (i.e., the sum of the tip counts) in
+        samples `u` and `v`, respectively. These could be computed internally,
+        but since this is a private function and the calling function has
+        already generated these values, this saves an iteration over each of
+        these vectors.
+    branch_lengths : np.array
+        Vector of branch lengths of all nodes (tips and internal nodes) in
+        postorder representation of their tree.
+    node_to_root_distances : np.ndarray
+        Distances from each node to the root of the tree, in postorder. Only
+        positions corresponding to tips should be non-zero.
+
+    Returns
+    -------
+    float
+        Normalized weighted UniFrac distance between samples.
+
+    Notes
+    -----
+    The count vectors passed here correspond to all nodes in the tree, not
+    just the tips.
+
+    """
+    if u_total_count == 0.0 and v_total_count == 0.0:
+        # handle special case to avoid division by zero
+        return 0.0
+    u, u_node_proportions, v_node_proportions = _weighted_unifrac(
+        u_node_counts, v_node_counts, u_total_count, v_total_count,
+        branch_lengths)
+    c = _weighted_unifrac_branch_correction(
+        node_to_root_distances, u_node_proportions, v_node_proportions)
+
+    return u / c
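+
+# In other words, the normalized form computed above is
+#     D_norm = D / sum_over_tips(d_i * (u_i / U + v_i / V))
+# where D is the unnormalized weighted UniFrac distance, d_i is the distance
+# from tip i to the root (node_to_root_distances), and u_i / U and v_i / V
+# are the proportional abundances of tip i in the two samples.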
+
+
+def _setup_multiple_unifrac(counts, otu_ids, tree, validate):
+    if validate:
+        _validate_otu_ids_and_tree(counts[0], otu_ids, tree)
+
+    counts_by_node, tree_index, branch_lengths = \
+        _vectorize_counts_and_tree(counts, otu_ids, tree)
+
+    return counts_by_node, tree_index, branch_lengths
+
+
+def _setup_multiple_unweighted_unifrac(counts, otu_ids, tree, validate):
+    """ Create optimized pdist-compatible unweighted UniFrac function
+
+    Parameters
+    ----------
+    counts : 2D array_like of ints or floats
+        Matrix containing count/abundance data where each row contains counts
+        of observations in a given sample.
+    otu_ids: list, np.array
+        Vector of OTU ids corresponding to tip names in ``tree``. Must be the
+        same length as each row in ``counts``. These IDs do not need to be in
+        tip order with respect to the tree.
+    tree: skbio.TreeNode
+        Tree relating the OTUs in otu_ids. The set of tip names in the tree can
+        be a superset of ``otu_ids``, but not a subset.
+    validate: bool, optional
+        If `False`, validation of the input won't be performed.
+
+    Returns
+    -------
+    function
+        Optimized pairwise unweighted UniFrac calculator that can be passed
+        to ``scipy.spatial.distance.pdist``.
+    2D np.array of ints, floats
+        Counts of all nodes in ``tree``.
+
+    """
+    counts_by_node, _, branch_lengths = \
+        _setup_multiple_unifrac(counts, otu_ids, tree, validate)
+
+    f = functools.partial(_unweighted_unifrac, branch_lengths=branch_lengths)
+
+    return f, counts_by_node
+
+
+def _setup_multiple_weighted_unifrac(counts, otu_ids, tree, normalized,
+                                     validate):
+    """ Create optimized pdist-compatible weighted UniFrac function
+
+    Parameters
+    ----------
+    counts : 2D array_like of ints or floats
+        Matrix containing count/abundance data where each row contains counts
+        of observations in a given sample.
+    otu_ids: list, np.array
+        Vector of OTU ids corresponding to tip names in ``tree``. Must be the
+        same length as each row in ``counts``. These IDs do not need to be in
+        tip order with respect to the tree.
+    tree: skbio.TreeNode
+        Tree relating the OTUs in otu_ids. The set of tip names in the tree can
+        be a superset of ``otu_ids``, but not a subset.
+    normalized: boolean, optional
+        If ``True``, apply branch length normalization.
+    validate: bool, optional
+        If `False`, validation of the input won't be performed.
+
+    Returns
+    -------
+    function
+        Optimized pairwise weighted UniFrac calculator that can be passed
+        to ``scipy.spatial.distance.pdist``.
+    2D np.array of ints, floats
+        Counts of all nodes in ``tree``.
+
+    """
+    counts_by_node, tree_index, branch_lengths = \
+        _setup_multiple_unifrac(counts, otu_ids, tree, validate)
+    tip_indices = _get_tip_indices(tree_index)
+
+    if normalized:
+        node_to_root_distances = _tip_distances(branch_lengths, tree,
+                                                tip_indices)
+
+        def f(u_node_counts, v_node_counts):
+            u_total_count = np.take(u_node_counts, tip_indices).sum()
+            v_total_count = np.take(v_node_counts, tip_indices).sum()
+            u = _weighted_unifrac_normalized(
+                    u_node_counts, v_node_counts, u_total_count, v_total_count,
+                    branch_lengths, node_to_root_distances)
+            return u
+    else:
+
+        def f(u_node_counts, v_node_counts):
+            u_total_count = np.take(u_node_counts, tip_indices).sum()
+            v_total_count = np.take(v_node_counts, tip_indices).sum()
+            u, _, _ = _weighted_unifrac(u_node_counts, v_node_counts,
+                                        u_total_count, v_total_count,
+                                        branch_lengths)
+            return u
+
+    return f, counts_by_node
+
+
+def _get_tip_indices(tree_index):
+    tip_indices = np.array([n.id for n in tree_index['id_index'].values()
+                            if n.is_tip()])
+    return tip_indices
+
+
+def _weighted_unifrac_branch_correction(node_to_root_distances,
+                                        u_node_proportions,
+                                        v_node_proportions):
+    """Calculates weighted unifrac branch length correction.
+
+    Parameters
+    ----------
+    node_to_root_distances : np.ndarray
+        1D column vector of branch lengths in post order form. There should be
+        positions in this vector for all nodes in the tree, but only tips
+        should be non-zero.
+    u_node_proportions, v_node_proportions : np.ndarray
+        Proportional abundance of observations of all nodes in the tree in
+        samples ``u`` and ``v``, respectively.
+
+    Returns
+    -------
+    float
+        The branch length correction term: the sum of each tip-to-root
+        distance weighted by the summed proportional abundances at that tip.
+    """
+    return (node_to_root_distances.ravel() *
+            (u_node_proportions + v_node_proportions)).sum()
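+
+
+# Usage sketch (hypothetical caller, not from upstream): the factory functions
+# above return a callable with the (u, v) signature expected by scipy's pdist,
+# so a full UniFrac distance matrix can presumably be built along these lines:
+#
+#     from scipy.spatial.distance import pdist, squareform
+#
+#     f, counts_by_node = _setup_multiple_unweighted_unifrac(
+#         counts, otu_ids, tree, validate=True)
+#     dm = squareform(pdist(counts_by_node, metric=f))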
diff --git a/skbio/diversity/beta/tests/data/qiime-191-tt/README.md b/skbio/diversity/beta/tests/data/qiime-191-tt/README.md
new file mode 100644
index 0000000..2925bee
--- /dev/null
+++ b/skbio/diversity/beta/tests/data/qiime-191-tt/README.md
@@ -0,0 +1,26 @@
+Files in this directory are the QIIME 1.9.1 "tiny test" files. These data were developed by @gregcaporaso, who gave permission to reproduce them in scikit-bio.
+
+If you have a [QIIME 1.9.1 base installation](http://install.qiime.org), the raw input files in this directory can be obtained by running:
+
+```bash
+python -c "from qiime.test import write_test_data; write_test_data('.')"
+biom convert -i biom --to-tsv -o otu-table.tsv
+```
+
+After converting to tsv, the following OTUs are removed because they are not present in the tree (they're not 16S sequences, so can't be aligned with PyNAST): ``None1``, ``None10``, ``None6``, and ``None2``. The ``not16S.1`` sample is also removed because, after removing those OTUs, it has a total count of 0. This boundary case is tested directly in the ``unifrac_*`` and ``faith_pd`` tests.
+
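+One way to drop those OTU rows from the tsv is sketched below (this is only an illustration, not necessarily the command originally used, and it does not cover removal of the ``not16S.1`` sample column):
+
+```python
+# hypothetical filtering step: drop the four OTUs that are absent from the tree
+removed = {'None1', 'None10', 'None6', 'None2'}
+with open('otu-table.tsv') as src, open('otu-table.filtered.tsv', 'w') as dst:
+    for line in src:
+        if line.split('\t')[0] not in removed:
+            dst.write(line)
+```
+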
+Then, in the python interpreter, we midpoint root the tree (since this is a QIIME 1.9.1 installation, this step is performed with scikit-bio 0.2.3):
+
+```python
+from skbio import TreeNode
+t = TreeNode.read('./tree')
+t = t.root_at_midpoint()
+t.write('tree', format='newick')
+```
+
+The output files (alpha diversity values and beta diversity distance matrices) can then be obtained by running:
+
+```bash
+alpha_diversity.py -i biom -t tree -m PD_whole_tree -o pd.txt
+beta_diversity.py -m weighted_unifrac,unweighted_unifrac,weighted_normalized_unifrac -i biom -t tree -o o
+```
diff --git a/skbio/diversity/beta/tests/data/qiime-191-tt/otu-table.tsv b/skbio/diversity/beta/tests/data/qiime-191-tt/otu-table.tsv
new file mode 100644
index 0000000..4aff943
--- /dev/null
+++ b/skbio/diversity/beta/tests/data/qiime-191-tt/otu-table.tsv
@@ -0,0 +1,12 @@
+# Constructed from biom file
+#OTU ID	f2	f1	f3	f4	p2	p1	t1	t2
+295053	20	18	18	22	4	0	0	0
+42684	0	0	0	0	1	0	0	0
+None11	1	0	0	0	1	1	0	0
+None7	0	0	0	0	1	0	0	0
+None5	0	0	0	0	1	0	0	0
+None4	0	0	0	0	1	1	0	0
+None3	0	0	0	0	1	0	2	3
+879972	0	0	0	0	9	20	1	4
+None9	0	0	0	0	3	0	19	15
+None8	1	4	4	0	0	0	0	0
diff --git a/skbio/diversity/beta/tests/data/qiime-191-tt/tree.nwk b/skbio/diversity/beta/tests/data/qiime-191-tt/tree.nwk
new file mode 100644
index 0000000..199b8ca
--- /dev/null
+++ b/skbio/diversity/beta/tests/data/qiime-191-tt/tree.nwk
@@ -0,0 +1 @@
+(((879972:0.05039,None3:0.00778)0.980:0.15948,((None11:0.07161,None4:0.06965)0.917:0.09643,(295053:0.06096,42684:0.15599)0.910:0.08898)0.899:0.09227)0.958:0.064315,(None8:0.09606,(None7:0.10435,(None5:0.02626,None9:0.00014)1.000:0.25335)0.753:0.0465):0.075445)root;
diff --git a/skbio/diversity/beta/tests/data/qiime-191-tt/unweighted_unifrac_dm.txt b/skbio/diversity/beta/tests/data/qiime-191-tt/unweighted_unifrac_dm.txt
new file mode 100644
index 0000000..9a88af3
--- /dev/null
+++ b/skbio/diversity/beta/tests/data/qiime-191-tt/unweighted_unifrac_dm.txt
@@ -0,0 +1,10 @@
+	f2	f1	f3	f4	p2	p1	t1	not16S.1	t2
+f2	0.0	0.26009565527	0.26009565527	0.525554506478	0.638141793205	0.649277757971	0.879901349993	1.0	0.879901349993
+f1	0.26009565527	0.0	0.0	0.358774553898	0.74869733414	0.830826823972	0.859632207458	1.0	0.859632207458
+f3	0.26009565527	0.0	0.0	0.358774553898	0.74869733414	0.830826823972	0.859632207458	1.0	0.859632207458
+f4	0.525554506478	0.358774553898	0.358774553898	0.0	0.784728562399	0.792350994914	0.928507908983	1.0	0.928507908983
+p2	0.638141793205	0.74869733414	0.74869733414	0.784728562399	0.0	0.575711075216	0.538310274598	1.0	0.538310274598
+p1	0.649277757971	0.830826823972	0.830826823972	0.792350994914	0.575711075216	0.0	0.72230493437	1.0	0.72230493437
+t1	0.879901349993	0.859632207458	0.859632207458	0.928507908983	0.538310274598	0.72230493437	0.0	1.0	0.0
+not16S.1	1.0	1.0	1.0	1.0	1.0	1.0	1.0	0.0	1.0
+t2	0.879901349993	0.859632207458	0.859632207458	0.928507908983	0.538310274598	0.72230493437	0.0	1.0	0.0
\ No newline at end of file
diff --git a/skbio/diversity/beta/tests/data/qiime-191-tt/weighted_normalized_unifrac_dm.txt b/skbio/diversity/beta/tests/data/qiime-191-tt/weighted_normalized_unifrac_dm.txt
new file mode 100644
index 0000000..a3e5984
--- /dev/null
+++ b/skbio/diversity/beta/tests/data/qiime-191-tt/weighted_normalized_unifrac_dm.txt
@@ -0,0 +1,10 @@
+	f2	f1	f3	f4	p2	p1	t1	not16S.1	t2
+f2	0.0	0.113186179375	0.113186179375	0.0595362216515	0.599928715677	0.732991733568	0.962973616339	1.0	0.925172353416
+f1	0.113186179375	0.0	0.0	0.14768803941	0.577821666109	0.782364205648	0.929696953333	1.0	0.889628707676
+f3	0.113186179375	0.0	0.0	0.14768803941	0.577821666109	0.782364205648	0.929696953333	1.0	0.889628707676
+f4	0.0595362216515	0.14768803941	0.14768803941	0.0	0.639547977307	0.751529763393	0.973594769453	1.0	0.936441078767
+p2	0.599928715677	0.577821666109	0.577821666109	0.639547977307	0.0	0.430712953163	0.677634129601	1.0	0.519557525688
+p1	0.732991733568	0.782364205648	0.782364205648	0.751529763393	0.430712953163	0.0	0.896900994816	1.0	0.739098773939
+t1	0.962973616339	0.929696953333	0.929696953333	0.973594769453	0.677634129601	0.896900994816	0.0	1.0	0.167120342575
+not16S.1	1.0	1.0	1.0	1.0	1.0	1.0	1.0	0.0	1.0
+t2	0.925172353416	0.889628707676	0.889628707676	0.936441078767	0.519557525688	0.739098773939	0.167120342575	1.0	0.0
\ No newline at end of file
diff --git a/skbio/diversity/beta/tests/data/qiime-191-tt/weighted_unifrac_dm.txt b/skbio/diversity/beta/tests/data/qiime-191-tt/weighted_unifrac_dm.txt
new file mode 100644
index 0000000..e1a3992
--- /dev/null
+++ b/skbio/diversity/beta/tests/data/qiime-191-tt/weighted_unifrac_dm.txt
@@ -0,0 +1,10 @@
+	f2	f1	f3	f4	p2	p1	t1	not16S.1	t2
+f2	0.0	0.0660086363636	0.0660086363636	0.0361822727273	0.364183181818	0.425055909091	0.634565909091	1.0	0.590832727273
+f1	0.0660086363636	0.0	0.0	0.0869145454545	0.339649090909	0.438638181818	0.594755454545	1.0	0.551022272727
+f3	0.0660086363636	0.0	0.0	0.0869145454545	0.339649090909	0.438638181818	0.594755454545	1.0	0.551022272727
+f4	0.0361822727273	0.0869145454545	0.0869145454545	0.0	0.391632727273	0.4398	0.646739090909	1.0	0.603005909091
+p2	0.364183181818	0.339649090909	0.339649090909	0.391632727273	0.0	0.251758181818	0.44967	1.0	0.334201363636
+p1	0.425055909091	0.438638181818	0.438638181818	0.4398	0.251758181818	0.0	0.57082	1.0	0.455351363636
+t1	0.634565909091	0.594755454545	0.594755454545	0.646739090909	0.44967	0.57082	0.0	1.0	0.116175909091
+not16S.1	1.0	1.0	1.0	1.0	1.0	1.0	1.0	0.0	1.0
+t2	0.590832727273	0.551022272727	0.551022272727	0.603005909091	0.334201363636	0.455351363636	0.116175909091	1.0	0.0
\ No newline at end of file
diff --git a/skbio/diversity/beta/tests/test_base.py b/skbio/diversity/beta/tests/test_base.py
deleted file mode 100644
index f2f9cd7..0000000
--- a/skbio/diversity/beta/tests/test_base.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-
-from unittest import TestCase, main
-
-import numpy as np
-import numpy.testing as npt
-
-from skbio import DistanceMatrix
-from skbio.diversity.beta import pw_distances, pw_distances_from_table
-
-
-class HelperBiomTable(object):
-    """An object that looks like a BIOM table, for use in testing
-
-    This allows us to test passing BIOM-like objects, without having to
-    depend on the biom-format project (since this would ultimately be a
-    circular dependency).
-    """
-
-    def __init__(self, data, observation_ids, sample_ids):
-        self._data = data.T
-        self.observation_ids = observation_ids
-        self.sample_ids = sample_ids
-
-    def ids(self, axis):
-        return self.sample_ids
-
-    def data(self, sample_id):
-        i = self.sample_ids.index(sample_id)
-        return self._data[i]
-
-
-class BaseTests(TestCase):
-    def setUp(self):
-        self.t1 = [[1, 5],
-                   [2, 3],
-                   [0, 1]]
-        self.ids1 = list('ABC')
-
-        self.t2 = [[23, 64, 14, 0, 0, 3, 1],
-                   [0, 3, 35, 42, 0, 12, 1],
-                   [0, 5, 5, 0, 40, 40, 0],
-                   [44, 35, 9, 0, 1, 0, 0],
-                   [0, 2, 8, 0, 35, 45, 1],
-                   [0, 0, 25, 35, 0, 19, 0]]
-        self.ids2 = list('ABCDEF')
-
-        # In the future, if necessary, it should be possible to just replace
-        # HelperBiomTable with Table in the following lines to test with the
-        # biom.table.Table object directly (i.e., this constructor
-        # interface aligns with the biom.table.Table constructor
-        # interface).
-        self.table1 = HelperBiomTable(
-            np.array(self.t1).T, observation_ids=range(2),
-            sample_ids=self.ids1)
-        self.table2 = HelperBiomTable(
-            np.array(self.t2).T, observation_ids=range(7),
-            sample_ids=self.ids2)
-
-    def test_pw_distances_invalid_input(self):
-        # number of ids doesn't match the number of samples
-        self.assertRaises(ValueError, pw_distances, self.t1, list('AB'),
-                          'euclidean')
-
-    def test_pw_distances_euclidean(self):
-        actual_dm = pw_distances(self.t1, self.ids1, 'euclidean')
-        self.assertEqual(actual_dm.shape, (3, 3))
-        npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
-        npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
-        npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
-        npt.assert_almost_equal(actual_dm['A', 'B'], 2.23606798)
-        npt.assert_almost_equal(actual_dm['B', 'A'], 2.23606798)
-        npt.assert_almost_equal(actual_dm['A', 'C'], 4.12310563)
-        npt.assert_almost_equal(actual_dm['C', 'A'], 4.12310563)
-        npt.assert_almost_equal(actual_dm['B', 'C'], 2.82842712)
-        npt.assert_almost_equal(actual_dm['C', 'B'], 2.82842712)
-
-        actual_dm = pw_distances(self.t2, self.ids2, 'euclidean')
-        expected_data = [
-            [0., 80.8455317, 84.0297566, 36.3042697, 86.0116271, 78.9176786],
-            [80.8455317, 0., 71.0844568, 74.4714710, 69.3397433, 14.422205],
-            [84.0297566, 71.0844568, 0., 77.2851861, 8.3066238, 60.7536007],
-            [36.3042697, 74.4714710, 77.2851861, 0., 78.7908624, 70.7389567],
-            [86.0116271, 69.3397433, 8.3066238, 78.7908624, 0., 58.4807660],
-            [78.9176786, 14.422205, 60.7536007, 70.7389567, 58.4807660, 0.]]
-        expected_dm = DistanceMatrix(expected_data, self.ids2)
-        for id1 in self.ids2:
-            for id2 in self.ids2:
-                npt.assert_almost_equal(actual_dm[id1, id2],
-                                        expected_dm[id1, id2], 6)
-
-    def test_pw_distances_braycurtis(self):
-        actual_dm = pw_distances(self.t1, self.ids1, 'braycurtis')
-        self.assertEqual(actual_dm.shape, (3, 3))
-        npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
-        npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
-        npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
-        npt.assert_almost_equal(actual_dm['A', 'B'], 0.27272727)
-        npt.assert_almost_equal(actual_dm['B', 'A'], 0.27272727)
-        npt.assert_almost_equal(actual_dm['A', 'C'], 0.71428571)
-        npt.assert_almost_equal(actual_dm['C', 'A'], 0.71428571)
-        npt.assert_almost_equal(actual_dm['B', 'C'], 0.66666667)
-        npt.assert_almost_equal(actual_dm['C', 'B'], 0.66666667)
-
-        actual_dm = pw_distances(self.t2, self.ids2, 'braycurtis')
-        expected_data = [
-            [0., 0.78787879, 0.86666667, 0.30927835, 0.85714286, 0.81521739],
-            [0.78787879, 0., 0.78142077, 0.86813187, 0.75, 0.1627907],
-            [0.86666667, 0.78142077, 0., 0.87709497, 0.09392265, 0.71597633],
-            [0.30927835, 0.86813187, 0.87709497, 0., 0.87777778, 0.89285714],
-            [0.85714286, 0.75, 0.09392265, 0.87777778, 0., 0.68235294],
-            [0.81521739, 0.1627907, 0.71597633, 0.89285714, 0.68235294, 0.]]
-        expected_dm = DistanceMatrix(expected_data, self.ids2)
-        for id1 in self.ids2:
-            for id2 in self.ids2:
-                npt.assert_almost_equal(actual_dm[id1, id2],
-                                        expected_dm[id1, id2], 6)
-
-    def test_pw_distances_from_table_euclidean(self):
-        # results are equal when passed as Table or matrix
-        m_dm = pw_distances(self.t1, self.ids1, 'euclidean')
-        t_dm = npt.assert_warns(
-            DeprecationWarning, pw_distances_from_table, self.table1,
-            'euclidean')
-        for id1 in self.ids1:
-            for id2 in self.ids1:
-                npt.assert_almost_equal(m_dm[id1, id2], t_dm[id1, id2])
-
-        m_dm = pw_distances(self.t2, self.ids2, 'euclidean')
-        t_dm = npt.assert_warns(
-            DeprecationWarning, pw_distances_from_table, self.table2,
-            'euclidean')
-        for id1 in self.ids2:
-            for id2 in self.ids2:
-                npt.assert_almost_equal(m_dm[id1, id2], t_dm[id1, id2])
-
-    def test_pw_distances_from_table_braycurtis(self):
-        # results are equal when passed as Table or matrix
-        m_dm = pw_distances(self.t1, self.ids1, 'braycurtis')
-        t_dm = npt.assert_warns(
-            DeprecationWarning, pw_distances_from_table, self.table1,
-            'braycurtis')
-        for id1 in self.ids1:
-            for id2 in self.ids1:
-                npt.assert_almost_equal(m_dm[id1, id2], t_dm[id1, id2])
-
-        m_dm = pw_distances(self.t2, self.ids2, 'braycurtis')
-        t_dm = npt.assert_warns(
-            DeprecationWarning, pw_distances_from_table, self.table2,
-            'braycurtis')
-        for id1 in self.ids2:
-            for id2 in self.ids2:
-                npt.assert_almost_equal(m_dm[id1, id2], t_dm[id1, id2])
-
-
-if __name__ == "__main__":
-    main()
diff --git a/skbio/diversity/beta/tests/test_unifrac.py b/skbio/diversity/beta/tests/test_unifrac.py
new file mode 100644
index 0000000..b762429
--- /dev/null
+++ b/skbio/diversity/beta/tests/test_unifrac.py
@@ -0,0 +1,690 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from io import StringIO
+from unittest import main, TestCase
+
+import numpy as np
+
+from skbio import TreeNode
+from skbio.tree import DuplicateNodeError, MissingNodeError
+from skbio.diversity.beta import unweighted_unifrac, weighted_unifrac
+from skbio.diversity.beta._unifrac import (_unweighted_unifrac,
+                                           _weighted_unifrac,
+                                           _weighted_unifrac_branch_correction)
+
+
+class UnifracTests(TestCase):
+
+    def setUp(self):
+        self.b1 = np.array(
+            [[1, 3, 0, 1, 0],
+             [0, 2, 0, 4, 4],
+             [0, 0, 6, 2, 1],
+             [0, 0, 1, 1, 1],
+             [5, 3, 5, 0, 0],
+             [0, 0, 0, 3, 5]])
+        self.sids1 = list('ABCDEF')
+        self.oids1 = ['OTU%d' % i for i in range(1, 6)]
+        self.t1 = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        self.t1_w_extra_tips = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
+                     u')root;'))
+
+        self.t2 = TreeNode.read(
+            StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
+                     u'root;'))
+        self.oids2 = ['OTU%d' % i for i in range(1, 5)]
+
+    def test_unweighted_otus_out_of_order(self):
+        # UniFrac API does not assert the observations are in tip order of the
+        # input tree
+        shuffled_ids = self.oids1[:]
+        shuffled_b1 = self.b1.copy()
+
+        shuffled_ids[0], shuffled_ids[-1] = shuffled_ids[-1], shuffled_ids[0]
+        shuffled_b1[:, [0, -1]] = shuffled_b1[:, [-1, 0]]
+
+        for i in range(len(self.b1)):
+            for j in range(len(self.b1)):
+                actual = unweighted_unifrac(
+                    self.b1[i], self.b1[j], self.oids1, self.t1)
+                expected = unweighted_unifrac(
+                    shuffled_b1[i], shuffled_b1[j], shuffled_ids, self.t1)
+                self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_otus_out_of_order(self):
+        # UniFrac API does not assert the observations are in tip order of the
+        # input tree
+        shuffled_ids = self.oids1[:]
+        shuffled_b1 = self.b1.copy()
+
+        shuffled_ids[0], shuffled_ids[-1] = shuffled_ids[-1], shuffled_ids[0]
+        shuffled_b1[:, [0, -1]] = shuffled_b1[:, [-1, 0]]
+
+        for i in range(len(self.b1)):
+            for j in range(len(self.b1)):
+                actual = weighted_unifrac(
+                    self.b1[i], self.b1[j], self.oids1, self.t1)
+                expected = weighted_unifrac(
+                    shuffled_b1[i], shuffled_b1[j], shuffled_ids, self.t1)
+                self.assertAlmostEqual(actual, expected)
+
+    def test_unweighted_extra_tips(self):
+        # UniFrac values are the same despite unobserved tips in the tree
+        for i in range(len(self.b1)):
+            for j in range(len(self.b1)):
+                actual = unweighted_unifrac(
+                    self.b1[i], self.b1[j], self.oids1, self.t1_w_extra_tips)
+                expected = unweighted_unifrac(
+                    self.b1[i], self.b1[j], self.oids1, self.t1)
+                self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_extra_tips(self):
+        # UniFrac values are the same despite unobserved tips in the tree
+        for i in range(len(self.b1)):
+            for j in range(len(self.b1)):
+                actual = weighted_unifrac(
+                    self.b1[i], self.b1[j], self.oids1, self.t1_w_extra_tips)
+                expected = weighted_unifrac(
+                    self.b1[i], self.b1[j], self.oids1, self.t1)
+                self.assertAlmostEqual(actual, expected)
+
+    def test_unweighted_minimal_trees(self):
+        # two tips
+        tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
+        actual = unweighted_unifrac([1, 0], [0, 0], ['OTU1', 'OTU2'],
+                                    tree)
+        expected = 1.0
+        self.assertEqual(actual, expected)
+
+    def test_weighted_minimal_trees(self):
+        # two tips
+        tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
+        actual = weighted_unifrac([1, 0], [0, 0], ['OTU1', 'OTU2'], tree)
+        expected = 0.25
+        self.assertEqual(actual, expected)
+
+    def test_unweighted_root_not_observed(self):
+        # expected values computed with QIIME 1.9.1 and by hand
+        # root node not observed, but branch between (OTU1, OTU2) and root
+        # is considered shared
+        actual = unweighted_unifrac([1, 1, 0, 0], [1, 0, 0, 0],
+                                    self.oids2, self.t2)
+        # for clarity of what I'm testing, compute expected as it would
+        # based on the branch lengths. the values that compose shared was
+        # a point of confusion for me here, so leaving these in for
+        # future reference
+        expected = 0.2 / (0.1 + 0.2 + 0.3)  # 0.3333333333
+        self.assertAlmostEqual(actual, expected)
+
+        # root node not observed, but branch between (OTU3, OTU4) and root
+        # is considered shared
+        actual = unweighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0],
+                                    self.oids2, self.t2)
+        # for clarity of what I'm testing, compute expected as it would be
+        # computed based on the branch lengths. the values that compose the
+        # shared branch length were a point of confusion for me here, so
+        # leaving these in for future reference
+        expected = 0.7 / (1.1 + 0.5 + 0.7)  # 0.3043478261
+        self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_root_not_observed(self):
+        # expected values computed by hand, these disagree with QIIME 1.9.1
+        # root node not observed, but branch between (OTU1, OTU2) and root
+        # is considered shared
+        actual = weighted_unifrac([1, 0, 0, 0], [1, 1, 0, 0],
+                                  self.oids2, self.t2)
+        expected = 0.15
+        self.assertAlmostEqual(actual, expected)
+
+        # root node not observed, but branch between (OTU3, OTU4) and root
+        # is considered shared
+        actual = weighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0],
+                                  self.oids2, self.t2)
+        expected = 0.6
+        self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_normalized_root_not_observed(self):
+        # expected values computed by hand, these disagree with QIIME 1.9.1
+        # root node not observed, but branch between (OTU1, OTU2) and root
+        # is considered shared
+        actual = weighted_unifrac([1, 0, 0, 0], [1, 1, 0, 0],
+                                  self.oids2, self.t2, normalized=True)
+        expected = 0.1764705882
+        self.assertAlmostEqual(actual, expected)
+
+        # root node not observed, but branch between (OTU3, OTU4) and root
+        # is considered shared
+        actual = weighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0],
+                                  self.oids2, self.t2, normalized=True)
+        expected = 0.1818181818
+        self.assertAlmostEqual(actual, expected)
+
+    def test_unweighted_unifrac_identity(self):
+        for i in range(len(self.b1)):
+            actual = unweighted_unifrac(
+                self.b1[i], self.b1[i], self.oids1, self.t1)
+            expected = 0.0
+            self.assertAlmostEqual(actual, expected)
+
+    def test_unweighted_unifrac_symmetry(self):
+        for i in range(len(self.b1)):
+            for j in range(len(self.b1)):
+                actual = unweighted_unifrac(
+                    self.b1[i], self.b1[j], self.oids1, self.t1)
+                expected = unweighted_unifrac(
+                    self.b1[j], self.b1[i], self.oids1, self.t1)
+                self.assertAlmostEqual(actual, expected)
+
+    def test_invalid_input(self):
+        # Many of these tests are duplicated from
+        # skbio.diversity.tests.test_base, but I think it's important to
+        # confirm that they are being run when *unifrac is called.
+
+        # tree has duplicated tip ids
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+        u_counts = [1, 2, 3]
+        v_counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(DuplicateNodeError, unweighted_unifrac,
+                          u_counts, v_counts, otu_ids, t)
+        self.assertRaises(DuplicateNodeError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+
+        # unrooted tree as input
+        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
+                                   u'OTU4:0.7);'))
+        u_counts = [1, 2, 3]
+        v_counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, unweighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        self.assertRaises(ValueError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+
+        # otu_ids has duplicated ids
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        u_counts = [1, 2, 3]
+        v_counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU2']
+        self.assertRaises(ValueError, unweighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        self.assertRaises(ValueError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+
+        # len of vectors not equal
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        u_counts = [1, 2]
+        v_counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, unweighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        self.assertRaises(ValueError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        u_counts = [1, 2, 3]
+        v_counts = [1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, unweighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        self.assertRaises(ValueError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        u_counts = [1, 2, 3]
+        v_counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2']
+        self.assertRaises(ValueError, unweighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        self.assertRaises(ValueError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+
+        # negative counts
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        u_counts = [1, 2, -3]
+        v_counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, unweighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        self.assertRaises(ValueError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        u_counts = [1, 2, 3]
+        v_counts = [1, 1, -1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, unweighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        self.assertRaises(ValueError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+
+        # tree with no branch lengths
+        t = TreeNode.read(
+            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
+        u_counts = [1, 2, 3]
+        v_counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, unweighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        self.assertRaises(ValueError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+
+        # tree missing some branch lengths
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        u_counts = [1, 2, 3]
+        v_counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, unweighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        self.assertRaises(ValueError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+
+        # otu_ids not present in tree
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        u_counts = [1, 2, 3]
+        v_counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU42']
+        self.assertRaises(MissingNodeError, unweighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+        self.assertRaises(MissingNodeError, weighted_unifrac, u_counts,
+                          v_counts, otu_ids, t)
+
+    def test_unweighted_unifrac_non_overlapping(self):
+        # these communities only share the root node
+        actual = unweighted_unifrac(
+            self.b1[4], self.b1[5], self.oids1, self.t1)
+        expected = 1.0
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            [1, 1, 1, 0, 0], [0, 0, 0, 1, 1], self.oids1, self.t1)
+        expected = 1.0
+        self.assertAlmostEqual(actual, expected)
+
+    def test_unweighted_unifrac_zero_counts(self):
+        actual = unweighted_unifrac(
+            [1, 1, 1, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1)
+        expected = 1.0
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1)
+        expected = 0.0
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            [], [], [], self.t1)
+        expected = 0.0
+        self.assertAlmostEqual(actual, expected)
+
+    def test_unweighted_unifrac(self):
+        # expected results derived from QIIME 1.9.1, which is a completely
+        # different implementation from skbio's initial unweighted unifrac
+        # implementation
+        # sample A versus all
+        actual = unweighted_unifrac(
+            self.b1[0], self.b1[1], self.oids1, self.t1)
+        expected = 0.238095238095
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            self.b1[0], self.b1[2], self.oids1, self.t1)
+        expected = 0.52
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            self.b1[0], self.b1[3], self.oids1, self.t1)
+        expected = 0.52
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            self.b1[0], self.b1[4], self.oids1, self.t1)
+        expected = 0.545454545455
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            self.b1[0], self.b1[5], self.oids1, self.t1)
+        expected = 0.619047619048
+        self.assertAlmostEqual(actual, expected)
+        # sample B versus remaining
+        actual = unweighted_unifrac(
+            self.b1[1], self.b1[2], self.oids1, self.t1)
+        expected = 0.347826086957
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            self.b1[1], self.b1[3], self.oids1, self.t1)
+        expected = 0.347826086957
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            self.b1[1], self.b1[4], self.oids1, self.t1)
+        expected = 0.68
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            self.b1[1], self.b1[5], self.oids1, self.t1)
+        expected = 0.421052631579
+        self.assertAlmostEqual(actual, expected)
+        # sample C versus remaining
+        actual = unweighted_unifrac(
+            self.b1[2], self.b1[3], self.oids1, self.t1)
+        expected = 0.0
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            self.b1[2], self.b1[4], self.oids1, self.t1)
+        expected = 0.68
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            self.b1[2], self.b1[5], self.oids1, self.t1)
+        expected = 0.421052631579
+        self.assertAlmostEqual(actual, expected)
+        # sample D versus remaining
+        actual = unweighted_unifrac(
+            self.b1[3], self.b1[4], self.oids1, self.t1)
+        expected = 0.68
+        self.assertAlmostEqual(actual, expected)
+        actual = unweighted_unifrac(
+            self.b1[3], self.b1[5], self.oids1, self.t1)
+        expected = 0.421052631579
+        self.assertAlmostEqual(actual, expected)
+        # sample E versus remaining
+        actual = unweighted_unifrac(
+            self.b1[4], self.b1[5], self.oids1, self.t1)
+        expected = 1.0
+        self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac_identity(self):
+        for i in range(len(self.b1)):
+            actual = weighted_unifrac(
+                self.b1[i], self.b1[i], self.oids1, self.t1)
+            expected = 0.0
+            self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac_symmetry(self):
+        for i in range(len(self.b1)):
+            for j in range(len(self.b1)):
+                actual = weighted_unifrac(
+                    self.b1[i], self.b1[j], self.oids1, self.t1)
+                expected = weighted_unifrac(
+                    self.b1[j], self.b1[i], self.oids1, self.t1)
+                self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac_non_overlapping(self):
+        # expected results derived from QIIME 1.9.1, which is a completely
+        # different implementation from skbio's initial weighted unifrac
+        # implementation
+        # these communities only share the root node
+        actual = weighted_unifrac(
+            self.b1[4], self.b1[5], self.oids1, self.t1)
+        expected = 4.0
+        self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac_zero_counts(self):
+        actual = weighted_unifrac(
+            [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1)
+        expected = 0.0
+        self.assertAlmostEqual(actual, expected)
+        # the following expected value was calculated by hand, because
+        # QIIME 1.9.1 tells the user that values involving empty vectors
+        # will be uninformative and simply returns 1.0
+        actual = weighted_unifrac(
+            [1, 1, 1, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1)
+        expected = 2.0
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            [], [], [], self.t1)
+        expected = 0.0
+        self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac(self):
+        # expected results derived from QIIME 1.9.1, which is a completely
+        # different implementation from skbio's initial weighted unifrac
+        # implementation
+        actual = weighted_unifrac(
+            self.b1[0], self.b1[1], self.oids1, self.t1)
+        expected = 2.4
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[0], self.b1[2], self.oids1, self.t1)
+        expected = 1.86666666667
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[0], self.b1[3], self.oids1, self.t1)
+        expected = 2.53333333333
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[0], self.b1[4], self.oids1, self.t1)
+        expected = 1.35384615385
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[0], self.b1[5], self.oids1, self.t1)
+        expected = 3.2
+        self.assertAlmostEqual(actual, expected)
+        # sample B versus remaining
+        actual = weighted_unifrac(
+            self.b1[1], self.b1[2], self.oids1, self.t1)
+        expected = 2.26666666667
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[1], self.b1[3], self.oids1, self.t1)
+        expected = 0.933333333333
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[1], self.b1[4], self.oids1, self.t1)
+        expected = 3.2
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[1], self.b1[5], self.oids1, self.t1)
+        expected = 0.8375
+        self.assertAlmostEqual(actual, expected)
+        # sample C versus remaining
+        actual = weighted_unifrac(
+            self.b1[2], self.b1[3], self.oids1, self.t1)
+        expected = 1.33333333333
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[2], self.b1[4], self.oids1, self.t1)
+        expected = 1.89743589744
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[2], self.b1[5], self.oids1, self.t1)
+        expected = 2.66666666667
+        self.assertAlmostEqual(actual, expected)
+        # sample D versus remaining
+        actual = weighted_unifrac(
+            self.b1[3], self.b1[4], self.oids1, self.t1)
+        expected = 2.66666666667
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[3], self.b1[5], self.oids1, self.t1)
+        expected = 1.33333333333
+        self.assertAlmostEqual(actual, expected)
+        # sample E versus remaining
+        actual = weighted_unifrac(
+            self.b1[4], self.b1[5], self.oids1, self.t1)
+        expected = 4.0
+        self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac_identity_normalized(self):
+        for i in range(len(self.b1)):
+            actual = weighted_unifrac(
+                self.b1[i], self.b1[i], self.oids1, self.t1, normalized=True)
+            expected = 0.0
+            self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac_symmetry_normalized(self):
+        for i in range(len(self.b1)):
+            for j in range(len(self.b1)):
+                actual = weighted_unifrac(
+                    self.b1[i], self.b1[j], self.oids1, self.t1,
+                    normalized=True)
+                expected = weighted_unifrac(
+                    self.b1[j], self.b1[i], self.oids1, self.t1,
+                    normalized=True)
+                self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac_non_overlapping_normalized(self):
+        # these communities only share the root node
+        actual = weighted_unifrac(
+            self.b1[4], self.b1[5], self.oids1, self.t1, normalized=True)
+        expected = 1.0
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            [1, 1, 1, 0, 0], [0, 0, 0, 1, 1], self.oids1, self.t1,
+            normalized=True)
+        expected = 1.0
+        self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac_zero_counts_normalized(self):
+        # expected results derived from QIIME 1.9.1, which is a completely
+        # different implementation from skbio's initial weighted unifrac
+        # implementation
+        actual = weighted_unifrac(
+            [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1,
+            normalized=True)
+        expected = 0.0
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            [1, 1, 1, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1,
+            normalized=True)
+        expected = 1.0
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            [], [], [], self.t1, normalized=True)
+        expected = 0.0
+        self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac_normalized(self):
+        # expected results derived from QIIME 1.9.1, which is a completely
+        # different implementation from skbio's initial weighted unifrac
+        # implementation
+        actual = weighted_unifrac(
+            self.b1[0], self.b1[1], self.oids1, self.t1, normalized=True)
+        expected = 0.6
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[0], self.b1[2], self.oids1, self.t1, normalized=True)
+        expected = 0.466666666667
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[0], self.b1[3], self.oids1, self.t1, normalized=True)
+        expected = 0.633333333333
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[0], self.b1[4], self.oids1, self.t1, normalized=True)
+        expected = 0.338461538462
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[0], self.b1[5], self.oids1, self.t1, normalized=True)
+        expected = 0.8
+        self.assertAlmostEqual(actual, expected)
+        # sample B versus remaining
+        actual = weighted_unifrac(
+            self.b1[1], self.b1[2], self.oids1, self.t1, normalized=True)
+        expected = 0.566666666667
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[1], self.b1[3], self.oids1, self.t1, normalized=True)
+        expected = 0.233333333333
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[1], self.b1[4], self.oids1, self.t1, normalized=True)
+        expected = 0.8
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[1], self.b1[5], self.oids1, self.t1, normalized=True)
+        expected = 0.209375
+        self.assertAlmostEqual(actual, expected)
+        # sample C versus remaining
+        actual = weighted_unifrac(
+            self.b1[2], self.b1[3], self.oids1, self.t1, normalized=True)
+        expected = 0.333333333333
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[2], self.b1[4], self.oids1, self.t1, normalized=True)
+        expected = 0.474358974359
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[2], self.b1[5], self.oids1, self.t1, normalized=True)
+        expected = 0.666666666667
+        self.assertAlmostEqual(actual, expected)
+        # sample D versus remaining
+        actual = weighted_unifrac(
+            self.b1[3], self.b1[4], self.oids1, self.t1, normalized=True)
+        expected = 0.666666666667
+        self.assertAlmostEqual(actual, expected)
+        actual = weighted_unifrac(
+            self.b1[3], self.b1[5], self.oids1, self.t1, normalized=True)
+        expected = 0.333333333333
+        self.assertAlmostEqual(actual, expected)
+        # sample E versus remaining
+        actual = weighted_unifrac(
+            self.b1[4], self.b1[5], self.oids1, self.t1, normalized=True)
+        expected = 1.0
+        self.assertAlmostEqual(actual, expected)
+
+    def test_weighted_unifrac_branch_correction(self):
+        # for the tree ((a:1, b:2)c:3,(d:4,e:5)f:6)root;
+        tip_ds = np.array([4, 5, 10, 11, 0, 0, 0])[:, np.newaxis]
+        u_counts = np.array([1, 1, 0, 0, 2, 0, 2])
+        v_counts = np.array([0, 2, 1, 0, 2, 1, 3])
+        u_sum = 2  # counts at the tips
+        v_sum = 3
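+        # the expected correction encoded below is the sum, over the four
+        # tips, of each tip-to-root distance weighted by that tip's combined
+        # relative abundance (u_counts/u_sum + v_counts/v_sum); the
+        # internal-node entries of tip_ds are zero, so they contribute nothing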
+        exp = np.array([2.0,
+                        5.0 * (.5 + (2.0/3.0)),
+                        10.0 * (1.0 / 3.0),
+                        0.0]).sum()
+        obs = _weighted_unifrac_branch_correction(
+            tip_ds, u_counts/u_sum, v_counts/v_sum)
+        self.assertEqual(obs, exp)
+
+    def test_unweighted_unifrac_pycogent_adapted(self):
+        # adapted from PyCogent unit tests
+        m = np.array([[1, 0, 1], [1, 1, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0],
+                      [0, 1, 1], [1, 1, 1], [0, 1, 1], [1, 1, 1]])
+        # lengths from ((a:1,b:2):4,(c:3,(d:1,e:1):2):3)
+        bl = np.array([1, 2, 1, 1, 3, 2, 4, 3, 0], dtype=float)
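+        # each expected fraction is the branch length unique to one of the
+        # two communities divided by the total branch length observed in
+        # either community, e.g. 10/16 for the first two columns of m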
+        self.assertEqual(_unweighted_unifrac(m[:, 0], m[:, 1], bl), 10/16.0)
+        self.assertEqual(_unweighted_unifrac(m[:, 0], m[:, 2], bl), 8/13.0)
+        self.assertEqual(_unweighted_unifrac(m[:, 1], m[:, 2], bl), 8/17.0)
+
+    def test_weighted_unifrac_pycogent_adapted(self):
+        # lengths from ((a:1,b:2):4,(c:3,(d:1,e:1):2):3)
+        bl = np.array([1, 2, 1, 1, 3, 2, 4, 3, 0], dtype=float)
+
+        # adapted from PyCogent unit tests
+        m = np.array([[1, 0, 1],  # a
+                      [1, 1, 0],  # b
+                      [0, 1, 0],  # d
+                      [0, 0, 1],  # e
+                      [0, 1, 0],  # c
+                      [0, 1, 1],  # parent of (d, e)
+                      [2, 1, 1],  # parent of a, b
+                      [0, 2, 1],  # parent of c (d, e)
+                      [2, 3, 2]])  # root
+
+        # sum just the counts at the tips
+        m0s = m[:5, 0].sum()
+        m1s = m[:5, 1].sum()
+        m2s = m[:5, 2].sum()
+
+        # scores computed by the educational implementation
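+        # each expected score is the sum over branches of
+        # bl[i] * abs(u[i] / u_sum - v[i] / v_sum); e.g. the first pair
+        # (columns 0 and 1 of m) gives 7.5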
+        self.assertAlmostEqual(
+            _weighted_unifrac(m[:, 0], m[:, 1], m0s, m1s, bl)[0], 7.5)
+        self.assertAlmostEqual(
+            _weighted_unifrac(m[:, 0], m[:, 2], m0s, m2s, bl)[0], 6.0)
+        self.assertAlmostEqual(
+            _weighted_unifrac(m[:, 1], m[:, 2], m1s, m2s, bl)[0], 4.5)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/draw/tests/__init__.py b/skbio/diversity/tests/__init__.py
similarity index 100%
rename from skbio/draw/tests/__init__.py
rename to skbio/diversity/tests/__init__.py
diff --git a/skbio/diversity/tests/test_driver.py b/skbio/diversity/tests/test_driver.py
new file mode 100644
index 0000000..15bf08f
--- /dev/null
+++ b/skbio/diversity/tests/test_driver.py
@@ -0,0 +1,632 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from unittest import TestCase, main
+
+import pandas as pd
+import numpy as np
+import numpy.testing as npt
+import six
+
+from skbio import DistanceMatrix, TreeNode
+from skbio.io._fileobject import StringIO
+from skbio.util._testing import assert_series_almost_equal
+from skbio.diversity import (alpha_diversity, beta_diversity,
+                             get_alpha_diversity_metrics,
+                             get_beta_diversity_metrics)
+from skbio.diversity.alpha import faith_pd, observed_otus
+from skbio.diversity.beta import unweighted_unifrac, weighted_unifrac
+from skbio.tree import DuplicateNodeError, MissingNodeError
+
+
+class AlphaDiversityTests(TestCase):
+    def setUp(self):
+        self.table1 = np.array([[1, 3, 0, 1, 0],
+                                [0, 2, 0, 4, 4],
+                                [0, 0, 6, 2, 1],
+                                [0, 0, 1, 1, 1]])
+        self.sids1 = list('ABCD')
+        self.oids1 = ['OTU%d' % i for i in range(1, 6)]
+        self.tree1 = TreeNode.read(StringIO(
+            u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
+            u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
+
+        self.table2 = np.array([[1, 3],
+                                [0, 2],
+                                [0, 0]])
+        self.sids2 = list('xyz')
+        self.oids2 = ['OTU1', 'OTU5']
+        self.tree2 = TreeNode.read(StringIO(
+            u'(((((OTU1:42.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
+            u'0.0,(OTU4:0.75,OTU5:0.0001):1.25):0.0)root;'))
+
+    def test_invalid_input(self):
+        # number of ids doesn't match the number of samples
+        self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
+                          self.table1, list('ABC'))
+
+        # unknown metric provided
+        self.assertRaises(ValueError, alpha_diversity, 'not-a-metric',
+                          self.table1)
+
+        # 3-D list provided as input
+        self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
+                          [[[43]]])
+
+        # negative counts
+        self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
+                          [0, 3, -12, 42])
+
+        # additional kwargs
+        self.assertRaises(TypeError, alpha_diversity, 'observed_otus',
+                          [0, 1], not_a_real_kwarg=42.0)
+        self.assertRaises(TypeError, alpha_diversity, 'faith_pd',
+                          [0, 1], tree=self.tree1, otu_ids=['OTU1', 'OTU2'],
+                          not_a_real_kwarg=42.0)
+        self.assertRaises(TypeError, alpha_diversity, faith_pd,
+                          [0, 1], tree=self.tree1, otu_ids=['OTU1', 'OTU2'],
+                          not_a_real_kwarg=42.0)
+
+    def test_invalid_input_phylogenetic(self):
+        # otu_ids not provided
+        self.assertRaises(ValueError, alpha_diversity, 'faith_pd', self.table1,
+                          list('ABC'), tree=self.tree1)
+        # tree not provided
+        self.assertRaises(ValueError, alpha_diversity, 'faith_pd', self.table1,
+                          list('ABC'), otu_ids=self.oids1)
+
+        # tree has duplicated tip ids
+        t = TreeNode.read(
+            StringIO(u'(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(DuplicateNodeError, alpha_diversity, 'faith_pd',
+                          counts, otu_ids=otu_ids, tree=t)
+
+        # unrooted tree as input
+        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
+                                   u'OTU4:0.7);'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
+                          counts, otu_ids=otu_ids, tree=t)
+
+        # otu_ids has duplicated ids
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU2']
+        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
+                          counts, otu_ids=otu_ids, tree=t)
+
+        # count and OTU vectors are not equal length
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2']
+        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
+                          counts, otu_ids=otu_ids, tree=t)
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+        counts = [1, 2]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
+                          counts, otu_ids=otu_ids, tree=t)
+
+        # tree with no branch lengths
+        t = TreeNode.read(
+            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
+                          counts, otu_ids=otu_ids, tree=t)
+
+        # tree missing some branch lengths
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
+                          counts, otu_ids=otu_ids, tree=t)
+
+        # some otu_ids not present in tree
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU42']
+        self.assertRaises(MissingNodeError, alpha_diversity, 'faith_pd',
+                          counts, otu_ids=otu_ids, tree=t)
+
+    def test_empty(self):
+        # empty vector
+        actual = alpha_diversity('observed_otus', np.array([], dtype=np.int64))
+        expected = pd.Series([0])
+        assert_series_almost_equal(actual, expected)
+
+        # array of empty vector
+        actual = alpha_diversity('observed_otus',
+                                 np.array([[]], dtype=np.int64))
+        expected = pd.Series([0])
+        assert_series_almost_equal(actual, expected)
+
+        # array of empty vectors
+        actual = alpha_diversity('observed_otus',
+                                 np.array([[], []], dtype=np.int64))
+        expected = pd.Series([0, 0])
+        assert_series_almost_equal(actual, expected)
+
+        # empty vector
+        actual = alpha_diversity('faith_pd', np.array([], dtype=np.int64),
+                                 tree=self.tree1, otu_ids=[])
+        expected = pd.Series([0.])
+        assert_series_almost_equal(actual, expected)
+
+        # array of empty vector
+        actual = alpha_diversity('faith_pd',
+                                 np.array([[]], dtype=np.int64),
+                                 tree=self.tree1, otu_ids=[])
+        expected = pd.Series([0.])
+        assert_series_almost_equal(actual, expected)
+
+        # array of empty vectors
+        actual = alpha_diversity('faith_pd',
+                                 np.array([[], []], dtype=np.int64),
+                                 tree=self.tree1, otu_ids=[])
+        expected = pd.Series([0., 0.])
+        assert_series_almost_equal(actual, expected)
+
+    def test_single_count_vector(self):
+        actual = alpha_diversity('observed_otus', np.array([1, 0, 2]))
+        expected = pd.Series([2])
+        assert_series_almost_equal(actual, expected)
+
+        actual = alpha_diversity('faith_pd', np.array([1, 3, 0, 1, 0]),
+                                 tree=self.tree1, otu_ids=self.oids1)
+        self.assertAlmostEqual(actual[0], 4.5)
+
+    def test_input_types(self):
+        list_result = alpha_diversity('observed_otus', [1, 3, 0, 1, 0])
+        array_result = alpha_diversity('observed_otus',
+                                       np.array([1, 3, 0, 1, 0]))
+        self.assertAlmostEqual(list_result[0], 3)
+        assert_series_almost_equal(list_result, array_result)
+
+        list_result = alpha_diversity('faith_pd', [1, 3, 0, 1, 0],
+                                      tree=self.tree1, otu_ids=self.oids1)
+        array_result = alpha_diversity('faith_pd', np.array([1, 3, 0, 1, 0]),
+                                       tree=self.tree1, otu_ids=self.oids1)
+        self.assertAlmostEqual(list_result[0], 4.5)
+        assert_series_almost_equal(list_result, array_result)
+
+    def test_observed_otus(self):
+        # expected values hand-calculated
+        expected = pd.Series([3, 3, 3, 3], index=self.sids1)
+        actual = alpha_diversity('observed_otus', self.table1, self.sids1)
+        assert_series_almost_equal(actual, expected)
+        # function passed instead of string
+        actual = alpha_diversity(observed_otus, self.table1, self.sids1)
+        assert_series_almost_equal(actual, expected)
+        # alt input table
+        expected = pd.Series([2, 1, 0], index=self.sids2)
+        actual = alpha_diversity('observed_otus', self.table2, self.sids2)
+        assert_series_almost_equal(actual, expected)
+
+    def test_faith_pd(self):
+        # calling faith_pd through alpha_diversity gives the same results as
+        # calling it directly
+        expected = []
+        for e in self.table1:
+            expected.append(faith_pd(e, tree=self.tree1, otu_ids=self.oids1))
+        expected = pd.Series(expected)
+        actual = alpha_diversity('faith_pd', self.table1, tree=self.tree1,
+                                 otu_ids=self.oids1)
+        assert_series_almost_equal(actual, expected)
+
+        # alt input table and tree
+        expected = []
+        for e in self.table2:
+            expected.append(faith_pd(e, tree=self.tree2, otu_ids=self.oids2))
+        expected = pd.Series(expected)
+        actual = alpha_diversity('faith_pd', self.table2, tree=self.tree2,
+                                 otu_ids=self.oids2)
+        assert_series_almost_equal(actual, expected)
+
+    def test_no_ids(self):
+        # expected values hand-calculated
+        expected = pd.Series([3, 3, 3, 3])
+        actual = alpha_diversity('observed_otus', self.table1)
+        assert_series_almost_equal(actual, expected)
+
+    def test_optimized(self):
+        # calling the optimized faith_pd gives the same results as calling
+        # the unoptimized version
+        optimized = alpha_diversity('faith_pd', self.table1, tree=self.tree1,
+                                    otu_ids=self.oids1)
+        unoptimized = alpha_diversity(faith_pd, self.table1, tree=self.tree1,
+                                      otu_ids=self.oids1)
+        assert_series_almost_equal(optimized, unoptimized)
+
+
+class BetaDiversityTests(TestCase):
+    def setUp(self):
+        self.table1 = [[1, 5],
+                       [2, 3],
+                       [0, 1]]
+        self.sids1 = list('ABC')
+        self.tree1 = TreeNode.read(StringIO(
+            '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))
+        self.oids1 = ['O1', 'O2']
+
+        self.table2 = [[23, 64, 14, 0, 0, 3, 1],
+                       [0, 3, 35, 42, 0, 12, 1],
+                       [0, 5, 5, 0, 40, 40, 0],
+                       [44, 35, 9, 0, 1, 0, 0],
+                       [0, 2, 8, 0, 35, 45, 1],
+                       [0, 0, 25, 35, 0, 19, 0]]
+        self.sids2 = list('ABCDEF')
+
+    def test_invalid_input(self):
+        # number of ids doesn't match the number of samples
+        error_msg = ("Number of rows")
+        with six.assertRaisesRegex(self, ValueError, error_msg):
+            beta_diversity('euclidean', self.table1, list('AB'))
+
+        # unknown metric provided
+        error_msg = "not-a-metric"
+        with six.assertRaisesRegex(self, ValueError, error_msg):
+            beta_diversity('not-a-metric', self.table1)
+
+        # 3-D list provided as input
+        error_msg = ("Only 1-D and 2-D")
+        with six.assertRaisesRegex(self, ValueError, error_msg):
+            beta_diversity('euclidean', [[[43]]])
+
+        # negative counts
+        error_msg = "negative values."
+        with six.assertRaisesRegex(self, ValueError, error_msg):
+            beta_diversity('euclidean', [[0, 1, 3, 4], [0, 3, -12, 42]])
+        with six.assertRaisesRegex(self, ValueError, error_msg):
+            beta_diversity('euclidean', [[0, 1, 3, -4], [0, 3, 12, 42]])
+
+        # additional kwargs
+        error_msg = ("'not_a_real_kwarg'")
+        with six.assertRaisesRegex(self, TypeError, error_msg):
+            beta_diversity('euclidean', [[0, 1, 3], [0, 3, 12]],
+                           not_a_real_kwarg=42.0)
+        with six.assertRaisesRegex(self, TypeError, error_msg):
+            beta_diversity('unweighted_unifrac', [[0, 1, 3], [0, 3, 12]],
+                           not_a_real_kwarg=42.0, tree=self.tree1,
+                           otu_ids=['O1', 'O2', 'O3'])
+        with six.assertRaisesRegex(self, TypeError, error_msg):
+            beta_diversity('weighted_unifrac', [[0, 1, 3], [0, 3, 12]],
+                           not_a_real_kwarg=42.0, tree=self.tree1,
+                           otu_ids=['O1', 'O2', 'O3'])
+        with six.assertRaisesRegex(self, TypeError, error_msg):
+            beta_diversity(weighted_unifrac, [[0, 1, 3], [0, 3, 12]],
+                           not_a_real_kwarg=42.0, tree=self.tree1,
+                           otu_ids=['O1', 'O2', 'O3'])
+
+    def test_invalid_input_phylogenetic(self):
+        # otu_ids not provided
+        self.assertRaises(ValueError, beta_diversity, 'weighted_unifrac',
+                          self.table1, list('ABC'), tree=self.tree1)
+        self.assertRaises(ValueError, beta_diversity, 'unweighted_unifrac',
+                          self.table1, list('ABC'), tree=self.tree1)
+        # tree not provided
+        self.assertRaises(ValueError, beta_diversity, 'weighted_unifrac',
+                          self.table1, list('ABC'), otu_ids=self.oids1)
+        self.assertRaises(ValueError, beta_diversity, 'unweighted_unifrac',
+                          self.table1, list('ABC'), otu_ids=self.oids1)
+
+        # tree has duplicated tip ids
+        t = TreeNode.read(
+            StringIO(u'(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(DuplicateNodeError, beta_diversity,
+                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
+        self.assertRaises(DuplicateNodeError, beta_diversity,
+                          'unweighted_unifrac', counts, otu_ids=otu_ids,
+                          tree=t)
+
+        # unrooted tree as input
+        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
+                                   u'OTU4:0.7);'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, beta_diversity,
+                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
+        self.assertRaises(ValueError, beta_diversity,
+                          'unweighted_unifrac', counts, otu_ids=otu_ids,
+                          tree=t)
+
+        # otu_ids has duplicated ids
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU2']
+        self.assertRaises(ValueError, beta_diversity,
+                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
+        self.assertRaises(ValueError, beta_diversity,
+                          'unweighted_unifrac', counts, otu_ids=otu_ids,
+                          tree=t)
+
+        # count and OTU vectors are not equal length
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2']
+        self.assertRaises(ValueError, beta_diversity,
+                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
+        self.assertRaises(ValueError, beta_diversity,
+                          'unweighted_unifrac', counts, otu_ids=otu_ids,
+                          tree=t)
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+        counts = [1, 2]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, beta_diversity,
+                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
+        self.assertRaises(ValueError, beta_diversity,
+                          'unweighted_unifrac', counts, otu_ids=otu_ids,
+                          tree=t)
+
+        # tree with no branch lengths
+        t = TreeNode.read(
+            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, beta_diversity,
+                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
+        self.assertRaises(ValueError, beta_diversity,
+                          'unweighted_unifrac', counts, otu_ids=otu_ids,
+                          tree=t)
+
+        # tree missing some branch lengths
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, beta_diversity,
+                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
+        self.assertRaises(ValueError, beta_diversity,
+                          'unweighted_unifrac', counts, otu_ids=otu_ids,
+                          tree=t)
+
+        # some otu_ids not present in tree
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU42']
+        self.assertRaises(MissingNodeError, beta_diversity,
+                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
+        self.assertRaises(MissingNodeError, beta_diversity,
+                          'unweighted_unifrac', counts, otu_ids=otu_ids,
+                          tree=t)
+
+    def test_empty(self):
+        # array of empty vectors
+        actual = beta_diversity('euclidean',
+                                np.array([[], []], dtype=np.int64),
+                                ids=['a', 'b'])
+        expected_dm = DistanceMatrix([[0.0, 0.0], [0.0, 0.0]], ['a', 'b'])
+        npt.assert_array_equal(actual, expected_dm)
+
+        actual = beta_diversity('unweighted_unifrac',
+                                np.array([[], []], dtype=np.int64),
+                                ids=['a', 'b'], tree=self.tree1, otu_ids=[])
+        expected_dm = DistanceMatrix([[0.0, 0.0], [0.0, 0.0]], ['a', 'b'])
+        self.assertEqual(actual, expected_dm)
+
+    def test_input_types(self):
+        actual_array = beta_diversity('euclidean',
+                                      np.array([[1, 5], [2, 3]]),
+                                      ids=['a', 'b'])
+        actual_list = beta_diversity('euclidean',
+                                     [[1, 5], [2, 3]], ids=['a', 'b'])
+        self.assertEqual(actual_array, actual_list)
+
+    def test_euclidean(self):
+        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
+        # near-equality testing when that support is available
+        actual_dm = beta_diversity('euclidean', self.table1, self.sids1)
+        self.assertEqual(actual_dm.shape, (3, 3))
+        npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
+        npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
+        npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
+        npt.assert_almost_equal(actual_dm['A', 'B'], 2.23606798)
+        npt.assert_almost_equal(actual_dm['B', 'A'], 2.23606798)
+        npt.assert_almost_equal(actual_dm['A', 'C'], 4.12310563)
+        npt.assert_almost_equal(actual_dm['C', 'A'], 4.12310563)
+        npt.assert_almost_equal(actual_dm['B', 'C'], 2.82842712)
+        npt.assert_almost_equal(actual_dm['C', 'B'], 2.82842712)
+
+        actual_dm = beta_diversity('euclidean', self.table2, self.sids2)
+        expected_data = [
+            [0., 80.8455317, 84.0297566, 36.3042697, 86.0116271, 78.9176786],
+            [80.8455317, 0., 71.0844568, 74.4714710, 69.3397433, 14.422205],
+            [84.0297566, 71.0844568, 0., 77.2851861, 8.3066238, 60.7536007],
+            [36.3042697, 74.4714710, 77.2851861, 0., 78.7908624, 70.7389567],
+            [86.0116271, 69.3397433, 8.3066238, 78.7908624, 0., 58.4807660],
+            [78.9176786, 14.422205, 60.7536007, 70.7389567, 58.4807660, 0.]]
+        expected_dm = DistanceMatrix(expected_data, self.sids2)
+        for id1 in self.sids2:
+            for id2 in self.sids2:
+                npt.assert_almost_equal(actual_dm[id1, id2],
+                                        expected_dm[id1, id2], 6)
+
+    def test_braycurtis(self):
+        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
+        # near-equality testing when that support is available
+        actual_dm = beta_diversity('braycurtis', self.table1, self.sids1)
+        self.assertEqual(actual_dm.shape, (3, 3))
+        npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
+        npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
+        npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
+        npt.assert_almost_equal(actual_dm['A', 'B'], 0.27272727)
+        npt.assert_almost_equal(actual_dm['B', 'A'], 0.27272727)
+        npt.assert_almost_equal(actual_dm['A', 'C'], 0.71428571)
+        npt.assert_almost_equal(actual_dm['C', 'A'], 0.71428571)
+        npt.assert_almost_equal(actual_dm['B', 'C'], 0.66666667)
+        npt.assert_almost_equal(actual_dm['C', 'B'], 0.66666667)
+
+        actual_dm = beta_diversity('braycurtis', self.table2, self.sids2)
+        expected_data = [
+            [0., 0.78787879, 0.86666667, 0.30927835, 0.85714286, 0.81521739],
+            [0.78787879, 0., 0.78142077, 0.86813187, 0.75, 0.1627907],
+            [0.86666667, 0.78142077, 0., 0.87709497, 0.09392265, 0.71597633],
+            [0.30927835, 0.86813187, 0.87709497, 0., 0.87777778, 0.89285714],
+            [0.85714286, 0.75, 0.09392265, 0.87777778, 0., 0.68235294],
+            [0.81521739, 0.1627907, 0.71597633, 0.89285714, 0.68235294, 0.]]
+        expected_dm = DistanceMatrix(expected_data, self.sids2)
+        for id1 in self.sids2:
+            for id2 in self.sids2:
+                npt.assert_almost_equal(actual_dm[id1, id2],
+                                        expected_dm[id1, id2], 6)
+
+    def test_unweighted_unifrac(self):
+        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
+        # near-equality testing when that support is available
+        # expected values calculated by hand
+        dm1 = beta_diversity('unweighted_unifrac', self.table1, self.sids1,
+                             otu_ids=self.oids1, tree=self.tree1)
+        dm2 = beta_diversity(unweighted_unifrac, self.table1, self.sids1,
+                             otu_ids=self.oids1, tree=self.tree1)
+        self.assertEqual(dm1.shape, (3, 3))
+        self.assertEqual(dm1, dm2)
+        expected_data = [[0.0, 0.0, 0.25],
+                         [0.0, 0.0, 0.25],
+                         [0.25, 0.25, 0.0]]
+        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
+        for id1 in self.sids1:
+            for id2 in self.sids1:
+                npt.assert_almost_equal(dm1[id1, id2],
+                                        expected_dm[id1, id2], 6)
+
+    def test_weighted_unifrac(self):
+        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
+        # near-equality testing when that support is available
+        # expected values calculated by hand
+        dm1 = beta_diversity('weighted_unifrac', self.table1, self.sids1,
+                             otu_ids=self.oids1, tree=self.tree1)
+        dm2 = beta_diversity(weighted_unifrac, self.table1, self.sids1,
+                             otu_ids=self.oids1, tree=self.tree1)
+        self.assertEqual(dm1.shape, (3, 3))
+        self.assertEqual(dm1, dm2)
+        expected_data = [
+            [0.0, 0.1750000, 0.12499999],
+            [0.1750000, 0.0, 0.3000000],
+            [0.12499999, 0.3000000, 0.0]]
+        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
+        for id1 in self.sids1:
+            for id2 in self.sids1:
+                npt.assert_almost_equal(dm1[id1, id2],
+                                        expected_dm[id1, id2], 6)
+
+    def test_weighted_unifrac_normalized(self):
+        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
+        # near-equality testing when that support is available
+        # expected values calculated by hand
+        dm1 = beta_diversity('weighted_unifrac', self.table1, self.sids1,
+                             otu_ids=self.oids1, tree=self.tree1,
+                             normalized=True)
+        dm2 = beta_diversity(weighted_unifrac, self.table1, self.sids1,
+                             otu_ids=self.oids1, tree=self.tree1,
+                             normalized=True)
+        self.assertEqual(dm1.shape, (3, 3))
+        self.assertEqual(dm1, dm2)
+        expected_data = [
+            [0.0, 0.128834, 0.085714],
+            [0.128834, 0.0, 0.2142857],
+            [0.085714, 0.2142857, 0.0]]
+        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
+        for id1 in self.sids1:
+            for id2 in self.sids1:
+                npt.assert_almost_equal(dm1[id1, id2],
+                                        expected_dm[id1, id2], 6)
+
+    def test_scipy_kwargs(self):
+        # confirm that p can be passed to SciPy's minkowski, and that it
+        # gives a different result than when it is not passed (the
+        # off-diagonal entries are not equal).
+        dm1 = beta_diversity('minkowski', self.table1, self.sids1)
+        dm2 = beta_diversity('minkowski', self.table1, self.sids1, p=42.0)
+
+        for id1 in self.sids1:
+            for id2 in self.sids1:
+                if id1 != id2:
+                    self.assertNotEqual(dm1[id1, id2], dm2[id1, id2])
+
+    def test_alt_pairwise_func(self):
+        # confirm that pairwise_func is actually being used
+        def not_a_real_pdist(counts, metric):
+            return [[0.0, 42.0], [42.0, 0.0]]
+        dm1 = beta_diversity('unweighted_unifrac', self.table1,
+                             otu_ids=self.oids1, tree=self.tree1,
+                             pairwise_func=not_a_real_pdist)
+        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
+        self.assertEqual(dm1, expected)
+
+        dm1 = beta_diversity('weighted_unifrac', self.table1,
+                             otu_ids=self.oids1, tree=self.tree1,
+                             pairwise_func=not_a_real_pdist)
+        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
+        self.assertEqual(dm1, expected)
+
+        dm1 = beta_diversity(unweighted_unifrac, self.table1,
+                             otu_ids=self.oids1, tree=self.tree1,
+                             pairwise_func=not_a_real_pdist)
+        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
+        self.assertEqual(dm1, expected)
+
+        dm1 = beta_diversity("euclidean", self.table1,
+                             pairwise_func=not_a_real_pdist)
+        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
+        self.assertEqual(dm1, expected)
+
+
+class MetricGetters(TestCase):
+
+    def test_get_alpha_diversity_metrics(self):
+        m = get_alpha_diversity_metrics()
+        # basic sanity checks
+        self.assertTrue('faith_pd' in m)
+        self.assertTrue('chao1' in m)
+
+    def test_get_alpha_diversity_metrics_sorted(self):
+        m = get_alpha_diversity_metrics()
+        n = sorted(list(m))
+        self.assertEqual(m, n)
+
+    def test_get_beta_diversity_metrics(self):
+        m = get_beta_diversity_metrics()
+        # basic sanity checks
+        self.assertTrue('unweighted_unifrac' in m)
+        self.assertTrue('weighted_unifrac' in m)
+
+    def test_get_beta_diversity_metrics_sorted(self):
+        m = get_beta_diversity_metrics()
+        n = sorted(list(m))
+        self.assertEqual(m, n)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/skbio/diversity/tests/test_util.py b/skbio/diversity/tests/test_util.py
new file mode 100644
index 0000000..e0f88d6
--- /dev/null
+++ b/skbio/diversity/tests/test_util.py
@@ -0,0 +1,240 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+import numpy.testing as npt
+
+from unittest import TestCase, main
+
+from skbio.io._fileobject import StringIO
+from skbio import TreeNode
+from skbio.diversity._util import (_validate_counts_vector,
+                                   _validate_counts_matrix,
+                                   _validate_otu_ids_and_tree,
+                                   _vectorize_counts_and_tree)
+from skbio.tree import DuplicateNodeError, MissingNodeError
+
+
+class ValidationTests(TestCase):
+
+    def test_validate_counts_vector(self):
+        # python list
+        obs = _validate_counts_vector([0, 2, 1, 3])
+        npt.assert_array_equal(obs, np.array([0, 2, 1, 3]))
+        self.assertEqual(obs.dtype, int)
+
+        # numpy array (no copy made)
+        data = np.array([0, 2, 1, 3])
+        obs = _validate_counts_vector(data)
+        npt.assert_array_equal(obs, data)
+        self.assertEqual(obs.dtype, int)
+        self.assertTrue(obs is data)
+
+        # single element
+        obs = _validate_counts_vector([42])
+        npt.assert_array_equal(obs, np.array([42]))
+        self.assertEqual(obs.dtype, int)
+        self.assertEqual(obs.shape, (1,))
+
+        # suppress casting to int
+        obs = _validate_counts_vector([42.2, 42.1, 0], suppress_cast=True)
+        npt.assert_array_equal(obs, np.array([42.2, 42.1, 0]))
+        self.assertEqual(obs.dtype, float)
+
+        # all zeros
+        obs = _validate_counts_vector([0, 0, 0])
+        npt.assert_array_equal(obs, np.array([0, 0, 0]))
+        self.assertEqual(obs.dtype, int)
+
+        # all zeros (single value)
+        obs = _validate_counts_vector([0])
+        npt.assert_array_equal(obs, np.array([0]))
+        self.assertEqual(obs.dtype, int)
+
+    def test_validate_counts_vector_invalid_input(self):
+        # wrong dtype
+        with self.assertRaises(TypeError):
+            _validate_counts_vector([0, 2, 1.2, 3])
+
+        # wrong number of dimensions (2-D)
+        with self.assertRaises(ValueError):
+            _validate_counts_vector([[0, 2, 1, 3], [4, 5, 6, 7]])
+
+        # wrong number of dimensions (scalar)
+        with self.assertRaises(ValueError):
+            _validate_counts_vector(1)
+
+        # negative values
+        with self.assertRaises(ValueError):
+            _validate_counts_vector([0, 0, 2, -1, 3])
+
+    def test_validate_counts_matrix(self):
+        # basic valid input (n=2)
+        obs = _validate_counts_matrix([[0, 1, 1, 0, 2],
+                                       [0, 0, 2, 1, 3]])
+        npt.assert_array_equal(obs[0], np.array([0, 1, 1, 0, 2]))
+        npt.assert_array_equal(obs[1], np.array([0, 0, 2, 1, 3]))
+
+        # basic valid input (n=3)
+        obs = _validate_counts_matrix([[0, 1, 1, 0, 2],
+                                       [0, 0, 2, 1, 3],
+                                       [1, 1, 1, 1, 1]])
+        npt.assert_array_equal(obs[0], np.array([0, 1, 1, 0, 2]))
+        npt.assert_array_equal(obs[1], np.array([0, 0, 2, 1, 3]))
+        npt.assert_array_equal(obs[2], np.array([1, 1, 1, 1, 1]))
+
+        # empty counts vectors
+        obs = _validate_counts_matrix(np.array([[], []], dtype=int))
+        npt.assert_array_equal(obs[0], np.array([]))
+        npt.assert_array_equal(obs[1], np.array([]))
+
+    def test_validate_counts_matrix_suppress_cast(self):
+        # suppress_cast is passed through to _validate_counts_vector
+        obs = _validate_counts_matrix(
+            [[42.2, 42.1, 0], [42.2, 42.1, 1.0]], suppress_cast=True)
+        npt.assert_array_equal(obs[0], np.array([42.2, 42.1, 0]))
+        npt.assert_array_equal(obs[1], np.array([42.2, 42.1, 1.0]))
+        self.assertEqual(obs[0].dtype, float)
+        self.assertEqual(obs[1].dtype, float)
+        with self.assertRaises(TypeError):
+            _validate_counts_matrix([[0.0], [1]], suppress_cast=False)
+
+    def test_validate_counts_matrix_negative_counts(self):
+        with self.assertRaises(ValueError):
+            _validate_counts_matrix([[0, 1, 1, 0, 2], [0, 0, 2, -1, 3]])
+        with self.assertRaises(ValueError):
+            _validate_counts_matrix([[0, 0, 2, -1, 3], [0, 1, 1, 0, 2]])
+
+    def test_validate_counts_matrix_unequal_lengths(self):
+        # len of vectors not equal
+        with self.assertRaises(ValueError):
+            _validate_counts_matrix([[0], [0, 0], [9, 8]])
+        with self.assertRaises(ValueError):
+            _validate_counts_matrix([[0, 0], [0, 0, 8], [9, 8]])
+        with self.assertRaises(ValueError):
+            _validate_counts_matrix([[0, 0, 75], [0, 0, 3], [9, 8, 22, 44]])
+
+    def test_validate_otu_ids_and_tree(self):
+        # basic valid input
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
+
+        # all tips observed
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 1, 1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5']
+        self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
+
+        # no tips observed
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = []
+        otu_ids = []
+        self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
+
+        # all counts zero
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [0, 0, 0, 0, 0]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5']
+        self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
+
+    def test_validate_otu_ids_and_tree_invalid_input(self):
+        # tree has duplicated tip ids
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+        counts = [1, 1, 1]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(DuplicateNodeError, _validate_otu_ids_and_tree,
+                          counts, otu_ids, t)
+
+        # unrooted tree as input
+        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
+                                   u'OTU4:0.7);'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
+                          otu_ids, t)
+
+        # otu_ids has duplicated ids
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU2']
+        self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
+                          otu_ids, t)
+
+        # len of vectors not equal
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
+                          otu_ids, t)
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2']
+        self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
+                          otu_ids, t)
+
+        # tree with no branch lengths
+        t = TreeNode.read(
+            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
+                          otu_ids, t)
+
+        # tree missing some branch lengths
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU3']
+        self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
+                          otu_ids, t)
+
+        # otu_ids not present in tree
+        t = TreeNode.read(
+            StringIO(u'(((((OTU1:0.25,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+        counts = [1, 2, 3]
+        otu_ids = ['OTU1', 'OTU2', 'OTU32']
+        self.assertRaises(MissingNodeError, _validate_otu_ids_and_tree, counts,
+                          otu_ids, t)
+
+        # single node tree
+        t = TreeNode.read(StringIO(u'root;'))
+        counts = []
+        otu_ids = []
+        self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
+                          otu_ids, t)
+
+    def test_vectorize_counts_and_tree(self):
+        t = TreeNode.read(StringIO(u"((a:1, b:2)c:3)root;"))
+        counts = np.array([[0, 1], [1, 5], [10, 1]])
+        count_array, indexed, branch_lengths = \
+            _vectorize_counts_and_tree(counts, np.array(['a', 'b']), t)
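+        # tip counts are accumulated into their ancestors: the rows of
+        # exp_counts are the tips a and b followed by the internal nodes c
+        # and root, each of which carries the per-sample sum of the a and b
+        # counts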
+        exp_counts = np.array([[0, 1, 10], [1, 5, 1], [1, 6, 11], [1, 6, 11]])
+        npt.assert_equal(count_array, exp_counts.T)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/skbio/draw/__init__.py b/skbio/draw/__init__.py
deleted file mode 100644
index a2469b5..0000000
--- a/skbio/draw/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-Visualizations (:mod:`skbio.draw`)
-==================================
-
-.. currentmodule:: skbio.draw
-
-This module provides functionality for visualization of data.
-
-Distribution visualizations
----------------------------
-
-Functions
-^^^^^^^^^
-
-.. autosummary::
-   :toctree: generated/
-
-   boxplots
-   grouped_distributions
-
-"""
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-
-from skbio.util import TestRunner
-
-from ._distributions import boxplots, grouped_distributions
-
-__all__ = ['boxplots', 'grouped_distributions']
-
-test = TestRunner(__file__).test
diff --git a/skbio/draw/_distributions.py b/skbio/draw/_distributions.py
deleted file mode 100644
index da9d3df..0000000
--- a/skbio/draw/_distributions.py
+++ /dev/null
@@ -1,711 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-from future.builtins import map, range, zip
-
-from itertools import cycle
-import warnings
-
-import numpy as np
-import matplotlib.pyplot as plt
-from matplotlib.lines import Line2D
-from matplotlib.patches import Polygon, Rectangle
-import six
-
-from skbio.util._decorator import deprecated
-
-distribution_plot_deprecation_p = {
-    'as_of': '0.4.0', 'until': '0.4.1', 'reason': (
-        "Plots that are not specific to bioinformatics should be generated "
-        "with seaborn or another general-purpose plotting package."
-    )}
-
-
-@deprecated(**distribution_plot_deprecation_p)
-def boxplots(distributions, x_values=None, x_tick_labels=None, title=None,
-             x_label=None, y_label=None, x_tick_labels_orientation='vertical',
-             y_min=None, y_max=None, whisker_length=1.5, box_width=0.5,
-             box_colors=None, figure_width=None, figure_height=None,
-             legend=None):
-    """Generate a figure with a boxplot for each distribution.
-
-    Parameters
-    ----------
-    distributions: 2-D array_like
-        Distributions to plot. A boxplot will be created for each distribution.
-    x_values : list of numbers, optional
-        List indicating where each boxplot should be placed. Must be the same
-        length as `distributions` if provided.
-    x_tick_labels : list of str, optional
-        List of x-axis tick labels.
-    title : str, optional
-        Title of the plot.
-    x_label : str, optional
-        x-axis label.
-    y_label : str, optional
-        y-axis label.
-    x_tick_labels_orientation : {'vertical', 'horizontal'}
-        Orientation of the x-axis labels.
-    y_min : scalar, optional
-        Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
-    y_max : scalar, optional
-        Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
-    whisker_length : scalar, optional
-        Length of the whiskers as a function of the IQR. For example, if 1.5,
-        the whiskers extend to ``1.5 * IQR``. Anything outside of that range is
-        treated as an outlier.
-    box_width : scalar, optional
-        Width of each box in plot units.
-    box_colors : str, tuple, or list of colors, optional
-        Either a matplotlib-compatible string or tuple that indicates the color
-        to be used for every boxplot, or a list of colors to color each boxplot
-        individually. If ``None``, boxes will be the same color as the plot
-        background. If a list of colors is provided, a color must be provided
-        for each boxplot. Can also supply ``None`` instead of a color, which
-        will color the box the same color as the plot background.
-    figure_width : scalar, optional
-        Width of the plot figure in inches. If not provided, will default to
-        matplotlib's default figure width.
-    figure_height : scalar, optional
-        Height of the plot figure in inches. If not provided, will default to
-        matplotlib's default figure height.
-    legend : tuple or list, optional
-        Two-element tuple or list that contains a list of valid matplotlib
-        colors as the first element and a list of labels (strings) as the
-        second element. The lengths of the first and second elements must be
-        the same. If ``None``, a legend will not be plotted.
-
-    Returns
-    -------
-    matplotlib.figure.Figure
-        Figure containing a boxplot for each distribution.
-
-    See Also
-    --------
-    matplotlib.pyplot.boxplot
-    scipy.stats.ttest_ind
-
-    Notes
-    -----
-    This is a convenience wrapper around matplotlib's ``boxplot`` function that
-    allows for coloring of boxplots and legend generation.
-
-    Examples
-    --------
-    Create a plot with two boxplots:
-
-    .. plot::
-
-       >>> from skbio.draw import boxplots
-       >>> fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])
-
-    Plot three distributions with custom colors and labels:
-
-    .. plot::
-
-       >>> from skbio.draw import boxplots
-       >>> fig = boxplots(
-       ...     [[2, 2, 1, 3], [0, -1, 0, 0.1, 0.3], [4, 5, 6, 3]],
-       ...     x_tick_labels=('Control', 'Treatment 1', 'Treatment 2'),
-       ...     box_colors=('green', 'blue', 'red'))
-
-    """
-    distributions = _validate_distributions(distributions)
-    num_dists = len(distributions)
-    _validate_x_values(x_values, x_tick_labels, num_dists)
-
-    # Create a new figure to plot our data on, and then plot the distributions.
-    fig, ax = plt.subplots()
-    box_plot = plt.boxplot(distributions, positions=x_values,
-                           whis=whisker_length, widths=box_width)
-
-    if box_colors is not None:
-        if _is_single_matplotlib_color(box_colors):
-            box_colors = [box_colors] * num_dists
-        _color_box_plot(ax, box_plot, box_colors)
-
-    # Set up the various plotting options, such as x- and y-axis labels, plot
-    # title, and x-axis values if they have been supplied.
-    _set_axes_options(ax, title, x_label, y_label,
-                      x_tick_labels=x_tick_labels,
-                      x_tick_labels_orientation=x_tick_labels_orientation,
-                      y_min=y_min, y_max=y_max)
-
-    if legend is not None:
-        if len(legend) != 2:
-            raise ValueError("Invalid legend was provided. The legend must be "
-                             "a two-element tuple/list where the first "
-                             "element is a list of colors and the second "
-                             "element is a list of labels.")
-        _create_legend(ax, legend[0], legend[1], 'colors')
-
-    _set_figure_size(fig, figure_width, figure_height)
-    return fig
-
-
-@deprecated(**distribution_plot_deprecation_p)
-def grouped_distributions(plot_type, data, x_values=None,
-                          data_point_labels=None, distribution_labels=None,
-                          distribution_markers=None, x_label=None,
-                          y_label=None, title=None,
-                          x_tick_labels_orientation='vertical', y_min=None,
-                          y_max=None, whisker_length=1.5,
-                          error_bar_type='stdv', distribution_width=None,
-                          figure_width=None, figure_height=None):
-    """Generate a figure with distributions grouped at points along the x-axis.
-
-    Parameters
-    ----------
-    plot_type : {'bar', 'scatter', 'box'}
-        Type of plot to visualize distributions with.
-    data : list of lists of lists
-        Each inner list represents a data point along the x-axis. Each data
-        point contains lists of data for each distribution in the group at that
-        point. This nesting allows for the grouping of distributions at each
-        data point.
-    x_values : list of scalars, optional
-        Spacing of data points along the x-axis. Must be the same length as the
-        number of data points and be in ascending sorted order. If not
-        provided, plots will be spaced evenly.
-    data_point_labels : list of str, optional
-        Labels for data points.
-    distribution_labels : list of str, optional
-        Labels for each distribution in a data point grouping.
-    distribution_markers : list of str or list of tuple, optional
-        Matplotlib-compatible strings or tuples that indicate the color or
-        symbol to be used to distinguish each distribution in a data point
-        grouping. Colors will be used for bar charts or box plots, while
-        symbols will be used for scatter plots.
-    x_label : str, optional
-        x-axis label.
-    y_label : str, optional
-        y-axis label.
-    title : str, optional
-        Plot title.
-    x_tick_labels_orientation : {'vertical', 'horizontal'}
-        Orientation of x-axis labels.
-    y_min : scalar, optional
-        Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
-    y_max : scalar, optional
-        Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
-    whisker_length : scalar, optional
-        If `plot_type` is ``'box'``, determines the length of the whiskers as a
-        function of the IQR. For example, if 1.5, the whiskers extend to
-        ``1.5 * IQR``. Anything outside of that range is seen as an outlier.
-        If `plot_type` is not ``'box'``, this parameter is ignored.
-    error_bar_type : {'stdv', 'sem'}
-        Type of error bars to use if `plot_type` is ``'bar'``. Can be either
-        ``'stdv'`` (for standard deviation) or ``'sem'`` for the standard error
-        of the mean. If `plot_type` is not ``'bar'``, this parameter is
-        ignored.
-    distribution_width : scalar, optional
-        Width in plot units of each individual distribution (e.g. each bar if
-        the plot type is a bar chart, or the width of each box if the plot type
-        is a boxplot). If None, will be automatically determined.
-    figure_width : scalar, optional
-        Width of the plot figure in inches. If not provided, will default to
-        matplotlib's default figure width.
-    figure_height : scalar, optional
-        Height of the plot figure in inches. If not provided, will default to
-        matplotlib's default figure height.
-
-    Returns
-    -------
-    matplotlib.figure.Figure
-        Figure containing distributions grouped at points along the x-axis.
-
-    Examples
-    --------
-    Create a plot with two distributions grouped at three points:
-
-    .. plot::
-
-       >>> from skbio.draw import grouped_distributions
-       >>> fig = grouped_distributions('bar',
-       ...                             [[[2, 2, 1,], [0, 1, 4]],
-       ...                             [[1, 1, 1], [4, 4.5]],
-       ...                             [[2.2, 2.4, 2.7, 1.0], [0, 0.2]]],
-       ...                             distribution_labels=['Treatment 1',
-       ...                                                  'Treatment 2'])
-
-    """
-    # Set up different behavior based on the plot type.
-    if plot_type == 'bar':
-        plotting_function = _plot_bar_data
-        distribution_centered = False
-        marker_type = 'colors'
-    elif plot_type == 'scatter':
-        plotting_function = _plot_scatter_data
-        distribution_centered = True
-        marker_type = 'symbols'
-    elif plot_type == 'box':
-        plotting_function = _plot_box_data
-        distribution_centered = True
-        marker_type = 'colors'
-    else:
-        raise ValueError("Invalid plot type '%s'. Supported plot types are "
-                         "'bar', 'scatter', or 'box'." % plot_type)
-
-    num_points, num_distributions = _validate_input(data, x_values,
-                                                    data_point_labels,
-                                                    distribution_labels)
-
-    # Create a list of matplotlib markers (colors or symbols) that can be used
-    # to distinguish each of the distributions. If the user provided a list of
-    # markers, use it and loop around to the beginning if there aren't enough
-    # markers. If they didn't provide a list, or it was empty, use our own
-    # predefined list of markers (again, loop around to the beginning if we
-    # need more markers).
-    distribution_markers = _get_distribution_markers(marker_type,
-                                                     distribution_markers,
-                                                     num_distributions)
-
-    # Now calculate where each of the data points will start on the x-axis.
-    x_locations = _calc_data_point_locations(num_points, x_values)
-    assert (len(x_locations) == num_points), "The number of x_locations " +\
-        "does not match the number of data points."
-
-    if distribution_width is None:
-        # Find the smallest gap between consecutive data points and divide this
-        # by the number of distributions + 1 for some extra spacing between
-        # data points.
-        min_gap = max(x_locations)
-        for i in range(len(x_locations) - 1):
-            curr_gap = x_locations[i + 1] - x_locations[i]
-            if curr_gap < min_gap:
-                min_gap = curr_gap
-
-        distribution_width = min_gap / float(num_distributions + 1)
-    else:
-        if distribution_width <= 0:
-            raise ValueError("The width of a distribution cannot be less than "
-                             "or equal to zero.")
-
-    result, plot_axes = plt.subplots()
-
-    # Iterate over each data point, and plot each of the distributions at that
-    # data point. Increase the offset after each distribution is plotted,
-    # so that the grouped distributions don't overlap.
-    for point, x_pos in zip(data, x_locations):
-        dist_offset = 0
-        for dist_index, dist, dist_marker in zip(range(num_distributions),
-                                                 point, distribution_markers):
-            dist_location = x_pos + dist_offset
-            plotting_function(plot_axes, dist, dist_marker, distribution_width,
-                              dist_location, whisker_length, error_bar_type)
-            dist_offset += distribution_width
-
-    # Set up various plot options that are best set after the plotting is done.
-    # The x-axis tick marks (one per data point) are centered on each group of
-    # distributions.
-    plot_axes.set_xticks(_calc_data_point_ticks(x_locations,
-                                                num_distributions,
-                                                distribution_width,
-                                                distribution_centered))
-    _set_axes_options(plot_axes, title, x_label, y_label, x_values,
-                      data_point_labels, x_tick_labels_orientation, y_min,
-                      y_max)
-
-    if distribution_labels is not None:
-        _create_legend(plot_axes, distribution_markers, distribution_labels,
-                       marker_type)
-
-    _set_figure_size(result, figure_width, figure_height)
-
-    # matplotlib seems to sometimes plot points on the rightmost edge of the
-    # plot without adding padding, so we need to add our own to both sides of
-    # the plot. For some reason this has to go after the call to draw(),
-    # otherwise matplotlib throws an exception saying it doesn't have a
-    # renderer. Boxplots need extra padding on the left.
-    if plot_type == 'box':
-        left_pad = 2 * distribution_width
-    else:
-        left_pad = distribution_width
-    plot_axes.set_xlim(plot_axes.get_xlim()[0] - left_pad,
-                       plot_axes.get_xlim()[1] + distribution_width)
-
-    return result
-
-
-def _validate_distributions(distributions):
-    dists = []
-    for distribution in distributions:
-        try:
-            distribution = np.asarray(distribution, dtype=float)
-        except ValueError:
-            raise ValueError("Each value in each distribution must be "
-                             "convertible to a number.")
-
-        # Empty distributions are plottable in mpl < 1.4.0. In 1.4.0, a
-        # ValueError is raised. This has been fixed in mpl 1.4.0-dev (see
-        # https://github.com/matplotlib/matplotlib/pull/3571). In order to
-        # support empty distributions across mpl versions, we replace them with
-        # [np.nan]. See https://github.com/pydata/pandas/issues/8382,
-        # https://github.com/matplotlib/matplotlib/pull/3571, and
-        # https://github.com/pydata/pandas/pull/8240 for details.
-        # If we decide to only support mpl > 1.4.0 in the future, this code can
-        # likely be removed in favor of letting mpl handle empty distributions.
-        if distribution.size > 0:
-            dists.append(distribution)
-        else:
-            dists.append(np.array([np.nan]))
-    return dists
-
-
-def _validate_input(data, x_values, data_point_labels, distribution_labels):
-    """Returns a tuple containing the number of data points and distributions
-    in the data.
-
-    Validates plotting options to make sure they are valid with the supplied
-    data.
-    """
-    if data is None or not data or isinstance(data, six.string_types):
-        raise ValueError("The data must be a list type, and it cannot be "
-                         "None or empty.")
-
-    num_points = len(data)
-    num_distributions = len(data[0])
-
-    empty_data_error_msg = ("The data must contain at least one data "
-                            "point, and each data point must contain at "
-                            "least one distribution to plot.")
-    if num_points == 0 or num_distributions == 0:
-        raise ValueError(empty_data_error_msg)
-
-    for point in data:
-        if len(point) == 0:
-            raise ValueError(empty_data_error_msg)
-        if len(point) != num_distributions:
-            raise ValueError("The number of distributions in each data point "
-                             "grouping must be the same for all data points.")
-
-    # Make sure we have the right number of x values (one for each data point),
-    # and make sure they are numbers.
-    _validate_x_values(x_values, data_point_labels, num_points)
-
-    if (distribution_labels is not None and
-            len(distribution_labels) != num_distributions):
-        raise ValueError("The number of distribution labels must be equal "
-                         "to the number of distributions.")
-    return num_points, num_distributions
-
-
-def _validate_x_values(x_values, x_tick_labels, num_expected_values):
-    """Validates the x values provided by the user, making sure they are the
-    correct length and are all numbers.
-
-    Also validates the number of x-axis tick labels.
-
-    Raises a ValueError if these conditions are not met.
-    """
-    if x_values is not None:
-        if len(x_values) != num_expected_values:
-            raise ValueError("The number of x values must match the number "
-                             "of data points.")
-        try:
-            list(map(float, x_values))
-        except:
-            raise ValueError("Each x value must be a number.")
-
-    if x_tick_labels is not None:
-        if len(x_tick_labels) != num_expected_values:
-            raise ValueError("The number of x-axis tick labels must match the "
-                             "number of data points.")
-
-
-def _get_distribution_markers(marker_type, marker_choices, num_markers):
-    """Returns a list of length num_markers of valid matplotlib colors or
-    symbols.
-
-    The markers will be comprised of those found in marker_choices (if not None
-    and not empty) or a list of predefined markers (determined by marker_type,
-    which can be either 'colors' or 'symbols'). If there are not enough
-    markers, the list of markers will be reused from the beginning again (as
-    many times as are necessary).
-    """
-    if num_markers < 0:
-        raise ValueError("num_markers must be greater than or equal to zero.")
-    if marker_choices is None or len(marker_choices) == 0:
-        if marker_type == 'colors':
-            marker_choices = ['b', 'g', 'r', 'c', 'm', 'y', 'w']
-        elif marker_type == 'symbols':
-            marker_choices = \
-                ['s', 'o', '^', '>', 'v', '<', 'd', 'p', 'h', '8', '+', 'x']
-        else:
-            raise ValueError("Invalid marker_type: '%s'. marker_type must be "
-                             "either 'colors' or 'symbols'." % marker_type)
-    if len(marker_choices) < num_markers:
-        # We don't have enough markers to represent each distribution uniquely,
-        # so let the user know. We'll add as many markers (starting from the
-        # beginning of the list again) until we have enough, but the user
-        # should still know because they may want to provide a new list of
-        # markers.
-        warnings.warn(
-            "There are not enough markers to uniquely represent each "
-            "distribution in your dataset. You may want to provide a list "
-            "of markers that is at least as large as the number of "
-            "distributions in your dataset.",
-            RuntimeWarning)
-        marker_cycle = cycle(marker_choices[:])
-        while len(marker_choices) < num_markers:
-            marker_choices.append(next(marker_cycle))
-    return marker_choices[:num_markers]
-
-
-def _calc_data_point_locations(num_points, x_values=None):
-    """Returns the x-axis location for each of the data points to start at.
-
-    Note: A numpy array is returned so that the overloaded "+" operator can be
-    used on the array.
-
-    The x-axis locations are scaled by x_values if it is provided, or else the
-    x-axis locations are evenly spaced. In either case, the x-axis locations
-    will always be in the range [1, num_points].
-    """
-    if x_values is None:
-        # Evenly space the x-axis locations.
-        x_locs = np.arange(1, num_points + 1)
-    else:
-        if len(x_values) != num_points:
-            raise ValueError("The number of x-axis values must match the "
-                             "number of data points.")
-
-        # Scale to the range [1, num_points]. Taken from
-        # http://www.heatonresearch.com/wiki/Range_Normalization
-        x_min = min(x_values)
-        x_max = max(x_values)
-        x_range = x_max - x_min
-        n_range = num_points - 1
-        x_locs = np.array([(((x_val - x_min) * n_range) / float(x_range)) + 1
-                           for x_val in x_values])
-
-    return x_locs
-
-
-def _calc_data_point_ticks(x_locations, num_distributions, distribution_width,
-                           distribution_centered):
-    """Returns a 1D numpy array of x-axis tick positions.
-
-    These positions will be centered on each data point.
-
-    Set distribution_centered to True for scatter and box plots because their
-    plot types naturally center over a given horizontal position. Bar charts
-    should use distribution_centered = False because the leftmost edge of a bar
-    starts at a given horizontal position and extends to the right for the
-    width of the bar.
-    """
-    dist_size = num_distributions - 1 if distribution_centered else\
-        num_distributions
-    return x_locations + ((dist_size * distribution_width) / 2)
-
-
-def _plot_bar_data(plot_axes, distribution, distribution_color,
-                   distribution_width, x_position, whisker_length,
-                   error_bar_type):
-    """Returns the result of plotting a single bar in matplotlib."""
-    result = None
-
-    # We do not want to plot empty distributions because matplotlib will not be
-    # able to render them as PDFs.
-    if len(distribution) > 0:
-        avg = np.mean(distribution)
-        if error_bar_type == 'stdv':
-            error_bar = np.std(distribution)
-        elif error_bar_type == 'sem':
-            error_bar = np.std(distribution) / np.sqrt(len(distribution))
-        else:
-            raise ValueError(
-                "Invalid error bar type '%s'. Supported error bar types are "
-                "'stdv' and 'sem'." % error_bar_type)
-        result = plot_axes.bar(x_position, avg, distribution_width,
-                               yerr=error_bar, ecolor='black',
-                               facecolor=distribution_color)
-    return result
-
-
-def _plot_scatter_data(plot_axes, distribution, distribution_symbol,
-                       distribution_width, x_position, whisker_length,
-                       error_bar_type):
-    """Returns the result of plotting a single scatterplot in matplotlib."""
-    result = None
-    x_vals = [x_position] * len(distribution)
-
-    # matplotlib's scatter function doesn't like plotting empty data.
-    if len(x_vals) > 0 and len(distribution) > 0:
-        result = plot_axes.scatter(x_vals, distribution,
-                                   marker=distribution_symbol, c='k')
-    return result
-
-
-def _plot_box_data(plot_axes, distribution, distribution_color,
-                   distribution_width, x_position, whisker_length,
-                   error_bar_type):
-    """Returns the result of plotting a single boxplot in matplotlib."""
-    result = None
-
-    if len(distribution) > 0:
-        result = plot_axes.boxplot([distribution], positions=[x_position],
-                                   widths=distribution_width,
-                                   whis=whisker_length)
-        _color_box_plot(plot_axes, result, [distribution_color])
-
-    return result
-
-
-def _is_single_matplotlib_color(color):
-    """Returns True if color is a single (not a list) mpl color."""
-    single_color = False
-
-    if (isinstance(color, six.string_types)):
-        single_color = True
-    elif len(color) == 3 or len(color) == 4:
-        single_color = True
-
-        for e in color:
-            if not (isinstance(e, float) or isinstance(e, int)):
-                single_color = False
-
-    return single_color
-
-
-def _color_box_plot(plot_axes, box_plot, colors):
-    """Color boxes in the box plot with the specified colors.
-
-    If any of the colors are None, the box will not be colored.
-
-    The box_plot argument must be the dictionary returned by the call to
-    matplotlib's boxplot function, and the colors argument must consist of
-    valid matplotlib colors.
-    """
-    # Note: the following code is largely taken from this matplotlib boxplot
-    # example:
-    # http://matplotlib.sourceforge.net/examples/pylab_examples/
-    #     boxplot_demo2.html
-    num_colors = len(colors)
-    num_box_plots = len(box_plot['boxes'])
-    if num_colors != num_box_plots:
-        raise ValueError("The number of colors (%d) does not match the number "
-                         "of boxplots (%d)." % (num_colors, num_box_plots))
-
-    for box, median, color in zip(box_plot['boxes'],
-                                  box_plot['medians'],
-                                  colors):
-        if color is not None:
-            box_x = []
-            box_y = []
-
-            # There are five points in the box. The first is the same as
-            # the last.
-            for i in range(5):
-                box_x.append(box.get_xdata()[i])
-                box_y.append(box.get_ydata()[i])
-
-            box_coords = list(zip(box_x, box_y))
-            box_polygon = Polygon(box_coords, facecolor=color)
-            plot_axes.add_patch(box_polygon)
-
-            # Draw the median lines back over what we just filled in with
-            # color.
-            median_x = []
-            median_y = []
-            for i in range(2):
-                median_x.append(median.get_xdata()[i])
-                median_y.append(median.get_ydata()[i])
-                plot_axes.plot(median_x, median_y, 'black')
-
-
-def _set_axes_options(plot_axes, title=None, x_label=None, y_label=None,
-                      x_values=None, x_tick_labels=None,
-                      x_tick_labels_orientation='vertical', y_min=None,
-                      y_max=None):
-    """Applies various labelling options to the plot axes."""
-    if title is not None:
-        plot_axes.set_title(title)
-    if x_label is not None:
-        plot_axes.set_xlabel(x_label)
-    if y_label is not None:
-        plot_axes.set_ylabel(y_label)
-
-    if (x_tick_labels_orientation != 'vertical' and
-            x_tick_labels_orientation != 'horizontal'):
-        raise ValueError("Invalid orientation for x-axis tick labels: '%s'. "
-                         "Valid orientations are 'vertical' or 'horizontal'."
-                         % x_tick_labels_orientation)
-
-    # If labels are provided, always use them. If they aren't, use the x_values
-    # that denote the spacing between data points as labels. If that isn't
-    # available, simply label the data points in an incremental fashion,
-    # i.e. 1, 2, 3, ..., n, where n is the number of data points on the plot.
-    if x_tick_labels is not None:
-        plot_axes.set_xticklabels(x_tick_labels,
-                                  rotation=x_tick_labels_orientation)
-    elif x_tick_labels is None and x_values is not None:
-        plot_axes.set_xticklabels(x_values, rotation=x_tick_labels_orientation)
-    else:
-        plot_axes.set_xticklabels(
-            range(1, len(plot_axes.get_xticklabels()) + 1),
-            rotation=x_tick_labels_orientation)
-
-    # Set the y-axis range if specified.
-    if y_min is not None:
-        plot_axes.set_ylim(bottom=float(y_min))
-    if y_max is not None:
-        plot_axes.set_ylim(top=float(y_max))
-
-
-def _create_legend(plot_axes, distribution_markers, distribution_labels,
-                   marker_type):
-    """Creates a legend on the supplied axes."""
-    # We have to use a proxy artist for the legend because box plots currently
-    # don't have a very useful legend in matplotlib, and using the default
-    # legend for bar/scatterplots chokes on empty/null distributions.
-    #
-    # Note: This code is based on the following examples:
-    #   http://matplotlib.sourceforge.net/users/legend_guide.html
-    #   http://stackoverflow.com/a/11423554
-    if len(distribution_markers) != len(distribution_labels):
-        raise ValueError("The number of distribution markers does not match "
-                         "the number of distribution labels.")
-    if marker_type == 'colors':
-        legend_proxy = [Rectangle((0, 0), 1, 1, fc=marker)
-                        for marker in distribution_markers]
-        plot_axes.legend(legend_proxy, distribution_labels, loc='best')
-    elif marker_type == 'symbols':
-        legend_proxy = [Line2D(range(1), range(1), color='white',
-                        markerfacecolor='black', marker=marker)
-                        for marker in distribution_markers]
-        plot_axes.legend(legend_proxy, distribution_labels, numpoints=3,
-                         scatterpoints=3, loc='best')
-    else:
-        raise ValueError("Invalid marker_type: '%s'. marker_type must be "
-                         "either 'colors' or 'symbols'." % marker_type)
-
-
-def _set_figure_size(fig, width=None, height=None):
-    """Sets the plot figure size and makes room for axis labels, titles, etc.
-
-    If both width and height are not provided, will use matplotlib defaults.
-
-    Making room for labels will not always work, and if it fails, the user will
-    be warned that their plot may have cut-off labels.
-    """
-    # Set the size of the plot figure, then make room for the labels so they
-    # don't get cut off. Must be done in this order.
-    if width is not None and height is not None and width > 0 and height > 0:
-        fig.set_size_inches(width, height)
-    try:
-        fig.tight_layout()
-    except ValueError:
-        warnings.warn(
-            "Could not automatically resize plot to make room for "
-            "axes labels and plot title. This can happen if the labels or "
-            "title are extremely long and the plot size is too small. Your "
-            "plot may have its labels and/or title cut-off. To fix this, "
-            "try increasing the plot's size (in inches) and try again.",
-            RuntimeWarning)
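
The deprecation notice at the top of this removed module points users to
seaborn or another general-purpose plotting package. As a rough migration
sketch (hypothetical data and labels, plain matplotlib only, not part of this
commit), a colored boxplot figure similar to the boxplots() docstring examples
can be produced with:

    import matplotlib.pyplot as plt

    distributions = [[2, 2, 1, 3], [0, -1, 4, 5]]
    fig, ax = plt.subplots()
    # patch_artist=True returns filled boxes so they can be colored, mirroring
    # the box_colors option of the removed function.
    result = ax.boxplot(distributions, positions=[1, 2], widths=0.5, whis=1.5,
                        patch_artist=True)
    for patch, color in zip(result['boxes'], ('green', 'blue')):
        patch.set_facecolor(color)
    ax.set_xticks([1, 2])
    ax.set_xticklabels(['Control', 'Treatment'], rotation='vertical')
    fig.tight_layout()
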
diff --git a/skbio/draw/tests/test_distributions.py b/skbio/draw/tests/test_distributions.py
deleted file mode 100644
index 2ffeda6..0000000
--- a/skbio/draw/tests/test_distributions.py
+++ /dev/null
@@ -1,595 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-
-import warnings
-from unittest import TestCase, main
-
-import numpy as np
-import numpy.testing as npt
-import matplotlib.pyplot as plt
-
-from skbio.draw import boxplots, grouped_distributions
-from skbio.draw._distributions import (
-    _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot,
-    _create_legend, _get_distribution_markers, _is_single_matplotlib_color,
-    _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options,
-    _set_figure_size, _validate_input, _validate_x_values)
-
-
-class DistributionsTests(TestCase):
-    def setUp(self):
-        # Test null data list.
-        self.Null = None
-
-        # Test empty data list.
-        self.Empty = []
-
-        # Test nested empty data list.
-        self.EmptyNested = [[]]
-
-        # Test nested empty data list (for bar/scatter plots).
-        self.EmptyDeeplyNested = [[[]]]
-
-        # Test invalid number of samples in data list (for bar/scatter plots).
-        self.InvalidNumSamples = [[[1, 2, 3, 4, 5]],
-                                  [[4, 5, 6, 7, 8], [2, 3, 2]],
-                                  [[4, 7, 10, 33, 32, 6, 7, 8]]]
-
-        # Test valid data with three samples and four data points
-        # (for bar/scatter plots).
-        self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2, 3, 5, 6], [2, 3, 8]],
-                                 [[4, 7, 8], [8, 9, 10, 11], [9.0, 4, 1, 1]],
-                                 [[4, 33, 32, 6, 8], [5, 4, 8, 13], [1, 1, 2]],
-                                 [[2, 2, 2, 2], [3, 9, 8], [2, 1, 6, 7, 4, 5]]]
-
-        # Test valid data with one sample (for bar/scatter plots).
-        self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]],
-                                      [[4, 5, 6, 7, 8]],
-                                      [[4, 7, 10, 33, 32, 6, 7, 8]]]
-
-        # Test typical data to be plotted by the boxplot function.
-        self.ValidTypicalBoxData = [[3.4, 10, 11.67, 12.0, 2, 2, 99.99],
-                                    [2.3, 4, 5, 88, 9, 10, 11, 1, 0, 3, -8],
-                                    [2, 9, 7, 5, 6]]
-
-    def tearDown(self):
-        # We get a warning from mpl if we don't clean up our figures.
-        plt.close('all')
-
-    def test_validate_input_null(self):
-        with npt.assert_raises(ValueError):
-            _validate_input(self.Null, None, None, None)
-
-    def test_validate_input_empty(self):
-        with npt.assert_raises(ValueError):
-            _validate_input(self.Empty, None, None, None)
-
-    def test_validate_input_empty_nested(self):
-        with npt.assert_raises(ValueError):
-            _validate_input(self.EmptyNested, None, None, None)
-
-    def test_validate_input_empty_deeply_nested(self):
-        num_points, num_samples = _validate_input(self.EmptyDeeplyNested,
-                                                  None, None, None)
-        self.assertEqual(num_points, 1)
-        self.assertEqual(num_samples, 1)
-
-    def test_validate_input_empty_point(self):
-        with npt.assert_raises(ValueError):
-            _validate_input([[[1, 2, 3], [4, 5]], []], None, None, None)
-
-    def test_validate_input_invalid_num_samples(self):
-        with npt.assert_raises(ValueError):
-            _validate_input(self.InvalidNumSamples, None, None, None)
-
-    def test_validate_input_invalid_data_point_names(self):
-        with npt.assert_raises(ValueError):
-            _validate_input(self.ValidSingleSampleData, None, ["T0", "T1"],
-                            None)
-
-    def test_validate_input_invalid_sample_names(self):
-        with npt.assert_raises(ValueError):
-            _validate_input(self.ValidSingleSampleData, None, None,
-                            ["Men", "Women"])
-
-    def test_validate_input_all_valid_input(self):
-        self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8],
-                                         ["T0", "T1", "T2", "T3"],
-                                         ["Infants", "Children", "Teens"]),
-                         (4, 3))
-
-    def test_validate_x_values_invalid_x_values(self):
-        with npt.assert_raises(ValueError):
-            _validate_x_values([1, 2, 3, 4], ["T0", "T1", "T2"],
-                               len(self.ValidSingleSampleData))
-
-    def test_validate_x_values_invalid_x_tick_labels(self):
-        with npt.assert_raises(ValueError):
-            _validate_x_values(None, ["T0"], len(self.ValidSingleSampleData))
-
-    def test_validate_x_values_nonnumber_x_values(self):
-        with npt.assert_raises(ValueError):
-            _validate_x_values(["foo", 2, 3], None,
-                               len(self.ValidSingleSampleData))
-
-    def test_validate_x_values_valid_x_values(self):
-        _validate_x_values([1, 2.0, 3], None, 3)
-
-    def test_get_distribution_markers_null_marker_list(self):
-        self.assertEqual(_get_distribution_markers('colors', None, 5),
-                         ['b', 'g', 'r', 'c', 'm'])
-
-    def test_get_distribution_markers_empty_marker_list(self):
-        self.assertEqual(_get_distribution_markers('colors', None, 4),
-                         ['b', 'g', 'r', 'c'])
-
-    def test_get_distribution_markers_insufficient_markers(self):
-
-        expected = ['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r']
-        # adapted from SO example here: http://stackoverflow.com/a/3892301
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter("always")
-            actual = _get_distribution_markers('colors', None, 10)
-            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
-            self.assertEqual(actual, expected)
-
-        expected = ['^', '>', '<', '^', '>']
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter("always")
-            actual = _get_distribution_markers('symbols', ['^', '>', '<'], 5)
-            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
-            self.assertEqual(actual, expected)
-
-    def test_get_distribution_markers_bad_marker_type(self):
-        with npt.assert_raises(ValueError):
-            _get_distribution_markers('shapes', [], 3)
-
-    def test_get_distribution_markers_zero_markers(self):
-        self.assertEqual(_get_distribution_markers('symbols', None, 0), [])
-        self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), [])
-
-    def test_get_distribution_markers_negative_num_markers(self):
-        with npt.assert_raises(ValueError):
-            _get_distribution_markers('symbols', [], -1)
-
-    def test_plot_bar_data(self):
-        fig, ax = plt.subplots()
-        result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv')
-        self.assertEqual(result[0].__class__.__name__, "Rectangle")
-        self.assertEqual(len(result), 1)
-        self.assertAlmostEqual(result[0].get_width(), 0.5)
-        self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
-        self.assertAlmostEqual(result[0].get_height(), 2.0)
-
-        fig, ax = plt.subplots()
-        result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'sem')
-        self.assertEqual(result[0].__class__.__name__, "Rectangle")
-        self.assertEqual(len(result), 1)
-        self.assertAlmostEqual(result[0].get_width(), 0.5)
-        self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
-        self.assertAlmostEqual(result[0].get_height(), 2.0)
-
-    def test_plot_bar_data_bad_error_bar_type(self):
-        fig, ax = plt.subplots()
-        with npt.assert_raises(ValueError):
-            _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var')
-
-    def test_plot_bar_data_empty(self):
-        fig, ax = plt.subplots()
-        result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv')
-        self.assertTrue(result is None)
-
-        fig, ax = plt.subplots()
-        result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem')
-        self.assertTrue(result is None)
-
-    def test_plot_scatter_data(self):
-        fig, ax = plt.subplots()
-        result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv')
-        self.assertEqual(result.get_sizes(), 20)
-
-    def test_plot_scatter_data_empty(self):
-        fig, ax = plt.subplots()
-        result = _plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv')
-        self.assertTrue(result is None)
-
-    def test_plot_box_data(self):
-        fig, ax = plt.subplots()
-        result = _plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33, 55,
-                                1.5, 'stdv')
-        self.assertEqual(result.__class__.__name__, "dict")
-        self.assertEqual(len(result['boxes']), 1)
-        self.assertEqual(len(result['medians']), 1)
-        self.assertEqual(len(result['whiskers']), 2)
-
-        # mpl < 1.4.0 creates two Line2D instances, mpl 1.4.0 creates one,
-        # though the resulting plot looks identical between the two versions.
-        # see:
-        #   https://github.com/pydata/pandas/issues/8382#issuecomment-56840974
-        #   https://github.com/matplotlib/matplotlib/issues/3544
-        self.assertTrue(len(result['fliers']) == 1 or
-                        len(result['fliers']) == 2)
-
-        self.assertEqual(len(result['caps']), 2)
-
-    def test_plot_box_data_empty(self):
-        fig, ax = plt.subplots()
-        result = _plot_box_data(ax, [], 'blue', 0.33, 55, 1.5, 'stdv')
-        self.assertTrue(result is None)
-
-    def test_calc_data_point_locations_invalid_x_values(self):
-        with npt.assert_raises(ValueError):
-            _calc_data_point_locations(3, [1, 10.5])
-
-    def test_calc_data_point_locations_default_spacing(self):
-        locs = _calc_data_point_locations(4)
-        np.testing.assert_allclose(locs, [1, 2, 3, 4])
-
-    def test_calc_data_point_locations_custom_spacing(self):
-        # Scaling down from 3..12 to 1..4.
-        locs = _calc_data_point_locations(4, [3, 4, 10, 12])
-        np.testing.assert_allclose(locs,
-                                   np.array([1, 1.33333333, 3.33333333, 4]))
-
-        # Sorted order shouldn't affect scaling.
-        locs = _calc_data_point_locations(4, [4, 3, 12, 10])
-        np.testing.assert_allclose(locs,
-                                   np.array([1.33333333, 1, 4, 3.33333333]))
-
-        # Scaling up from 0.001..0.87 to 1..3.
-        locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87])
-        np.testing.assert_allclose(locs,
-                                   np.array([1, 1.58296893, 3]))
-
-    def test_calc_data_point_ticks(self):
-        ticks = _calc_data_point_ticks(np.array([1, 5, 9, 11]), 1, 0.5, False)
-        np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25])
-
-        ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5, False)
-        np.testing.assert_allclose(ticks, [0.75])
-
-    def test_set_axes_options(self):
-        fig, ax = plt.subplots()
-        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
-                          x_tick_labels=["T0", "T1"])
-        self.assertEqual(ax.get_title(), "Plot Title")
-        self.assertEqual(ax.get_ylabel(), "y-axis label")
-        self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
-        self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")
-
-    def test_set_axes_options_ylim(self):
-        fig, ax = plt.subplots()
-        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
-                          x_tick_labels=["T0", "T1", "T2"], y_min=0, y_max=1)
-        self.assertEqual(ax.get_title(), "Plot Title")
-        self.assertEqual(ax.get_ylabel(), "y-axis label")
-        self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
-        self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")
-        self.assertEqual(ax.get_ylim(), (0.0, 1.0))
-
-    def test_set_axes_options_x_values_as_tick_labels(self):
-        fig, ax = plt.subplots()
-        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
-                          x_values=[42, 45, 800])
-
-        self.assertEqual(ax.get_title(), "Plot Title")
-        self.assertEqual(ax.get_ylabel(), "y-axis label")
-        self.assertEqual(ax.get_xticklabels()[0].get_text(), '42')
-        self.assertEqual(ax.get_xticklabels()[1].get_text(), '45')
-        self.assertEqual(ax.get_xticklabels()[2].get_text(), '800')
-
-    def test_set_axes_options_bad_ylim(self):
-        fig, ax = plt.subplots()
-        with npt.assert_raises(ValueError):
-            _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
-                              x_tick_labels=["T0", "T1", "T2"], y_min='car',
-                              y_max=30)
-
-    def test_set_axes_options_invalid_x_tick_labels_orientation(self):
-        fig, ax = plt.subplots()
-        with npt.assert_raises(ValueError):
-            _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
-                              x_tick_labels=["T0", "T1"],
-                              x_tick_labels_orientation='brofist')
-
-    def test_create_legend(self):
-        fig, ax = plt.subplots()
-        _create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors')
-        self.assertEqual(len(ax.get_legend().get_texts()), 2)
-
-        fig, ax = plt.subplots()
-        _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
-                       'symbols')
-        self.assertEqual(len(ax.get_legend().get_texts()), 3)
-
-    def test_create_legend_invalid_input(self):
-        fig, ax = plt.subplots()
-        with npt.assert_raises(ValueError):
-            _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'], 'symbols')
-        with npt.assert_raises(ValueError):
-            _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
-                           'foo')
-
-    def test_grouped_distributions_bar(self):
-        fig = grouped_distributions('bar', self.ValidTypicalData,
-                                    [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
-                                    ["Infants", "Children", "Teens"],
-                                    ['b', 'r', 'g'], "x-axis label",
-                                    "y-axis label", "Test")
-        ax = fig.get_axes()[0]
-        self.assertEqual(ax.get_title(), "Test")
-        self.assertEqual(ax.get_xlabel(), "x-axis label")
-        self.assertEqual(ax.get_ylabel(), "y-axis label")
-        self.assertEqual(len(ax.get_xticklabels()), 4)
-        np.testing.assert_allclose(ax.get_xticks(),
-                                   [1.1125, 2.0125, 3.8125, 4.1125])
-
-    def test_grouped_distributions_insufficient_colors(self):
-        args = ('bar', self.ValidTypicalData, [1, 4, 10, 11],
-                ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
-                ['b', 'r'], "x-axis label", "y-axis label", "Test")
-
-        # adapted from SO example here: http://stackoverflow.com/a/3892301
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter("always")
-            grouped_distributions(*args)
-            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
-
-    def test_grouped_distributions_scatter(self):
-        fig = grouped_distributions('scatter', self.ValidTypicalData,
-                                    [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
-                                    ["Infants", "Children", "Teens"],
-                                    ['^', '>', '<'], "x-axis label",
-                                    "y-axis label", "Test")
-        ax = fig.get_axes()[0]
-        self.assertEqual(ax.get_title(), "Test")
-        self.assertEqual(ax.get_xlabel(), "x-axis label")
-        self.assertEqual(ax.get_ylabel(), "y-axis label")
-        self.assertEqual(len(ax.get_xticklabels()), 4)
-        np.testing.assert_allclose(ax.get_xticks(),
-                                   [1.075, 1.975, 3.775, 4.075])
-
-    def test_grouped_distributions_insufficient_symbols(self):
-        args = ('scatter', self.ValidTypicalData, [1, 4, 10, 11],
-                ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
-                ['^'], "x-axis label", "y-axis label", "Test")
-
-        # adapted from SO example here: http://stackoverflow.com/a/3892301
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter("always")
-            grouped_distributions(*args)
-            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
-
-    def test_grouped_distributions_empty_marker_list(self):
-        grouped_distributions('scatter', self.ValidTypicalData,
-                              [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
-                              ["Infants", "Children", "Teens"], [],
-                              "x-axis label", "y-axis label", "Test")
-
-    def test_grouped_distributions_box(self):
-        fig = grouped_distributions('box', self.ValidTypicalData,
-                                    [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
-                                    ["Infants", "Children", "Teens"],
-                                    ['b', 'g', 'y'], "x-axis label",
-                                    "y-axis label", "Test")
-        ax = fig.get_axes()[0]
-        self.assertEqual(ax.get_title(), "Test")
-        self.assertEqual(ax.get_xlabel(), "x-axis label")
-        self.assertEqual(ax.get_ylabel(), "y-axis label")
-        self.assertEqual(len(ax.get_xticklabels()), 4)
-        np.testing.assert_allclose(ax.get_xticks(),
-                                   [1.075, 1.975, 3.775, 4.075])
-
-    def test_grouped_distributions_error(self):
-        with npt.assert_raises(ValueError):
-            grouped_distributions('pie', self.ValidTypicalData,
-                                  [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
-                                  ["Infants", "Children", "Teens"],
-                                  ['b', 'g', 'y'],
-                                  "x-axis label", "y-axis label", "Test")
-
-    def test_grouped_distributions_negative_distribution_width(self):
-        args = ('box', self.ValidTypicalData, [1, 4, 10, 11],
-                ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
-                ['b', 'g', 'y'], "x-axis label", "y-axis label", "Test")
-
-        with self.assertRaises(ValueError):
-            grouped_distributions(*args, distribution_width=0)
-
-        with self.assertRaises(ValueError):
-            grouped_distributions(*args, distribution_width=-42)
-
-    def test_boxplots(self):
-        fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10],
-                       ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
-                       "y-axis label",
-                       legend=(('blue', 'red'), ('foo', 'bar')))
-        ax = fig.get_axes()[0]
-        self.assertEqual(ax.get_title(), "Test")
-        self.assertEqual(ax.get_xlabel(), "x-axis label")
-        self.assertEqual(ax.get_ylabel(), "y-axis label")
-        self.assertEqual(len(ax.get_xticklabels()), 3)
-        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
-
-    def test_boxplots_empty_distributions(self):
-        fig = boxplots([[1, 2, 3], [], [4, 5, 6]], [1, 4, 10],
-                       ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
-                       "y-axis label")
-        ax = fig.get_axes()[0]
-        self.assertEqual(ax.get_title(), "Test")
-        self.assertEqual(ax.get_xlabel(), "x-axis label")
-        self.assertEqual(ax.get_ylabel(), "y-axis label")
-        self.assertEqual(len(ax.get_xticklabels()), 3)
-        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
-
-        # second distribution (empty) should have nans since it is hidden.
-        # boxplots in mpl < 1.4.0 have 8 lines per boxplot, while mpl 1.4.0 has
-        # 7. in either case, the line at index 8 should have a nan for its y
-        # value
-        lines = ax.get_lines()
-        self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
-        # line in first distribution should *not* have nan for its y value
-        self.assertFalse(np.isnan(lines[0].get_xydata()[0][1]))
-
-        # All distributions are empty.
-        fig = boxplots([[], [], []], [1, 4, 10],
-                       ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
-                       "y-axis label")
-        ax = fig.get_axes()[0]
-        self.assertEqual(ax.get_title(), "Test")
-        self.assertEqual(ax.get_xlabel(), "x-axis label")
-        self.assertEqual(ax.get_ylabel(), "y-axis label")
-        self.assertEqual(len(ax.get_xticklabels()), 3)
-        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
-
-        lines = ax.get_lines()
-        self.assertTrue(np.isnan(lines[0].get_xydata()[0][1]))
-        self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
-        self.assertTrue(np.isnan(lines[16].get_xydata()[0][1]))
-
-    def test_boxplots_box_colors(self):
-        # Coloring works with all empty distributions.
-        fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow'])
-        ax = fig.get_axes()[0]
-        self.assertEqual(len(ax.get_xticklabels()), 3)
-        # patch colors should match what we specified
-        self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
-        self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
-        self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))
-        # patch location should include at least one nan since the distribution
-        # is empty, and thus hidden
-        for patch in ax.patches:
-            self.assertTrue(np.isnan(patch.xy[0][1]))
-
-        fig = boxplots([[], [], []], box_colors='pink')
-        ax = fig.get_axes()[0]
-        self.assertEqual(len(ax.get_xticklabels()), 3)
-        for patch in ax.patches:
-            npt.assert_almost_equal(
-                patch.get_facecolor(),
-                (1.0, 0.7529411764705882, 0.796078431372549, 1.0))
-            self.assertTrue(np.isnan(patch.xy[0][1]))
-
-        # Coloring works with some empty distributions.
-        fig = boxplots([[], [1, 2, 3.5], []],
-                       box_colors=['blue', 'red', 'yellow'])
-        ax = fig.get_axes()[0]
-        self.assertEqual(len(ax.get_xticklabels()), 3)
-        self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
-        self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
-        self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))
-        self.assertTrue(np.isnan(ax.patches[0].xy[0][1]))
-        self.assertFalse(np.isnan(ax.patches[1].xy[0][1]))
-        self.assertTrue(np.isnan(ax.patches[2].xy[0][1]))
-
-    def test_boxplots_invalid_input(self):
-        # Non-numeric entries in distribution.
-        with npt.assert_raises(ValueError):
-            boxplots([[1, 'foo', 3]])
-
-        # Number of colors doesn't match number of distributions.
-        with npt.assert_raises(ValueError):
-            boxplots([[1, 2, 3], [], [4, 5, 6]], box_colors=['blue', 'red'])
-
-        # Invalid legend.
-        with npt.assert_raises(ValueError):
-            boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz'))
-
-    def test_color_box_plot(self):
-        fig, ax = plt.subplots()
-        box_plot = plt.boxplot(self.ValidTypicalBoxData)
-        _color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)])
-
-        # Some colors are None.
-        fig, ax = plt.subplots()
-        box_plot = plt.boxplot(self.ValidTypicalBoxData)
-        _color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)])
-
-        # All colors are None.
-        fig, ax = plt.subplots()
-        box_plot = plt.boxplot(self.ValidTypicalBoxData)
-        _color_box_plot(ax, box_plot, [None, None, None])
-
-    def test_color_box_plot_invalid_input(self):
-        # Invalid color.
-        fig, ax = plt.subplots()
-        box_plot = plt.boxplot(self.ValidTypicalBoxData)
-        with npt.assert_raises(ValueError):
-            _color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue'])
-
-        # Wrong number of colors.
-        fig, ax = plt.subplots()
-        box_plot = plt.boxplot(self.ValidTypicalBoxData)
-        with npt.assert_raises(ValueError):
-            _color_box_plot(ax, box_plot, ['blue', (1, 1, 0.9)])
-
-    def test_is_single_matplotlib_color(self):
-        self.assertTrue(_is_single_matplotlib_color('w'))
-        self.assertTrue(_is_single_matplotlib_color('white'))
-        self.assertTrue(_is_single_matplotlib_color([1, 1, 1]))
-        self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1]))
-        self.assertTrue(_is_single_matplotlib_color((1, 1, 1)))
-        self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1)))
-        self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0)))
-        self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0)))
-        self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0)))
-
-        self.assertFalse(_is_single_matplotlib_color(['w', 'r']))
-        self.assertFalse(_is_single_matplotlib_color(['w']))
-        self.assertFalse(_is_single_matplotlib_color(('w',)))
-        self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),)))
-        self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),
-                                                      (0.9, 0.9))))
-
-    def test_set_figure_size(self):
-        fig, ax = plt.subplots()
-        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
-                          x_tick_labels=['foofoofoo', 'barbarbar'],
-                          x_tick_labels_orientation='vertical')
-        _set_figure_size(fig, 3, 4)
-        self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4)))
-
-    def test_set_figure_size_defaults(self):
-        fig, ax = plt.subplots()
-        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
-                          x_tick_labels=['foofoofoo', 'barbarbar'],
-                          x_tick_labels_orientation='vertical')
-        orig_fig_size = fig.get_size_inches()
-        _set_figure_size(fig)
-        self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))
-
-    def test_set_figure_size_invalid(self):
-        fig, ax = plt.subplots()
-        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
-                          x_tick_labels=['foofoofoo', 'barbarbar'],
-                          x_tick_labels_orientation='vertical')
-        orig_fig_size = fig.get_size_inches()
-        _set_figure_size(fig, -1, 0)
-        self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))
-
-    def test_set_figure_size_long_labels(self):
-        fig, ax = plt.subplots()
-        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
-                          x_tick_labels=['foofoofooooooooooooooooooooooooo'
-                                         'oooooooooooooooooooooooooooooooo'
-                                         'oooooooooooooooooooooooooooooooo'
-                                         'oooo', 'barbarbar'],
-                          x_tick_labels_orientation='vertical')
-
-        # adapted from SO example here: http://stackoverflow.com/a/3892301
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter("always")
-            _set_figure_size(fig, 3, 3)
-            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
-        npt.assert_array_equal(fig.get_size_inches(), (3, 3))
-
-
-if __name__ == '__main__':
-    main()
diff --git a/skbio/io/__init__.py b/skbio/io/__init__.py
index 71600d1..3719814 100644
--- a/skbio/io/__init__.py
+++ b/skbio/io/__init__.py
@@ -15,6 +15,8 @@ see the associated documentation.
 .. autosummary::
    :toctree: generated/
 
+   blast6
+   blast7
    clustal
    fasta
    fastq
@@ -50,6 +52,7 @@ User exceptions and warnings
    UnrecognizedFormatError
    IOSourceError
    FileFormatError
+   BLAST7FormatError
    ClustalFormatError
    FASTAFormatError
    FASTQFormatError
@@ -77,12 +80,11 @@ Reading and writing files (I/O) can be a complicated task:
 
 * A file format can sometimes be read into more than one in-memory
   representation (i.e., object). For example, a FASTA file can be read into an
-  :mod:`skbio.alignment.SequenceCollection` or :mod:`skbio.alignment.Alignment`
-  depending on the file's contents and what operations you'd like to perform on
-  your data.
+  :mod:`skbio.alignment.TabularMSA` or :mod:`skbio.sequence.DNA` depending on
+  what operations you'd like to perform on your data.
 * A single object might be writeable to more than one file format. For example,
-  an :mod:`skbio.alignment.Alignment` object could be written to FASTA, FASTQ,
-  QSEQ, or PHYLIP formats, just to name a few.
+  an :mod:`skbio.alignment.TabularMSA` object could be written to FASTA, FASTQ,
+  CLUSTAL, or PHYLIP formats, just to name a few.
 * You might not know the exact file format of your file, but you want to read
   it into an appropriate object.
 * You might want to read multiple files into a single object, or write an
@@ -120,14 +122,14 @@ For example, to read a `newick` file using both interfaces you would type:
 >>> from skbio import read
 >>> from skbio import TreeNode
 >>> from io import StringIO
->>> open_filehandle = StringIO(u'(a, b);')
+>>> open_filehandle = StringIO('(a, b);')
 >>> tree = read(open_filehandle, format='newick', into=TreeNode)
 >>> tree
 <TreeNode, name: unnamed, internal node count: 0, tips count: 2>
 
 For the OO interface:
 
->>> open_filehandle = StringIO(u'(a, b);')
+>>> open_filehandle = StringIO('(a, b);')
 >>> tree = TreeNode.read(open_filehandle, format='newick')
 >>> tree
 <TreeNode, name: unnamed, internal node count: 0, tips count: 2>
@@ -143,7 +145,7 @@ that `format` may be omitted there as well.
 
 As an example:
 
->>> open_filehandle = StringIO(u'(a, b);')
+>>> open_filehandle = StringIO('(a, b);')
 >>> tree = TreeNode.read(open_filehandle)
 >>> tree
 <TreeNode, name: unnamed, internal node count: 0, tips count: 2>
@@ -195,10 +197,11 @@ from skbio.util import TestRunner
 
 from ._warning import FormatIdentificationWarning, ArgumentOverrideWarning
 from ._exception import (UnrecognizedFormatError, FileFormatError,
-                         ClustalFormatError, FASTAFormatError,
-                         IOSourceError, FASTQFormatError, LSMatFormatError,
-                         NewickFormatError, OrdinationFormatError,
-                         PhylipFormatError, QSeqFormatError, QUALFormatError)
+                         BLAST7FormatError, ClustalFormatError,
+                         FASTAFormatError, GenBankFormatError, IOSourceError,
+                         FASTQFormatError, LSMatFormatError, NewickFormatError,
+                         OrdinationFormatError, PhylipFormatError,
+                         QSeqFormatError, QUALFormatError)
 from .registry import write, read, sniff, create_format, io_registry
 from .util import open
 
@@ -208,9 +211,11 @@ __all__ = ['write', 'read', 'sniff', 'open', 'io_registry', 'create_format',
            'UnrecognizedFormatError', 'IOSourceError',
 
            'FileFormatError',
+           'BLAST7FormatError',
            'ClustalFormatError',
            'FASTAFormatError',
            'FASTQFormatError',
+           'GenBankFormatError',
            'LSMatFormatError',
            'NewickFormatError',
            'OrdinationFormatError',
@@ -222,6 +227,8 @@ __all__ = ['write', 'read', 'sniff', 'open', 'io_registry', 'create_format',
 # Necessary to import each file format module to have them added to the I/O
 # registry. We use import_module instead of a typical import to avoid flake8
 # unused import errors.
+import_module('skbio.io.format.blast6')
+import_module('skbio.io.format.blast7')
 import_module('skbio.io.format.clustal')
 import_module('skbio.io.format.fasta')
 import_module('skbio.io.format.fastq')
@@ -230,7 +237,7 @@ import_module('skbio.io.format.newick')
 import_module('skbio.io.format.ordination')
 import_module('skbio.io.format.phylip')
 import_module('skbio.io.format.qseq')
-
+import_module('skbio.io.format.genbank')
 
 # This is meant to be a handy indicator to the user that they have done
 # something wrong.
diff --git a/skbio/io/_exception.py b/skbio/io/_exception.py
index 36af916..2db661d 100644
--- a/skbio/io/_exception.py
+++ b/skbio/io/_exception.py
@@ -24,6 +24,16 @@ class UnrecognizedFormatError(FileFormatError):
     pass
 
 
+class GenBankFormatError(FileFormatError):
+    """Raised when a ``genbank`` formatted file cannot be parsed."""
+    pass
+
+
+class BLAST7FormatError(FileFormatError):
+    """Raised when a ``blast7`` formatted file cannot be parsed."""
+    pass
+
+
 class ClustalFormatError(FileFormatError):
     """Raised when a ``clustal`` formatted file cannot be parsed."""
     pass
@@ -62,7 +72,7 @@ class FASTQFormatError(FileFormatError):
 class PhylipFormatError(FileFormatError):
     """Raised when a ``phylip`` formatted file cannot be parsed.
 
-    May also be raised when an object (e.g., ``Alignment``) cannot be written
+    May also be raised when an object (e.g., ``TabularMSA``) cannot be written
     in ``phylip`` format.
 
     """
diff --git a/skbio/io/_iosources.py b/skbio/io/_iosources.py
index 2e92d5c..2cdf15e 100644
--- a/skbio/io/_iosources.py
+++ b/skbio/io/_iosources.py
@@ -7,8 +7,7 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
-
-from six import string_types, text_type
+import six
 
 import io
 import gzip
@@ -80,7 +79,7 @@ class Compressor(IOSource):
 
 class FilePathSource(IOSource):
     def can_read(self):
-        return isinstance(self.file, string_types)
+        return isinstance(self.file, six.string_types)
 
     def can_write(self):
         return self.can_read()
@@ -95,7 +94,7 @@ class FilePathSource(IOSource):
 class HTTPSource(IOSource):
     def can_read(self):
         return (
-            isinstance(self.file, string_types) and
+            isinstance(self.file, six.string_types) and
             requests.compat.urlparse(self.file).scheme in {'http', 'https'})
 
     def get_reader(self):
@@ -169,11 +168,17 @@ class IterableSource(IOSource):
             if head is None:
                 self.repaired = []
                 return True
-            if isinstance(head, text_type):
+            if isinstance(head, six.text_type):
                 self.repaired = itertools.chain([head], iterator)
                 return True
             else:
                 # We may have mangled a generator at this point, so just abort
+                if six.PY2 and isinstance(head, bytes):
+                    raise IOSourceError(
+                        "Could not open source: %r (mode: %r).\n Prepend a "
+                        r"`u` to the strings (e.g. [u'line1\n', u'line2\n'])" %
+                        (self.file, self.options['mode']))
+
                 raise IOSourceError(
                     "Could not open source: %r (mode: %r)" %
                     (self.file, self.options['mode']))
diff --git a/skbio/io/format/_base.py b/skbio/io/format/_base.py
index e031c9b..9f8be9c 100644
--- a/skbio/io/format/_base.py
+++ b/skbio/io/format/_base.py
@@ -164,7 +164,7 @@ def _format_fasta_like_records(generator, id_whitespace_replacement,
             id_ = _whitespace_regex.sub(id_whitespace_replacement, id_)
 
         if 'description' in seq.metadata:
-            desc = seq.metadata['description']
+            desc = '%s' % seq.metadata['description']
         else:
             desc = ''
 
@@ -186,21 +186,20 @@ def _format_fasta_like_records(generator, id_whitespace_replacement,
             qual = seq.positional_metadata['quality'].values
 
         if lowercase is not None:
-            if hasattr(seq, 'lowercase'):
-                seq_str = seq.lowercase(lowercase)
-            else:
-                raise AttributeError("lowercase specified but class %s does "
-                                     "not support lowercase functionality" %
-                                     seq.__class__.__name__)
+            seq_str = seq.lowercase(lowercase)
         else:
             seq_str = str(seq)
         yield header, "%s" % seq_str, qual
 
 
-def _line_generator(fh, skip_blanks=False):
+def _line_generator(fh, skip_blanks=False, strip=True):
     for line in fh:
-        line = line.strip()
-        if line or not skip_blanks:
+        if strip:
+            line = line.strip()
+        skip = False
+        if skip_blanks:
+            skip = line.isspace() or not line
+        if not skip:
             yield line
 
 
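
The new ``strip`` flag on the private ``_line_generator`` helper lets callers
keep trailing newlines while still skipping blank or whitespace-only lines. A
small sketch of the behaviour, assuming the helper is imported from its private
module as defined above::

    from io import StringIO

    from skbio.io.format._base import _line_generator

    print(list(_line_generator(StringIO('abc\n\n   \ndef\n'),
                               skip_blanks=True, strip=False)))
    # ['abc\n', 'def\n'] -- blank lines skipped, newlines preserved

    print(list(_line_generator(StringIO('abc\n\n   \ndef\n'),
                               skip_blanks=True, strip=True)))
    # ['abc', 'def'] -- lines stripped before the blank check
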
diff --git a/skbio/io/format/_blast.py b/skbio/io/format/_blast.py
new file mode 100644
index 0000000..6247234
--- /dev/null
+++ b/skbio/io/format/_blast.py
@@ -0,0 +1,44 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import functools
+
+import pandas as pd
+
+_possible_columns = {'qseqid': str, 'qgi': float, 'qacc': str, 'qaccver': str,
+                     'qlen': float, 'sseqid': str, 'sallseqid': str,
+                     'sgi': float, 'sallgi': float, 'sacc': str,
+                     'saccver': str, 'sallacc': str, 'slen': float,
+                     'qstart': float, 'qend': float, 'sstart': float,
+                     'send': float, 'qseq': str, 'sseq': str,
+                     'evalue': float, 'bitscore': float, 'score': float,
+                     'length': float, 'pident': float, 'nident': float,
+                     'mismatch': float, 'positive': float, 'gapopen': float,
+                     'gaps': float, 'ppos': float, 'frames': str,
+                     'qframe': float, 'sframe': float, 'btop': float,
+                     'staxids': str, 'sscinames': str, 'scomnames': str,
+                     'sblastnames': str, 'sskingdoms': str, 'stitle': str,
+                     'salltitles': str, 'sstrand': str, 'qcovs': float,
+                     'qcovhsp': float}
+
+
+def _parse_blast_data(fh, columns, error, error_message, comment=None,
+                      skiprows=None):
+    read_csv = functools.partial(pd.read_csv, na_values='N/A', sep='\t',
+                                 header=None, keep_default_na=False,
+                                 comment=comment, skiprows=skiprows)
+    lineone = read_csv(fh, nrows=1)
+
+    if len(lineone.columns) != len(columns):
+        raise error(error_message % (len(columns), len(lineone.columns)))
+
+    fh.seek(0)
+    return read_csv(fh, names=columns, dtype=_possible_columns)
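
``_parse_blast_data`` first reads a single row to verify that the number of
columns in the file matches the number of requested column names, then rewinds
and parses the whole file. A minimal pandas-only sketch of the same idea, using
a hypothetical file handle and column list (the real helper also applies the
``_possible_columns`` dtypes)::

    import io

    import pandas as pd

    fh = io.StringIO('q1\t1\t0.001\nq2\t5\tN/A\n')  # hypothetical input
    columns = ['qseqid', 'qstart', 'evalue']

    # Peek at the first row to confirm the column count matches.
    first = pd.read_csv(fh, sep='\t', header=None, nrows=1,
                        na_values='N/A', keep_default_na=False)
    if len(first.columns) != len(columns):
        raise ValueError('expected %d columns, found %d'
                         % (len(columns), len(first.columns)))

    # Re-read the whole file with the requested column names.
    fh.seek(0)
    df = pd.read_csv(fh, sep='\t', header=None, names=columns,
                     na_values='N/A', keep_default_na=False)
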
diff --git a/skbio/io/format/blast6.py b/skbio/io/format/blast6.py
new file mode 100644
index 0000000..c06b1a3
--- /dev/null
+++ b/skbio/io/format/blast6.py
@@ -0,0 +1,276 @@
+"""
+BLAST+6 format (:mod:`skbio.io.format.blast6`)
+==============================================
+
+.. currentmodule:: skbio.io.format.blast6
+
+The BLAST+6 format (``blast+6``) stores the results of a BLAST [1]_ database
+search. The results are stored in a simple tabular format with no column
+headers. Values are separated by the tab character.
+
+An example BLAST+6-formatted file comparing two protein sequences, taken
+from [2]_ (tab characters represented by ``<tab>``)::
+
+    moaC<tab>gi|15800534|ref|NP_286546.1|<tab>100.00<tab>161<tab>0<tab>0<tab>1\
+<tab>161<tab>1<tab>161<tab>3e-114<tab>330
+    moaC<tab>gi|170768970|ref|ZP_02903423.1|<tab>99.38<tab>161<tab>1<tab>0\
+<tab>1<tab>161<tab>1<tab>161<tab>9e-114<tab>329
+
+Format Support
+--------------
+**Has Sniffer: No**
+
+**State: Experimental as of 0.4.1.**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|Yes   |No    |:mod:`pandas.DataFrame`                                        |
++------+------+---------------------------------------------------------------+
+
+Format Specification
+--------------------
+BLAST+6 format is a tabular text-based format produced by both BLAST+ output
+format 6 (``-outfmt 6``) and legacy BLAST output format 8 (``-m 8``). It is
+tab-separated and has no column headers. With BLAST+, users can specify the
+columns that are present in their BLAST output file by specifying column names
+(e.g., ``-outfmt "6 qseqid sseqid bitscore qstart sstart"``), if the default
+columns output by BLAST are not desired.
+
+BLAST Column Types
+^^^^^^^^^^^^^^^^^^
+The following column types are output by BLAST and supported by scikit-bio.
+This information is taken from [3]_.
+
++-----------+------------------------------------+-----+
+|Name       |Description                         |Type |
++===========+====================================+=====+
+|qseqid     |Query Seq-id                        |str  |
++-----------+------------------------------------+-----+
+|qgi        |Query GI                            |int  |
++-----------+------------------------------------+-----+
+|qacc       |Query accession                     |str  |
++-----------+------------------------------------+-----+
+|qaccver    |Query accession.version             |str  |
++-----------+------------------------------------+-----+
+|qlen       |Query sequence length               |int  |
++-----------+------------------------------------+-----+
+|sseqid     |Subject Seq-id                      |str  |
++-----------+------------------------------------+-----+
+|sallseqid  |All subject Seq-id(s), separated by |str  |
+|           |a ';'                               |     |
++-----------+------------------------------------+-----+
+|sgi        |Subject GI                          |int  |
++-----------+------------------------------------+-----+
+|sallgi     |All subject GIs                     |int  |
++-----------+------------------------------------+-----+
+|sacc       |Subject accession                   |str  |
++-----------+------------------------------------+-----+
+|saccver    |Subject accession.version           |str  |
++-----------+------------------------------------+-----+
+|sallacc    |All subject accessions              |str  |
++-----------+------------------------------------+-----+
+|slen       |Subject sequence length             |int  |
++-----------+------------------------------------+-----+
+|qstart     |Start of alignment in query         |int  |
++-----------+------------------------------------+-----+
+|qend       |End of alignment in query           |int  |
++-----------+------------------------------------+-----+
+|sstart     |Start of alignment in subject       |int  |
++-----------+------------------------------------+-----+
+|send       |End of alignment in subject         |int  |
++-----------+------------------------------------+-----+
+|qseq       |Aligned part of query sequence      |str  |
++-----------+------------------------------------+-----+
+|sseq       |Aligned part of subject sequence    |str  |
++-----------+------------------------------------+-----+
+|evalue     |Expect value                        |float|
++-----------+------------------------------------+-----+
+|bitscore   |Bit score                           |float|
++-----------+------------------------------------+-----+
+|score      |Raw score                           |int  |
++-----------+------------------------------------+-----+
+|length     |Alignment length                    |int  |
++-----------+------------------------------------+-----+
+|pident     |Percent of identical matches        |float|
++-----------+------------------------------------+-----+
+|nident     |Number of identical matches         |int  |
++-----------+------------------------------------+-----+
+|mismatch   |Number of mismatches                |int  |
++-----------+------------------------------------+-----+
+|positive   |Number of positive-scoring matches  |int  |
++-----------+------------------------------------+-----+
+|gapopen    |Number of gap openings              |int  |
++-----------+------------------------------------+-----+
+|gaps       |Total number of gaps                |int  |
++-----------+------------------------------------+-----+
+|ppos       |Percentage of positive-scoring matc\|float|
+|           |hes                                 |     |
++-----------+------------------------------------+-----+
+|frames     |Query and subject frames separated  |str  |
+|           |by a '/'                            |     |
++-----------+------------------------------------+-----+
+|qframe     |Query frame                         |int  |
++-----------+------------------------------------+-----+
+|sframe     |Subject frame                       |int  |
++-----------+------------------------------------+-----+
+|btop       |Blast traceback operations (BTOP)   |int  |
++-----------+------------------------------------+-----+
+|staxids    |Unique Subject Taxonomy ID(s), sepa\|str  |
+|           |rated by a ';' (in numerical order) |     |
++-----------+------------------------------------+-----+
+|sscinames  |Unique Subject Scientific Name(s),  |str  |
+|           |separated by a ';'                  |     |
++-----------+------------------------------------+-----+
+|scomnames  |Unique Subject Common Name(s), sepa\|str  |
+|           |rated by a ';'                      |     |
++-----------+------------------------------------+-----+
+|sblastnames|unique Subject Blast Name(s), separ\|str  |
+|           |ated by a ';' (in alphabetical      |     |
+|           |order)                              |     |
++-----------+------------------------------------+-----+
+|sskingdoms |unique Subject Super Kingdom(s), se\|str  |
+|           |parated by a ';' (in alphabetical   |     |
+|           |order)                              |     |
++-----------+------------------------------------+-----+
+|stitle     |Subject Title                       |str  |
++-----------+------------------------------------+-----+
+|sstrand    |Subject Strand                      |str  |
++-----------+------------------------------------+-----+
+|salltitles |All Subject Title(s), separated by  |str  |
+|           |a '<>'                              |     |
++-----------+------------------------------------+-----+
+|qcovs      |Query Coverage Per Subject          |int  |
++-----------+------------------------------------+-----+
+|qcovhsp    |Query Coverage Per HSP              |int  |
++-----------+------------------------------------+-----+
+
+.. note:: When a BLAST+6-formatted file contains ``N/A`` values, scikit-bio
+   will convert these values into ``np.nan``, matching pandas' convention for
+   representing missing data.
+
+.. note:: scikit-bio stores columns of type ``int`` as type ``float`` in the
+   returned ``pd.DataFrame``. This is necessary in order to allow ``N/A``
+   values in integer columns (this is currently a limitation of pandas).
+
+Format Parameters
+-----------------
+The following format parameters are available in ``blast+6`` format:
+
+- ``default_columns``: ``False`` by default. If ``True``, will use the default
+  columns output by BLAST, which are qseqid, sseqid, pident, length, mismatch,
+  gapopen, qstart, qend, sstart, send, evalue, and bitscore.
+
+  .. warning::  When reading legacy BLAST files, you must pass
+     ``default_columns=True`` because legacy BLAST does not allow users to
+     specify which columns are present in the output file.
+
+- ``columns``: ``None`` by default. If provided, must be a list of column names
+  in the order they will appear in the file.
+
+.. note:: Either ``default_columns`` or ``columns`` must be provided, as
+   ``blast+6`` does not contain column headers.
+
+Examples
+--------
+Suppose we have a ``blast+6`` file with default columns:
+
+>>> from io import StringIO
+>>> import skbio.io
+>>> import pandas as pd
+>>> fs = '\\n'.join([
+...     'moaC\\tgi|15800534|ref|NP_286546.1|\\t100.00\\t161\\t0\\t0\\t1\\t161\
+\\t1\\t161\\t3e-114\\t330',
+...     'moaC\\tgi|170768970|ref|ZP_02903423.1|\\t99.38\\t161\\t1\\t0\\t1\\t\
+161\\t1\\t161\\t9e-114\\t329'
+... ])
+>>> fh = StringIO(fs)
+
+Read the file into a ``pd.DataFrame`` and specify that default columns should
+be used:
+
+>>> df = skbio.io.read(fh, format="blast+6", into=pd.DataFrame,
+...                    default_columns=True)
+>>> df # doctest: +NORMALIZE_WHITESPACE
+  qseqid                           sseqid  pident  length  mismatch  gapopen \\
+0   moaC     gi|15800534|ref|NP_286546.1|  100.00     161         0        0
+1   moaC  gi|170768970|ref|ZP_02903423.1|   99.38     161         1        0
+<BLANKLINE>
+   qstart  qend  sstart  send         evalue  bitscore
+0       1   161       1   161  3.000000e-114       330
+1       1   161       1   161  9.000000e-114       329
+
+Suppose we have a ``blast+6`` file with user-supplied (non-default) columns:
+
+>>> from io import StringIO
+>>> import skbio.io
+>>> import pandas as pd
+>>> fs = '\\n'.join([
+...     'moaC\\t100.00\\t0\\t161\\t0\\t161\\t330\\t1',
+...     'moaC\\t99.38\\t1\\t161\\t0\\t161\\t329\\t1'
+... ])
+>>> fh = StringIO(fs)
+
+Read the file into a ``pd.DataFrame`` and specify which columns are present
+in the file:
+
+>>> df = skbio.io.read(fh, format="blast+6", into=pd.DataFrame,
+...                    columns=['qseqid', 'pident', 'mismatch', 'length',
+...                             'gapopen', 'qend', 'bitscore', 'sstart'])
+>>> df # doctest: +NORMALIZE_WHITESPACE
+  qseqid  pident  mismatch  length  gapopen  qend  bitscore  sstart
+0   moaC  100.00         0     161        0   161       330       1
+1   moaC   99.38         1     161        0   161       329       1
+
+References
+----------
+.. [1] Altschul, S.F., Gish, W., Miller, W., Myers, E.W. & Lipman, D.J. (1990)
+   "Basic local alignment search tool." J. Mol. Biol. 215:403-410.
+.. [2] http://blastedbio.blogspot.com/2014/11/column-headers-in-blast-tabular-\
+and-csv.html
+.. [3] http://www.ncbi.nlm.nih.gov/books/NBK279675/
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import pandas as pd
+
+from skbio.io import create_format
+from skbio.io.format._blast import _parse_blast_data, _possible_columns
+
+blast6 = create_format('blast+6')
+
+_default_columns = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch',
+                    'gapopen', 'qstart', 'qend', 'sstart', 'send',
+                    'evalue', 'bitscore']
+
+
+@blast6.reader(pd.DataFrame, monkey_patch=False)
+def _blast6_to_data_frame(fh, columns=None, default_columns=False):
+    if default_columns and columns is not None:
+        raise ValueError("`columns` and `default_columns` cannot both be"
+                         " provided.")
+    if not default_columns and columns is None:
+        raise ValueError("Either `columns` or `default_columns` must be"
+                         " provided.")
+    if default_columns:
+        columns = _default_columns
+    else:
+        for column in columns:
+            if column not in _possible_columns:
+                raise ValueError("Unrecognized column (%r)."
+                                 " Supported columns:\n%r" %
+                                 (column, set(_possible_columns.keys())))
+
+    return _parse_blast_data(fh, columns, ValueError,
+                             "Specified number of columns (%r) does not equal"
+                             " number of columns in file (%r).")
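
Because ``columns`` and ``default_columns`` are mutually exclusive, supplying
both (or neither) makes ``_blast6_to_data_frame`` raise a ``ValueError`` before
any parsing happens. A small sketch, with hypothetical input::

    from io import StringIO

    import pandas as pd
    import skbio.io

    fh = StringIO('moaC\tgi|15800534|ref|NP_286546.1|\t100.00\t161\n')
    try:
        skbio.io.read(fh, format='blast+6', into=pd.DataFrame,
                      default_columns=True, columns=['qseqid', 'sseqid'])
    except ValueError as e:
        print(e)  # `columns` and `default_columns` cannot both be provided.
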
diff --git a/skbio/io/format/blast7.py b/skbio/io/format/blast7.py
new file mode 100644
index 0000000..9b108a6
--- /dev/null
+++ b/skbio/io/format/blast7.py
@@ -0,0 +1,384 @@
+"""
+BLAST+7 format (:mod:`skbio.io.format.blast7`)
+==============================================
+
+.. currentmodule:: skbio.io.format.blast7
+
+The BLAST+7 format (``blast+7``) stores the results of a BLAST [1]_ database
+search. This format is produced by both BLAST+ output
+format 7 and legacy BLAST output format 9. The results
+are stored in a simple tabular format with headers. Values are separated by the
+tab character.
+
+An example BLAST+7-formatted file comparing two nucleotide sequences, taken
+from [2]_ (tab characters represented by ``<tab>``):
+
+.. code-block:: none
+
+    # BLASTN 2.2.18+
+    # Query: gi|1786181|gb|AE000111.1|AE000111
+    # Subject: ecoli
+    # Fields: query acc., subject acc., evalue, q. start, q. end, s. st\
+art, s. end
+    # 5 hits found
+    AE000111<tab>AE000111<tab>0.0<tab>1<tab>10596<tab>1<tab>10596
+    AE000111<tab>AE000174<tab>8e-30<tab>5565<tab>5671<tab>6928<tab>6821
+    AE000111<tab>AE000394<tab>1e-27<tab>5587<tab>5671<tab>135<tab>219
+    AE000111<tab>AE000425<tab>6e-26<tab>5587<tab>5671<tab>8552<tab>8468
+    AE000111<tab>AE000171<tab>3e-24<tab>5587<tab>5671<tab>2214<tab>2130
+
+Format Support
+==============
+**Has Sniffer: Yes**
+
+**State: Experimental as of 0.4.1.**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|Yes   |No    |:mod:`pandas.DataFrame`                                        |
++------+------+---------------------------------------------------------------+
+
+Format Specification
+====================
+There are two BLAST+7 file formats supported by scikit-bio: BLAST+ output
+format 7 (``-outfmt 7``) and legacy BLAST output format 9 (``-m 9``). Both file
+formats are structurally similar, with minor differences.
+
+Example BLAST+ output format 7 file::
+
+    # BLASTP 2.2.31+
+    # Query: query1
+    # Subject: subject2
+    # Fields: q. start, q. end, s. start, s. end, identical, mismatches, sbjct\
+frame, query acc.ver, subject acc.ver
+    # 2 hits found
+    1	8	3	10	8	0	1	query1	subject2
+    2	5	2	15	8	0	2	query1	subject2
+
+.. note:: Database searches without hits may occur in BLAST+ output format 7
+   files. scikit-bio ignores these "empty" records:
+
+   .. code-block:: none
+
+       # BLASTP 2.2.31+
+       # Query: query1
+       # Subject: subject1
+       # 0 hits found
+
+Example legacy BLAST output format 9 file:
+
+.. code-block:: none
+
+    # BLASTN 2.2.3 [May-13-2002]
+    # Database: other_vertebrate
+    # Query: AF178033
+    # Fields:
+    Query id,Subject id,% identity,alignment length,mismatches,gap openings,q.\
+ start,q. end,s. start,s. end,e-value,bit score
+    AF178033    EMORG:AF178033  100.00  811 0   0   1   811 1   811 0.0 1566.6
+    AF178033    EMORG:AF031394  99.63   811 3   0   1   811 99  909 0.0 1542.8
+
+.. note:: scikit-bio requires fields to be consistent within a file.
+
+BLAST Column Types
+------------------
+The following column types are output by BLAST and supported by scikit-bio.
+For more information on these column types, see :mod:`skbio.io.format.blast6`.
+
++-------------------+----------------------+
+|Field Name         |DataFrame Column Name |
++===================+======================+
+|query id           |qseqid                |
++-------------------+----------------------+
+|query gi           |qgi                   |
++-------------------+----------------------+
+|query acc.         |qacc                  |
++-------------------+----------------------+
+|query acc.ver      |qaccver               |
++-------------------+----------------------+
+|query length       |qlen                  |
++-------------------+----------------------+
+|subject id         |sseqid                |
++-------------------+----------------------+
+|subject ids        |sallseqid             |
++-------------------+----------------------+
+|subject gi         |sgi                   |
++-------------------+----------------------+
+|subject gis        |sallgi                |
++-------------------+----------------------+
+|subject acc.       |sacc                  |
++-------------------+----------------------+
+|subject acc.ver    |saccver               |
++-------------------+----------------------+
+|subject accs       |sallacc               |
++-------------------+----------------------+
+|subject length     |slen                  |
++-------------------+----------------------+
+|q\. start          |qstart                |
++-------------------+----------------------+
+|q\. end            |qend                  |
++-------------------+----------------------+
+|s\. start          |sstart                |
++-------------------+----------------------+
+|s\. end            |send                  |
++-------------------+----------------------+
+|query seq          |qseq                  |
++-------------------+----------------------+
+|subject seq        |sseq                  |
++-------------------+----------------------+
+|evalue             |evalue                |
++-------------------+----------------------+
+|bit score          |bitscore              |
++-------------------+----------------------+
+|score              |score                 |
++-------------------+----------------------+
+|alignment length   |length                |
++-------------------+----------------------+
+|% identity         |pident                |
++-------------------+----------------------+
+|identical          |nident                |
++-------------------+----------------------+
+|mismatches         |mismatch              |
++-------------------+----------------------+
+|positives          |positive              |
++-------------------+----------------------+
+|gap opens          |gapopen               |
++-------------------+----------------------+
+|gaps               |gaps                  |
++-------------------+----------------------+
+|% positives        |ppos                  |
++-------------------+----------------------+
+|query/sbjct frames |frames                |
++-------------------+----------------------+
+|query frame        |qframe                |
++-------------------+----------------------+
+|sbjct frame        |sframe                |
++-------------------+----------------------+
+|BTOP               |btop                  |
++-------------------+----------------------+
+|subject tax ids    |staxids               |
++-------------------+----------------------+
+|subject sci names  |sscinames             |
++-------------------+----------------------+
+|subject com names  |scomnames             |
++-------------------+----------------------+
+|subject blast names|sblastnames           |
++-------------------+----------------------+
+|subject super king\|sskingdoms            |
+|doms               |                      |
++-------------------+----------------------+
+|subject title      |stitle                |
++-------------------+----------------------+
+|subject strand     |sstrand               |
++-------------------+----------------------+
+|subject titles     |salltitles            |
++-------------------+----------------------+
+|% query coverage p\|qcovs                 |
+|er subject         |                      |
++-------------------+----------------------+
+|% query coverage p\|qcovhsp               |
+|er hsp             |                      |
++-------------------+----------------------+
+
+Examples
+========
+Suppose we have a BLAST+7 file:
+
+>>> from io import StringIO
+>>> import skbio.io
+>>> import pandas as pd
+>>> fs = '\\n'.join([
+...     '# BLASTN 2.2.18+',
+...     '# Query: gi|1786181|gb|AE000111.1|AE000111',
+...     '# Database: ecoli',
+...     '# Fields: query acc., subject acc., evalue, q. start, q. end, s. st\
+art, s. end',
+...     '# 5 hits found',
+...     'AE000111\\tAE000111\\t0.0\\t1\\t10596\\t1\\t10596',
+...     'AE000111\\tAE000174\\t8e-30\\t5565\\t5671\\t6928\\t6821',
+...     'AE000111\\tAE000171\\t3e-24\\t5587\\t5671\\t2214\\t2130',
+...     'AE000111\\tAE000425\\t6e-26\\t5587\\t5671\\t8552\\t8468'
+... ])
+>>> fh = StringIO(fs)
+
+Read the file into a ``pd.DataFrame``:
+
+>>> df = skbio.io.read(fh, into=pd.DataFrame)
+>>> df # doctest: +NORMALIZE_WHITESPACE
+       qacc      sacc        evalue  qstart   qend  sstart   send
+0  AE000111  AE000111  0.000000e+00       1  10596       1  10596
+1  AE000111  AE000174  8.000000e-30    5565   5671    6928   6821
+2  AE000111  AE000171  3.000000e-24    5587   5671    2214   2130
+3  AE000111  AE000425  6.000000e-26    5587   5671    8552   8468
+
+Suppose we have a legacy BLAST 9 file:
+
+>>> from io import StringIO
+>>> import skbio.io
+>>> import pandas as pd
+>>> fs = '\\n'.join([
+...     '# BLASTN 2.2.3 [May-13-2002]',
+...     '# Database: other_vertebrate',
+...     '# Query: AF178033',
+...     '# Fields: ',
+...     'Query id,Subject id,% identity,alignment length,mismatches,gap openin\
+gs,q. start,q. end,s. start,s. end,e-value,bit score',
+...     'AF178033\\tEMORG:AF178033\\t100.00\\t811\\t0\\t0\\t1\\t811\\t1\\t81\
+1\\t0.0\\t1566.6',
+...     'AF178033\\tEMORG:AF178032\\t94.57\\t811\\t44\\t0\\t1\\t811\\t1\\t81\
+1\\t0.0\\t1217.7',
+...     'AF178033\\tEMORG:AF178031\\t94.82\\t811\\t42\\t0\\t1\\t811\\t1\\t81\
+1\\t0.0\\t1233.5'
+... ])
+>>> fh = StringIO(fs)
+
+Read the file into a ``pd.DataFrame``:
+
+>>> df = skbio.io.read(fh, into=pd.DataFrame)
+>>> df # doctest: +NORMALIZE_WHITESPACE
+     qseqid          sseqid  pident  length  mismatch  gapopen  qstart  qend \\
+0  AF178033  EMORG:AF178033  100.00     811         0        0       1   811
+1  AF178033  EMORG:AF178032   94.57     811        44        0       1   811
+2  AF178033  EMORG:AF178031   94.82     811        42        0       1   811
+<BLANKLINE>
+   sstart  send  evalue  bitscore
+0       1   811       0    1566.6
+1       1   811       0    1217.7
+2       1   811       0    1233.5
+
+References
+==========
+.. [1] Altschul, S.F., Gish, W., Miller, W., Myers, E.W. & Lipman, D.J. (1990)
+   "Basic local alignment search tool." J. Mol. Biol. 215:403-410.
+.. [2] http://www.ncbi.nlm.nih.gov/books/NBK279682/
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import pandas as pd
+
+from skbio.io import create_format, BLAST7FormatError
+from skbio.io.format._blast import _parse_blast_data
+
+blast7 = create_format('blast+7')
+
+column_converter = {'query id': 'qseqid', 'query gi': 'qgi',
+                    'query acc.': 'qacc', 'query acc.ver': 'qaccver',
+                    'query length': 'qlen', 'subject id': 'sseqid',
+                    'subject ids': 'sallseqid', 'subject gi': 'sgi',
+                    'subject gis': 'sallgi', 'subject acc.': 'sacc',
+                    'subject acc.ver': 'saccver', 'subject accs.': 'sallacc',
+                    'subject length': 'slen', 'q. start': 'qstart',
+                    'q. end': 'qend', 's. start': 'sstart', 's. end': 'send',
+                    'query seq': 'qseq', 'subject seq': 'sseq',
+                    'evalue': 'evalue', 'bit score': 'bitscore',
+                    'score': 'score', 'alignment length': 'length',
+                    '% identity': 'pident', 'identical': 'nident',
+                    'mismatches': 'mismatch', 'positives': 'positive',
+                    'gap opens': 'gapopen', 'gaps': 'gaps',
+                    '% positives': 'ppos', 'query/sbjct frames': 'frames',
+                    'query frame': 'qframe', 'sbjct frame': 'sframe',
+                    'BTOP': 'btop', 'subject tax ids': 'staxids',
+                    'subject sci names': 'sscinames',
+                    'subject com names': 'scomnames',
+                    'subject blast names': 'sblastnames',
+                    'subject super kingdoms': 'sskingdoms',
+                    'subject title': 'stitle', 'subject titles': 'salltitles',
+                    'subject strand': 'sstrand',
+                    '% query coverage per subject': 'qcovs',
+                    '% query coverage per hsp': 'qcovhsp',
+                    'Query id': 'qseqid', 'Subject id': 'sseqid',
+                    'gap openings': 'gapopen', 'e-value': 'evalue'}
+
+
+@blast7.sniffer()
+def _blast7_sniffer(fh):
+    # Smells a BLAST+7 file if the following conditions are met:
+    #   -First line starts with "# BLAST"
+    #   -Second line starts with "# Query:" or "# Database:"
+    #   -Third line starts with "# Subject:", "# Query:" or "# Database:"
+    lines = [line for _, line in zip(range(3), fh)]
+    if len(lines) < 3:
+        return False, {}
+
+    if not lines[0].startswith("# BLAST"):
+        return False, {}
+    if not (lines[1].startswith("# Query:") or
+            lines[1].startswith("# Database:")):
+        return False, {}
+    if not (lines[2].startswith("# Subject:") or
+            lines[2].startswith("# Query:") or
+            lines[2].startswith("# Database:")):
+        return False, {}
+
+    return True, {}
+
+
+@blast7.reader(pd.DataFrame, monkey_patch=False)
+def _blast7_to_data_frame(fh):
+    line_num = 0
+    columns = None
+    skiprows = []
+    for line in fh:
+        if line == "# Fields: \n":
+            # Identifies Legacy BLAST 9 data
+            line = next(fh)
+            line_num += 1
+            if columns is None:
+                columns = _parse_fields(line, legacy=True)
+                skiprows.append(line_num)
+            else:
+                next_columns = _parse_fields(line, legacy=True)
+                if columns != next_columns:
+                    raise BLAST7FormatError("Fields %r do not equal fields %r"
+                                            % (columns, next_columns))
+                skiprows.append(line_num)
+        elif line.startswith("# Fields: "):
+            # Identifies BLAST+7 data
+            if columns is None:
+                columns = _parse_fields(line)
+            else:
+                # Affirms fields do not differ throughout the file
+                next_columns = _parse_fields(line)
+                if columns != next_columns:
+                    raise BLAST7FormatError("Fields %r do not equal fields %r"
+                                            % (columns, next_columns))
+        line_num += 1
+    if columns is None:
+        # Affirms file contains BLAST data
+        raise BLAST7FormatError("File contains no BLAST data.")
+    fh.seek(0)
+
+    return _parse_blast_data(fh, columns, BLAST7FormatError,
+                             "Number of fields (%r) does not equal number"
+                             " of data columns (%r).", comment='#',
+                             skiprows=skiprows)
+
+
+def _parse_fields(line, legacy=False):
+    """Removes '\n' from fields line and returns fields as a list (columns)."""
+    line = line.rstrip('\n')
+    if legacy:
+        fields = line.split(',')
+    else:
+        line = line.split('# Fields: ')[1]
+        fields = line.split(', ')
+    columns = []
+    for field in fields:
+        if field not in column_converter:
+            raise BLAST7FormatError("Unrecognized field (%r)."
+                                    " Supported fields: %r"
+                                    % (field,
+                                       set(column_converter.keys())))
+        columns.append(column_converter[field])
+    return columns
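
``_parse_fields`` is what maps the human-readable field names in a
``# Fields:`` header to the DataFrame column names via ``column_converter``. A
short sketch exercising the private helper defined above::

    from skbio.io.format.blast7 import _parse_fields

    # BLAST+ output format 7 header line:
    print(_parse_fields('# Fields: query acc., subject acc., evalue\n'))
    # ['qacc', 'sacc', 'evalue']

    # Legacy BLAST output format 9 header line (comma-separated, no prefix):
    print(_parse_fields('Query id,Subject id,e-value\n', legacy=True))
    # ['qseqid', 'sseqid', 'evalue']
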
diff --git a/skbio/io/format/clustal.py b/skbio/io/format/clustal.py
index f6253dd..3a12ddb 100644
--- a/skbio/io/format/clustal.py
+++ b/skbio/io/format/clustal.py
@@ -14,7 +14,7 @@ Format Support
 +------+------+---------------------------------------------------------------+
 |Reader|Writer|                          Object Class                         |
 +======+======+===============================================================+
-|Yes   |Yes   |:mod:`skbio.alignment.Alignment`                               |
+|Yes   |Yes   |:mod:`skbio.alignment.TabularMSA`                              |
 +------+------+---------------------------------------------------------------+
 
 Format Specification
@@ -34,16 +34,29 @@ included in the examples below). A line containing conservation information
 about each position in the alignment can optionally follow all of the
 subsequences (not included in the examples below).
 
-.. note:: scikit-bio does not support writing conservation information
+.. note:: scikit-bio ignores conservation information when reading and does not
+   support writing conservation information.
 
-.. note:: scikit-bio will only write a clustal-formatted file if the
-   alignment's sequence characters are valid IUPAC characters, as defined in
-   :mod:`skbio.sequence`. The specific lexicon that is validated against
-   depends on the type of sequences stored in the alignment.
+.. note:: When reading a clustal-formatted file into an
+   ``skbio.alignment.TabularMSA`` object, sequence identifiers/labels are
+   stored as ``TabularMSA`` index labels (``index`` property).
+
+   When writing an ``skbio.alignment.TabularMSA`` object as a clustal-formatted
+   file, ``TabularMSA`` index labels will be converted to strings and written
+   as sequence identifiers/labels.
+
+Format Parameters
+-----------------
+The only supported format parameter is ``constructor``, which specifies the
+type of in-memory sequence object to read each aligned sequence into. This must
+be a subclass of ``IUPACSequence`` (e.g., ``DNA``, ``RNA``, ``Protein``) and is
+a required format parameter. For example, if you know that the clustal file
+you're reading contains DNA sequences, you would pass ``constructor=DNA`` to
+the reader call.
 
 Examples
 --------
-Assume we have a clustal-formatted file with the following contents::
+Assume we have a clustal-formatted file of RNA sequences::
 
     CLUSTAL W (1.82) multiple sequence alignment
 
@@ -55,22 +68,33 @@ Assume we have a clustal-formatted file with the following contents::
     def   ---------------CGCGAUGCAUGCAU-CGAU
     xyz   -----------CAUGCAUCGUACGUACGCAUGAC
 
-We can use the following code to read a clustal file into an ``Alignment``:
-
->>> from skbio import Alignment
->>> clustal_f = [u'CLUSTAL W (1.82) multiple sequence alignment\n',
-...              u'\n',
-...              u'abc   GCAUGCAUCUGCAUACGUACGUACGCAUGCA\n',
-...              u'def   -------------------------------\n',
-...              u'xyz   -------------------------------\n',
-...              u'\n',
-...              u'abc   GUCGAUACAUACGUACGUCGGUACGU-CGAC\n',
-...              u'def   ---------------CGUGCAUGCAU-CGAU\n',
-...              u'xyz   -----------CAUUCGUACGUACGCAUGAC\n']
->>> Alignment.read(clustal_f, format="clustal")
-<Alignment: n=3; mean +/- std length=62.00 +/- 0.00>
-
-We can use the following code to write an ``Alignment`` to a clustal-formatted
+We can use the following code to read the clustal file into a ``TabularMSA``:
+
+>>> from skbio import TabularMSA, RNA
+>>> clustal_f = ['CLUSTAL W (1.82) multiple sequence alignment\n',
+...              '\n',
+...              'abc   GCAUGCAUCUGCAUACGUACGUACGCAUGCA\n',
+...              'def   -------------------------------\n',
+...              'xyz   -------------------------------\n',
+...              '\n',
+...              'abc   GUCGAUACAUACGUACGUCGGUACGU-CGAC\n',
+...              'def   ---------------CGUGCAUGCAU-CGAU\n',
+...              'xyz   -----------CAUUCGUACGUACGCAUGAC\n']
+>>> msa = TabularMSA.read(clustal_f, constructor=RNA)
+>>> msa
+TabularMSA[RNA]
+--------------------------------------------------------------
+Stats:
+    sequence count: 3
+    position count: 62
+--------------------------------------------------------------
+GCAUGCAUCUGCAUACGUACGUACGCAUGCAGUCGAUACAUACGUACGUCGGUACGU-CGAC
+----------------------------------------------CGUGCAUGCAU-CGAU
+------------------------------------------CAUUCGUACGUACGCAUGAC
+>>> msa.index
+Index(['abc', 'def', 'xyz'], dtype='object')
+
+We can use the following code to write a ``TabularMSA`` to a clustal-formatted
 file:
 
 >>> from io import StringIO
@@ -78,9 +102,21 @@ file:
 >>> seqs = [DNA('ACCGTTGTA-GTAGCT', metadata={'id': 'seq1'}),
 ...         DNA('A--GTCGAA-GTACCT', metadata={'id': 'sequence-2'}),
 ...         DNA('AGAGTTGAAGGTATCT', metadata={'id': '3'})]
->>> aln = Alignment(seqs)
+>>> msa = TabularMSA(seqs, minter='id')
+>>> msa
+TabularMSA[DNA]
+----------------------
+Stats:
+    sequence count: 3
+    position count: 16
+----------------------
+ACCGTTGTA-GTAGCT
+A--GTCGAA-GTACCT
+AGAGTTGAAGGTATCT
+>>> msa.index
+Index(['seq1', 'sequence-2', '3'], dtype='object')
 >>> fh = StringIO()
->>> _ = aln.write(fh, format='clustal')
+>>> _ = msa.write(fh, format='clustal')
 >>> print(fh.getvalue()) # doctest: +NORMALIZE_WHITESPACE
 CLUSTAL
 <BLANKLINE>
@@ -110,14 +146,13 @@ from __future__ import (absolute_import, division, print_function,
                         unicode_literals)
 
 from skbio.io import create_format, ClustalFormatError
-from skbio.sequence import Sequence
-from skbio.alignment import Alignment
+from skbio.alignment import TabularMSA
 
 
 clustal = create_format('clustal')
 
 
-def _label_line_parser(record, strict=True):
+def _label_line_parser(record):
     """Returns dict mapping list of data to labels, plus list with field order.
 
     Field order contains labels in order encountered in file.
@@ -134,12 +169,9 @@ def _label_line_parser(record, strict=True):
         if len(split_line) == 2:
             key, val = split_line
         else:
-            if strict:
-                raise ClustalFormatError(
-                    "Failed to parse sequence identifier and subsequence from "
-                    "the following line: %r" % line)
-            else:
-                continue  # just skip the line if not strict
+            raise ClustalFormatError(
+                "Failed to parse sequence identifier and subsequence from "
+                "the following line: %r" % line)
 
         if key in result:
             result[key].append(val)
@@ -227,7 +259,7 @@ def _clustal_sniffer(fh):
     try:
         records = map(_delete_trailing_number,
                       filter(_is_clustal_seq_line, fh))
-        data, labels = _label_line_parser(records, strict=True)
+        data, labels = _label_line_parser(records)
         if len(data) > 0:
             empty = False
         # Only check first 50 sequences
@@ -239,19 +271,15 @@ def _clustal_sniffer(fh):
     return not empty, {}
 
 
-@clustal.writer(Alignment)
-def _alignment_to_clustal(obj, fh):
-    r"""writes aligned sequences to a specified file
-    Parameters
-    ----------
-    obj: Alignment object
-        An alignment object containing a set of Sequence objects
-    fh: open file handle object
-        An open file handle object containing Clustal sequences.
+@clustal.writer(TabularMSA)
+def _tabular_msa_to_clustal(obj, fh):
+    if not obj.index.is_unique:
+        raise ClustalFormatError(
+            "TabularMSA's index labels must be unique.")
 
-    """
     clen = 60  # Max length of clustal lines
-    names, seqs = zip(*[(s.metadata['id'], str(s)) for s in obj])
+    seqs = [str(s) for s in obj]
+    names = [str(label) for label in obj.index]
     nameLen = max(map(len, names))
     seqLen = max(map(len, seqs))
     fh.write('CLUSTAL\n\n\n')
@@ -262,35 +290,31 @@ def _alignment_to_clustal(obj, fh):
         fh.write("\n")
 
 
-@clustal.reader(Alignment)
-def _clustal_to_alignment(fh, strict=True):
+@clustal.reader(TabularMSA)
+def _clustal_to_tabular_msa(fh, constructor=None):
     r"""yields labels and sequences from msa (multiple sequence alignment)
 
     Parameters
     ----------
-
     fh : open file object
         An open Clustal file.
-    strict : boolean
-        Whether or not to raise a ``ClustalFormatError``
-        when no labels are found.
 
     Returns
     -------
-    skbio.Alignment
-        Alignment object containing aligned biogical sequences
+    skbio.TabularMSA
+        MSA containing aligned sequences.
 
     Raises
     ------
-        skbio.util.exception.ClustalFormatError
-            If the sequences in `fh` don't have the same sequence length
-            or if the sequence ids don't properly match with the subsequences
+    skbio.io.ClustalFormatError
+        If the sequences in `fh` don't have the same sequence length
+        or if the sequence ids don't properly match with the subsequences
+
     Notes
     -----
-
     Skips any line that starts with a blank.
 
-    ``_clustal_to_alignment`` preserves the order of the sequences from the
+    ``_clustal_to_tabular_msa`` preserves the order of the sequences from the
     original file.  However, it does use a dict as an intermediate, so
     two sequences can't have the same label. This is probably OK since
     Clustal will refuse to run on a FASTA file in which two sequences have
@@ -310,16 +334,17 @@ def _clustal_to_alignment(fh, strict=True):
         Thompson", Nucleic Acids Res. 1994 Nov 11;22(22):4673-80.
 
     """
+    if constructor is None:
+        raise ValueError("Must provide `constructor`.")
 
     records = map(_delete_trailing_number,
                   filter(_is_clustal_seq_line, fh))
-    data, labels = _label_line_parser(records, strict)
+    data, labels = _label_line_parser(records)
 
     aligned_correctly = _check_length(data, labels)
     if not aligned_correctly:
         raise ClustalFormatError("Sequences not aligned properly")
-    alns = []
-    for key in labels:
-        alns.append(Sequence(sequence=''.join(data[key]),
-                             metadata={'id': key}))
-    return Alignment(alns)
+    seqs = []
+    for label in labels:
+        seqs.append(constructor(''.join(data[label])))
+    return TabularMSA(seqs, index=labels)
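
The rewritten clustal writer now refuses to serialize a ``TabularMSA`` whose
index labels are not unique, since label/sequence pairs could not be recovered
unambiguously. A minimal sketch of that check, assuming ``TabularMSA`` itself
permits duplicate index labels (which is why the writer guards against them)::

    from io import StringIO

    from skbio import DNA, TabularMSA
    from skbio.io import ClustalFormatError

    msa = TabularMSA([DNA('ACGT'), DNA('AC-T')], index=['dup', 'dup'])
    try:
        msa.write(StringIO(), format='clustal')
    except ClustalFormatError as e:
        print(e)  # TabularMSA's index labels must be unique.
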
diff --git a/skbio/io/format/fasta.py b/skbio/io/format/fasta.py
index 070104b..e35ed02 100644
--- a/skbio/io/format/fasta.py
+++ b/skbio/io/format/fasta.py
@@ -33,9 +33,7 @@ Format Support
 +======+======+===============================================================+
 |Yes   |Yes   |generator of :mod:`skbio.sequence.Sequence` objects            |
 +------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.alignment.SequenceCollection`                      |
-+------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.alignment.Alignment`                               |
+|Yes   |Yes   |:mod:`skbio.alignment.TabularMSA`                              |
 +------+------+---------------------------------------------------------------+
 |Yes   |Yes   |:mod:`skbio.sequence.Sequence`                                 |
 +------+------+---------------------------------------------------------------+
@@ -84,11 +82,18 @@ Sequence Header
 ~~~~~~~~~~~~~~~
 Each sequence header consists of a single line beginning with a greater-than
 (``>``) symbol. Immediately following this is a sequence identifier (ID) and
-description separated by one or more whitespace characters. The sequence ID and
-description are stored in the sequence `metadata` attribute, under the `'id'`
-and `'description'` keys, repectively. Both are optional. Each will be
-represented as the empty string (``''``) in `metadata` if it is not present
-in the header.
+description separated by one or more whitespace characters.
+
+.. note:: When reading a FASTA-formatted file, the sequence ID and description
+   are stored in the sequence `metadata` attribute, under the `'id'` and
+   `'description'` keys, respectively. Both are optional. Each will be
+   represented as the empty string (``''``) in `metadata` if it is not present
+   in the header.
+
+   When writing a FASTA-formatted file, sequence `metadata` identified by keys
+   `'id'` and `'description'` will be converted to strings and written as the
+   sequence identifier and description, respectively. Each will be written as
+   the empty string if not present in sequence `metadata`.
 
 A sequence ID consists of a single *word*: all characters after the greater-
 than symbol and before the first whitespace character (if any) are taken as the
@@ -97,14 +102,6 @@ itself. A single standardized ID format is similarly not enforced by the FASTA
 format, though it is often common to use a unique library accession number for
 a sequence ID (e.g., NCBI's FASTA defline format [5]_).
 
-.. note:: scikit-bio will enforce sequence ID uniqueness depending on the type
-   of object that the FASTA file is read into. For example, reading a FASTA
-   file as a generator of ``Sequence`` objects will not enforce
-   unique IDs since it simply yields each sequence it finds in the FASTA file.
-   However, if the FASTA file is read into a ``SequenceCollection`` object, ID
-   uniqueness will be enforced because that is a requirement of a
-   ``SequenceCollection``.
-
 If a description is present, it is taken as the remaining characters that
 follow the sequence ID and initial whitespace(s). The description is considered
 additional information about the sequence (e.g., comments about the source of
@@ -130,20 +127,18 @@ the standard IUPAC lexicon (single-letter codes).
 
 .. note:: scikit-bio supports both upper and lower case characters.
    This functionality depends on the type of object the data is
-   being read into. For ``Sequence``
-   objects, sciki-bio doesn't care about the case. However, for other object
-   types, such as :class:`skbio.sequence.DNA`, :class:`skbio.sequence.RNA`,
-   and :class:`skbio.sequence.Protein`, the `lowercase` parameter
-   must be used to control case functionality. Refer to the documentation for
-   the constructors for details.
-.. note:: Both ``-`` and ``.`` are supported as gap characters. See
-   :mod:`skbio.sequence` for more details on how scikit-bio interprets
-   sequence data in its in-memory objects.
-
-   Validation is performed for all scikit-bio objects which support it. This
-   consists of all objects which enforce usage of IUPAC characters. If any
-   invalid IUPAC characters are found in the sequence while reading from the
-   FASTA file, an exception is raised.
+   being read into. For ``Sequence`` objects, scikit-bio doesn't care about the
+   case. Other sequence objects do, but all provide the `lowercase` parameter
+   to control case functionality. Refer to each class's respective constructor
+   documentation for details.
+
+   Both ``-`` and ``.`` are supported as gap characters when reading into
+   ``DNA``, ``RNA``, and ``Protein`` sequence objects.
+
+   Validation is performed when reading into scikit-bio sequence objects that
+   enforce an alphabet (e.g., ``DNA``, ``RNA``, ``Protein``). If any invalid
+   characters are found while reading from the FASTA file, an exception is
+   raised.
 
 QUAL Format
 ^^^^^^^^^^^
@@ -154,14 +149,17 @@ containing a sequence ID and description. The same rules apply to QUAL headers
 as FASTA headers (see the above sections for details). scikit-bio processes
 FASTA and QUAL headers in exactly the same way.
 
-Quality scores are automatically stored in the object's `positional_metadata`
-attribute, under the `'quality'` column.
-
 Instead of storing biological sequence data in each record, a QUAL file stores
 a Phred quality score for each base in the corresponding sequence. Quality
 scores are represented as nonnegative integers separated by whitespace
 (typically a single space or newline), and can span multiple lines.
 
+.. note:: When reading a QUAL-formatted file, quality scores are stored in the
+   sequence's `positional_metadata` attribute under the `'quality'` column.
+
+   When writing a QUAL-formatted file, a sequence's `positional_metadata`
+   `'quality'` column will be written as the quality scores.
+
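+As a minimal sketch (the sequence, ID, and scores here are made up for
+illustration), quality scores stored in ``positional_metadata`` can be written
+back out through the ``qual`` parameter:
+
+>>> from io import StringIO
+>>> from skbio import Sequence
+>>> seq = Sequence('CGAT', metadata={'id': 'seqA', 'description': 'test'},
+...                positional_metadata={'quality': [40, 39, 38, 37]})
+>>> fasta_fh, qual_fh = StringIO(), StringIO()
+>>> _ = seq.write(fasta_fh, format='fasta', qual=qual_fh)
+>>> print(qual_fh.getvalue())
+>seqA test
+40 39 38 37
+<BLANKLINE>
+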
 .. note:: When reading FASTA and QUAL files, scikit-bio requires records to be
    in the same order in both files (i.e., each FASTA and QUAL record must have
    the same ID and description after being parsed). In addition to having the
@@ -193,19 +191,23 @@ Reader-specific Parameters
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 The available reader parameters differ depending on which reader is used.
 
-Generator, SequenceCollection, and Alignment Reader Parameters
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The ``constructor`` parameter can be used with the ``Sequence``
-generator, ``SequenceCollection``, and ``Alignment`` FASTA readers.
-``constructor`` specifies the in-memory type of each sequence that is parsed,
-and defaults to ``Sequence``. ``constructor`` should be a subclass of
-``Sequence``. For example, if you know that the FASTA file you're
-reading contains protein sequences, you would pass
+Generator and TabularMSA Reader Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``constructor`` parameter can be used with the ``Sequence`` generator and
+``TabularMSA`` FASTA readers. ``constructor`` specifies the type of in-memory
+sequence object to read each sequence into. For example, if you know that the
+FASTA file you're reading contains protein sequences, you would pass
 ``constructor=Protein`` to the reader call.
 
+When reading into a ``Sequence`` generator, ``constructor`` defaults to
+``Sequence`` and must be a subclass of ``Sequence`` if supplied.
+
+When reading into a ``TabularMSA``, ``constructor`` is a required format
+parameter and must be a subclass of ``IUPACSequence`` (e.g., ``DNA``, ``RNA``,
+``Protein``).
+
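+For example, a minimal sketch using a small made-up alignment:
+
+>>> from skbio import DNA, TabularMSA
+>>> small_fl = [">a\\n", "AC-GT\\n", ">b\\n", "ACCGT\\n"]
+>>> msa = TabularMSA.read(small_fl, constructor=DNA)
+>>> msa.shape.sequence
+2
+>>> msa.shape.position
+5
+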
 .. note:: The FASTA sniffer will not attempt to guess the ``constructor``
-   parameter, so it will always default to ``Sequence`` if another
-   type is not provided to the reader.
+   parameter.
 
 Sequence Reader Parameters
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -308,89 +310,47 @@ Let's define this file in-memory as a ``StringIO``, though this could be a real
 file path, file handle, or anything that's supported by scikit-bio's I/O
 registry in practice:
 
->>> fl = [u">seq1 Turkey\\n",
-...       u"AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT\\n",
-...       u">seq2 Salmo gair\\n",
-...       u"AAGCCTTGGCAGTGCAGGGTGAGCCGTGG\\n",
-...       u"CCGGGCACGGTAT\\n",
-...       u">seq3 H. Sapiens\\n",
-...       u"ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA\\n",
-...       u">seq4 Chimp\\n",
-...       u"AAACCCTTGCCG\\n",
-...       u"TTACGCTTAAAC\\n",
-...       u"CGAGGCCGGGAC\\n",
-...       u"ACTCAT\\n",
-...       u">seq5 Gorilla\\n",
-...       u"AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA\\n"]
-
-Let's read the FASTA file into a ``SequenceCollection``:
-
->>> from skbio import SequenceCollection
->>> sc = SequenceCollection.read(fl)
->>> sc.sequence_lengths()
-[42, 42, 42, 42, 42]
->>> sc.ids()
-[u'seq1', u'seq2', u'seq3', u'seq4', u'seq5']
-
-We see that all 5 sequences have 42 characters, and that each of the sequence
-IDs were successfully read into memory.
+>>> fl = [">seq1 Turkey\\n",
+...       "AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT\\n",
+...       ">seq2 Salmo gair\\n",
+...       "AAGCCTTGGCAGTGCAGGGTGAGCCGTGG\\n",
+...       "CCGGGCACGGTAT\\n",
+...       ">seq3 H. Sapiens\\n",
+...       "ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA\\n",
+...       ">seq4 Chimp\\n",
+...       "AAACCCTTGCCG\\n",
+...       "TTACGCTTAAAC\\n",
+...       "CGAGGCCGGGAC\\n",
+...       "ACTCAT\\n",
+...       ">seq5 Gorilla\\n",
+...       "AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA\\n"]
 
 Since these sequences are of equal length (presumably because they've been
-aligned), let's load the FASTA file into an ``Alignment`` object, which is a
-more appropriate data structure:
-
->>> from skbio import Alignment
->>> aln = Alignment.read(fl)
->>> aln.sequence_length()
-42
-
-Note that we were able to read the FASTA file into two different data
-structures (``SequenceCollection`` and ``Alignment``) using the exact same
-``read`` method call (and underlying reading/parsing logic). Also note that we
-didn't specify a file format in the ``read`` call. The FASTA sniffer detected
-the correct file format for us!
+aligned), let's read the FASTA file into a ``TabularMSA`` object:
 
-Let's inspect the type of sequences stored in the ``Alignment``:
-
->>> aln[0]
-Sequence
-------------------------------------------------
-Metadata:
-    u'description': u'Turkey'
-    u'id': u'seq1'
-Stats:
-    length: 42
-------------------------------------------------
-0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
-
-By default, sequences are loaded as ``Sequence`` objects. We can
-change the type of sequence via the ``constructor`` parameter:
-
->>> from skbio import DNA
->>> aln = Alignment.read(fl, constructor=DNA)
->>> aln[0] # doctest: +NORMALIZE_WHITESPACE
-DNA
-------------------------------------------------
-Metadata:
-    u'description': u'Turkey'
-    u'id': u'seq1'
+>>> from skbio import TabularMSA, DNA
+>>> msa = TabularMSA.read(fl, constructor=DNA)
+>>> msa
+TabularMSA[DNA]
+------------------------------------------
 Stats:
-    length: 42
-    has gaps: False
-    has degenerates: True
-    has non-degenerates: True
-    GC-content: 54.76%
-------------------------------------------------
-0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
+    sequence count: 5
+    position count: 42
+------------------------------------------
+AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT
+AAGCCTTGGCAGTGCAGGGTGAGCCGTGGCCGGGCACGGTAT
+ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA
+AAACCCTTGCCGTTACGCTTAAACCGAGGCCGGGACACTCAT
+AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
 
-We now have an ``Alignment`` of ``DNA`` objects instead of
-``Sequence`` objects.
+Note that we didn't specify a file format in the ``read`` call. The FASTA
+sniffer detected the correct file format for us!
 
-To write the alignment in FASTA format:
+To write the ``TabularMSA`` in FASTA format:
 
 >>> from io import StringIO
 >>> with StringIO() as fh:
-...     print(aln.write(fh).getvalue())
+...     print(msa.write(fh).getvalue())
 >seq1 Turkey
 AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT
 >seq2 Salmo gair
@@ -403,15 +363,14 @@ AAACCCTTGCCGTTACGCTTAAACCGAGGCCGGGACACTCAT
 AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
 <BLANKLINE>
 
-Both ``SequenceCollection`` and ``Alignment`` load all of the sequences from
-the FASTA file into memory at once. If the FASTA file is large (which is often
-the case), this may be infeasible if you don't have enough memory. To work
-around this issue, you can stream the sequences using scikit-bio's
-generator-based FASTA reader and writer. The generator-based reader yields
-``Sequence`` objects (or subclasses if ``constructor`` is supplied)
-one at a time, instead of loading all sequences into memory. For example, let's
-use the generator-based reader to process a single sequence at a time in a
-``for`` loop:
+``TabularMSA`` loads all of the sequences from the FASTA file into memory at
+once. If the FASTA file is large (which is often the case), this may be
+infeasible if you don't have enough memory. To work around this issue, you can
+stream the sequences using scikit-bio's generator-based FASTA reader and
+writer. The generator-based reader yields ``Sequence`` objects (or subclasses
+if ``constructor`` is supplied) one at a time, instead of loading all sequences
+into memory. For example, let's use the generator-based reader to process a
+single sequence at a time in a ``for`` loop:
 
 >>> import skbio.io
 >>> for seq in skbio.io.read(fl, format='fasta'):
@@ -420,8 +379,8 @@ use the generator-based reader to process a single sequence at a time in a
 Sequence
 ------------------------------------------------
 Metadata:
-    u'description': u'Turkey'
-    u'id': u'seq1'
+    'description': 'Turkey'
+    'id': 'seq1'
 Stats:
     length: 42
 ------------------------------------------------
@@ -430,8 +389,8 @@ Stats:
 Sequence
 ------------------------------------------------
 Metadata:
-    u'description': u'Salmo gair'
-    u'id': u'seq2'
+    'description': 'Salmo gair'
+    'id': 'seq2'
 Stats:
     length: 42
 ------------------------------------------------
@@ -440,8 +399,8 @@ Stats:
 Sequence
 ------------------------------------------------
 Metadata:
-    u'description': u'H. Sapiens'
-    u'id': u'seq3'
+    'description': 'H. Sapiens'
+    'id': 'seq3'
 Stats:
     length: 42
 ------------------------------------------------
@@ -450,8 +409,8 @@ Stats:
 Sequence
 ------------------------------------------------
 Metadata:
-    u'description': u'Chimp'
-    u'id': u'seq4'
+    'description': 'Chimp'
+    'id': 'seq4'
 Stats:
     length: 42
 ------------------------------------------------
@@ -460,8 +419,8 @@ Stats:
 Sequence
 ------------------------------------------------
 Metadata:
-    u'description': u'Gorilla'
-    u'id': u'seq5'
+    'description': 'Gorilla'
+    'id': 'seq5'
 Stats:
     length: 42
 ------------------------------------------------
@@ -476,8 +435,8 @@ A single sequence can also be read into a ``Sequence`` (or subclass):
 Sequence
 ------------------------------------------------
 Metadata:
-    u'description': u'Turkey'
-    u'id': u'seq1'
+    'description': 'Turkey'
+    'id': 'seq1'
 Stats:
     length: 42
 ------------------------------------------------
@@ -491,22 +450,22 @@ controlled with ``seq_num``. For example, to read the fifth sequence:
 Sequence
 ------------------------------------------------
 Metadata:
-    u'description': u'Gorilla'
-    u'id': u'seq5'
+    'description': 'Gorilla'
+    'id': 'seq5'
 Stats:
     length: 42
 ------------------------------------------------
 0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
 
-We can use the same API to read the fifth sequence into a ``DNA``:
+We can use the same API to read the fifth sequence into a ``DNA`` sequence:
 
 >>> dna_seq = DNA.read(fl, seq_num=5)
 >>> dna_seq
 DNA
 ------------------------------------------------
 Metadata:
-    u'description': u'Gorilla'
-    u'id': u'seq5'
+    'description': 'Gorilla'
+    'id': 'seq5'
 Stats:
     length: 42
     has gaps: False
@@ -526,14 +485,14 @@ AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
 
 Reading and Writing FASTA/QUAL Files
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-In addition to reading and writing standalone FASTA files, scikit-bio also
-supports reading and writing FASTA and QUAL files together. Suppose we have the
+In addition to reading and writing standalone FASTA files, scikit-bio supports
+reading and writing FASTA and QUAL files together. Suppose we have the
 following FASTA file::
 
     >seq1 db-accession-149855
     CGATGTC
     >seq2 db-accession-34989
-    CATCG
+    CATCGTC
 
 Also suppose we have the following QUAL file::
 
@@ -541,19 +500,19 @@ Also suppose we have the following QUAL file::
     40 39 39 4
     50 1 100
     >seq2 db-accession-34989
-    3 3 10 42 80
+    3 3 10 42 80 80 79
 
 >>> fasta_fl = [
-...     u">seq1 db-accession-149855\\n",
-...     u"CGATGTC\\n",
-...     u">seq2 db-accession-34989\\n",
-...     u"CATCG\\n"]
+...     ">seq1 db-accession-149855\\n",
+...     "CGATGTC\\n",
+...     ">seq2 db-accession-34989\\n",
+...     "CATCGTC\\n"]
 >>> qual_fl = [
-...     u">seq1 db-accession-149855\\n",
-...     u"40 39 39 4\\n",
-...     u"50 1 100\\n",
-...     u">seq2 db-accession-34989\\n",
-...     u"3 3 10 42 80\\n"]
+...     ">seq1 db-accession-149855\\n",
+...     "40 39 39 4\\n",
+...     "50 1 100\\n",
+...     ">seq2 db-accession-34989\\n",
+...     "3 3 10 42 80 80 79\\n"]
 
 To read in a single ``Sequence`` at a time, we can use the
 generator-based reader as we did above, providing both FASTA and QUAL files:
@@ -562,58 +521,64 @@ generator-based reader as we did above, providing both FASTA and QUAL files:
 ...     seq
 ...     print('')
 Sequence
-------------------------------------------
+----------------------------------------
 Metadata:
-    u'description': u'db-accession-149855'
-    u'id': u'seq1'
+    'description': 'db-accession-149855'
+    'id': 'seq1'
 Positional metadata:
-    u'quality': <dtype: uint8>
+    'quality': <dtype: uint8>
 Stats:
     length: 7
-------------------------------------------
+----------------------------------------
 0 CGATGTC
 <BLANKLINE>
 Sequence
------------------------------------------
+---------------------------------------
 Metadata:
-    u'description': u'db-accession-34989'
-    u'id': u'seq2'
+    'description': 'db-accession-34989'
+    'id': 'seq2'
 Positional metadata:
-    u'quality': <dtype: uint8>
+    'quality': <dtype: uint8>
 Stats:
-    length: 5
------------------------------------------
-0 CATCG
+    length: 7
+---------------------------------------
+0 CATCGTC
 <BLANKLINE>
 
 Note that the sequence objects have quality scores stored as positional
 metadata since we provided a QUAL file. The other FASTA readers operate in a
 similar manner.
 
-Now let's load the sequences and their quality scores into a
-``SequenceCollection``:
+Now let's load the sequences and their quality scores into a ``TabularMSA``:
 
->>> sc = SequenceCollection.read(fasta_fl, qual=qual_fl)
->>> sc
-<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
+>>> msa = TabularMSA.read(fasta_fl, qual=qual_fl, constructor=DNA)
+>>> msa
+TabularMSA[DNA]
+---------------------
+Stats:
+    sequence count: 2
+    position count: 7
+---------------------
+CGATGTC
+CATCGTC
 
-To write the sequence data and quality scores in the ``SequenceCollection`` to
-FASTA and QUAL files, respectively, we run:
+To write the sequence data and quality scores in the ``TabularMSA`` to FASTA
+and QUAL files, respectively:
 
 >>> new_fasta_fh = StringIO()
 >>> new_qual_fh = StringIO()
->>> _ = sc.write(new_fasta_fh, qual=new_qual_fh)
+>>> _ = msa.write(new_fasta_fh, qual=new_qual_fh)
 >>> print(new_fasta_fh.getvalue())
 >seq1 db-accession-149855
 CGATGTC
 >seq2 db-accession-34989
-CATCG
+CATCGTC
 <BLANKLINE>
 >>> print(new_qual_fh.getvalue())
 >seq1 db-accession-149855
 40 39 39 4 50 1 100
 >seq2 db-accession-34989
-3 3 10 42 80
+3 3 10 42 80 80 79
 <BLANKLINE>
 >>> new_fasta_fh.close()
 >>> new_qual_fh.close()
@@ -658,7 +623,7 @@ from skbio.io.format._base import (_get_nth_sequence,
                                    _format_fasta_like_records, _line_generator,
                                    _too_many_blanks)
 from skbio.util._misc import chunk_str
-from skbio.alignment import SequenceCollection, Alignment
+from skbio.alignment import TabularMSA
 from skbio.sequence import Sequence, DNA, RNA, Protein
 
 
@@ -749,14 +714,14 @@ def _fasta_to_generator(fh, qual=FileSentinel, constructor=Sequence, **kwargs):
 
 
 @fasta.reader(Sequence)
-def _fasta_to_biological_sequence(fh, qual=FileSentinel, seq_num=1):
+def _fasta_to_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
     return _get_nth_sequence(
-        _fasta_to_generator(fh, qual=qual, constructor=Sequence),
+        _fasta_to_generator(fh, qual=qual, constructor=Sequence, **kwargs),
         seq_num)
 
 
 @fasta.reader(DNA)
-def _fasta_to_dna_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
+def _fasta_to_dna(fh, qual=FileSentinel, seq_num=1, **kwargs):
     return _get_nth_sequence(
         _fasta_to_generator(fh, qual=qual,
                             constructor=DNA, **kwargs),
@@ -764,7 +729,7 @@ def _fasta_to_dna_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
 
 
 @fasta.reader(RNA)
-def _fasta_to_rna_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
+def _fasta_to_rna(fh, qual=FileSentinel, seq_num=1, **kwargs):
     return _get_nth_sequence(
         _fasta_to_generator(fh, qual=qual,
                             constructor=RNA, **kwargs),
@@ -772,26 +737,20 @@ def _fasta_to_rna_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
 
 
 @fasta.reader(Protein)
-def _fasta_to_protein_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
+def _fasta_to_protein(fh, qual=FileSentinel, seq_num=1, **kwargs):
     return _get_nth_sequence(
         _fasta_to_generator(fh, qual=qual,
                             constructor=Protein, **kwargs),
         seq_num)
 
 
-@fasta.reader(SequenceCollection)
-def _fasta_to_sequence_collection(fh, qual=FileSentinel,
-                                  constructor=Sequence, **kwargs):
-    return SequenceCollection(
-        list(_fasta_to_generator(fh, qual=qual, constructor=constructor,
-                                 **kwargs)))
+@fasta.reader(TabularMSA)
+def _fasta_to_tabular_msa(fh, qual=FileSentinel, constructor=None, **kwargs):
+    if constructor is None:
+        raise ValueError("Must provide `constructor`.")
 
-
-@fasta.reader(Alignment)
-def _fasta_to_alignment(fh, qual=FileSentinel, constructor=Sequence, **kwargs):
-    return Alignment(
-        list(_fasta_to_generator(fh, qual=qual, constructor=constructor,
-                                 **kwargs)))
+    return TabularMSA(
+        _fasta_to_generator(fh, qual=qual, constructor=constructor, **kwargs))
 
 
 @fasta.writer(None)
@@ -830,55 +789,44 @@ def _generator_to_fasta(obj, fh, qual=FileSentinel,
 
 
 @fasta.writer(Sequence)
-def _biological_sequence_to_fasta(obj, fh, qual=FileSentinel,
-                                  id_whitespace_replacement='_',
-                                  description_newline_replacement=' ',
-                                  max_width=None):
+def _sequence_to_fasta(obj, fh, qual=FileSentinel,
+                       id_whitespace_replacement='_',
+                       description_newline_replacement=' ', max_width=None,
+                       lowercase=None):
     _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
-                        description_newline_replacement, max_width)
+                        description_newline_replacement, max_width, lowercase)
 
 
 @fasta.writer(DNA)
-def _dna_sequence_to_fasta(obj, fh, qual=FileSentinel,
-                           id_whitespace_replacement='_',
-                           description_newline_replacement=' ',
-                           max_width=None, lowercase=None):
+def _dna_to_fasta(obj, fh, qual=FileSentinel, id_whitespace_replacement='_',
+                  description_newline_replacement=' ', max_width=None,
+                  lowercase=None):
     _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
                         description_newline_replacement, max_width, lowercase)
 
 
 @fasta.writer(RNA)
-def _rna_sequence_to_fasta(obj, fh, qual=FileSentinel,
-                           id_whitespace_replacement='_',
-                           description_newline_replacement=' ',
-                           max_width=None, lowercase=None):
+def _rna_to_fasta(obj, fh, qual=FileSentinel, id_whitespace_replacement='_',
+                  description_newline_replacement=' ', max_width=None,
+                  lowercase=None):
     _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
                         description_newline_replacement, max_width, lowercase)
 
 
 @fasta.writer(Protein)
-def _protein_sequence_to_fasta(obj, fh, qual=FileSentinel,
-                               id_whitespace_replacement='_',
-                               description_newline_replacement=' ',
-                               max_width=None, lowercase=None):
+def _protein_to_fasta(obj, fh, qual=FileSentinel,
+                      id_whitespace_replacement='_',
+                      description_newline_replacement=' ', max_width=None,
+                      lowercase=None):
     _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
                         description_newline_replacement, max_width, lowercase)
 
 
-@fasta.writer(SequenceCollection)
-def _sequence_collection_to_fasta(obj, fh, qual=FileSentinel,
-                                  id_whitespace_replacement='_',
-                                  description_newline_replacement=' ',
-                                  max_width=None, lowercase=None):
-    _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
-                        description_newline_replacement, max_width, lowercase)
-
-
-@fasta.writer(Alignment)
-def _alignment_to_fasta(obj, fh, qual=FileSentinel,
-                        id_whitespace_replacement='_',
-                        description_newline_replacement=' ', max_width=None,
-                        lowercase=None):
+@fasta.writer(TabularMSA)
+def _tabular_msa_to_fasta(obj, fh, qual=FileSentinel,
+                          id_whitespace_replacement='_',
+                          description_newline_replacement=' ', max_width=None,
+                          lowercase=None):
     _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
                         description_newline_replacement, max_width, lowercase)
 
diff --git a/skbio/io/format/fastq.py b/skbio/io/format/fastq.py
index af36416..c2b58ea 100644
--- a/skbio/io/format/fastq.py
+++ b/skbio/io/format/fastq.py
@@ -38,9 +38,7 @@ Format Support
 +======+======+===============================================================+
 |Yes   |Yes   |generator of :mod:`skbio.sequence.Sequence` objects            |
 +------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.alignment.SequenceCollection`                      |
-+------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.alignment.Alignment`                               |
+|Yes   |Yes   |:mod:`skbio.alignment.TabularMSA`                              |
 +------+------+---------------------------------------------------------------+
 |Yes   |Yes   |:mod:`skbio.sequence.Sequence`                                 |
 +------+------+---------------------------------------------------------------+
@@ -74,8 +72,8 @@ files provided in the publication's supplementary data.
 
 .. note:: IDs and descriptions will be parsed from sequence header lines in
    exactly the same way as FASTA headers (:mod:`skbio.io.format.fasta`). IDs,
-   descriptions, and quality scores are also stored automatically on the
-   object in the same way as with FASTA.
+   descriptions, and quality scores are also stored on, and written from,
+   sequence objects in the same way as with FASTA.
 
 .. note:: Blank or whitespace-only lines are only allowed at the beginning of
    the file, between FASTQ records, or at the end of the file. A blank or
@@ -173,10 +171,10 @@ Suppose we have the following FASTQ file with two DNA sequences::
 
     @seq1 description 1
     AACACCAAACTTCTCCACC
-    ACGTGAGCTACAAAAGGGT
+    ACGTGAGCTACAAAAG
     +seq1 description 1
     ''''Y^T]']C^CABCACC
-    `^LB^CCYT\T\Y\WF^^^
+    '^LB^CCYT\T\Y\WF
     @seq2 description 2
     TATGTATATATAACATATACATATATACATACATA
     +
@@ -195,37 +193,43 @@ registry in practice:
 >>> fs = '\n'.join([
 ...     r"@seq1 description 1",
 ...     r"AACACCAAACTTCTCCACC",
-...     r"ACGTGAGCTACAAAAGGGT",
+...     r"ACGTGAGCTACAAAAG",
 ...     r"+seq1 description 1",
 ...     r"''''Y^T]']C^CABCACC",
-...     r"'^LB^CCYT\T\Y\WF^^^",
+...     r"'^LB^CCYT\T\Y\WF",
 ...     r"@seq2 description 2",
 ...     r"TATGTATATATAACATATACATATATACATACATA",
 ...     r"+",
 ...     r"]KZ[PY]_[YY^'''AC^\\'BT''C'\AT''BBB"])
 >>> fh = StringIO(fs)
 
-To load the sequences into a ``SequenceCollection``, we run:
+To load the sequences into a ``TabularMSA``, we run:
 
->>> from skbio import SequenceCollection
->>> sc = SequenceCollection.read(fh, variant='sanger')
->>> sc
-<SequenceCollection: n=2; mean +/- std length=36.50 +/- 1.50>
+>>> from skbio import TabularMSA, DNA
+>>> msa = TabularMSA.read(fh, constructor=DNA, variant='sanger')
+>>> msa
+TabularMSA[DNA]
+-----------------------------------
+Stats:
+    sequence count: 2
+    position count: 35
+-----------------------------------
+AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
+TATGTATATATAACATATACATATATACATACATA
 
 Note that quality scores are decoded from Sanger. To load the second sequence
-as a ``DNA``:
+as ``DNA``:
 
->>> from skbio import DNA
 >>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
 >>> seq = DNA.read(fh, variant='sanger', seq_num=2)
 >>> seq
 DNA
 ----------------------------------------
 Metadata:
-    u'description': u'description 2'
-    u'id': u'seq2'
+    'description': 'description 2'
+    'id': 'seq2'
 Positional metadata:
-    u'quality': <dtype: uint8>
+    'quality': <dtype: uint8>
 Stats:
     length: 35
     has gaps: False
@@ -235,15 +239,15 @@ Stats:
 ----------------------------------------
 0 TATGTATATA TAACATATAC ATATATACAT ACATA
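+
+The decoded Phred scores are stored in the sequence's ``positional_metadata``.
+As a quick check of the first few values (the ``sanger`` variant subtracts 33
+from each character's ASCII code):
+
+>>> seq.positional_metadata['quality'].values[:5]
+array([60, 42, 57, 58, 47], dtype=uint8)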
 
-To write our ``SequenceCollection`` to a FASTQ file with quality scores encoded
-using the ``illumina1.3`` variant:
+To write our ``TabularMSA`` to a FASTQ file with quality scores encoded using
+the ``illumina1.3`` variant:
 
 >>> new_fh = StringIO()
->>> print(sc.write(new_fh, format='fastq', variant='illumina1.3').getvalue())
+>>> print(msa.write(new_fh, format='fastq', variant='illumina1.3').getvalue())
 @seq1 description 1
-AACACCAAACTTCTCCACCACGTGAGCTACAAAAGGGT
+AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
 +
-FFFFx}s|F|b}b`ab`bbF}ka}bbxs{s{x{ve}}}
+FFFFx}s|F|b}b`ab`bbF}ka}bbxs{s{x{ve
 @seq2 description 2
 TATGTATATATAACATATACATATATACATACATA
 +
@@ -290,7 +294,7 @@ from skbio.io.format._base import (
     _decode_qual_to_phred, _encode_phred_to_qual, _get_nth_sequence,
     _parse_fasta_like_header, _format_fasta_like_records, _line_generator,
     _too_many_blanks)
-from skbio.alignment import SequenceCollection, Alignment
+from skbio.alignment import TabularMSA
 from skbio.sequence import Sequence, DNA, RNA, Protein
 
 _whitespace_regex = re.compile(r'\s')
@@ -348,17 +352,16 @@ def _fastq_to_generator(fh, variant=None, phred_offset=None,
 
 
 @fastq.reader(Sequence)
-def _fastq_to_biological_sequence(fh, variant=None, phred_offset=None,
-                                  seq_num=1):
+def _fastq_to_sequence(fh, variant=None, phred_offset=None, seq_num=1,
+                       **kwargs):
     return _get_nth_sequence(
         _fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
-                            constructor=Sequence),
+                            constructor=Sequence, **kwargs),
         seq_num)
 
 
 @fastq.reader(DNA)
-def _fastq_to_dna_sequence(fh, variant=None, phred_offset=None, seq_num=1,
-                           **kwargs):
+def _fastq_to_dna(fh, variant=None, phred_offset=None, seq_num=1, **kwargs):
     return _get_nth_sequence(
         _fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
                             constructor=DNA, **kwargs),
@@ -366,8 +369,7 @@ def _fastq_to_dna_sequence(fh, variant=None, phred_offset=None, seq_num=1,
 
 
 @fastq.reader(RNA)
-def _fastq_to_rna_sequence(fh, variant=None, phred_offset=None, seq_num=1,
-                           **kwargs):
+def _fastq_to_rna(fh, variant=None, phred_offset=None, seq_num=1, **kwargs):
     return _get_nth_sequence(
         _fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
                             constructor=RNA, **kwargs),
@@ -375,8 +377,8 @@ def _fastq_to_rna_sequence(fh, variant=None, phred_offset=None, seq_num=1,
 
 
 @fastq.reader(Protein)
-def _fastq_to_protein_sequence(fh, variant=None, phred_offset=None, seq_num=1,
-                               **kwargs):
+def _fastq_to_protein(fh, variant=None, phred_offset=None, seq_num=1,
+                      **kwargs):
     return _get_nth_sequence(
         _fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
                             constructor=Protein,
@@ -384,22 +386,15 @@ def _fastq_to_protein_sequence(fh, variant=None, phred_offset=None, seq_num=1,
         seq_num)
 
 
-@fastq.reader(SequenceCollection)
-def _fastq_to_sequence_collection(fh, variant=None, phred_offset=None,
-                                  constructor=Sequence, **kwargs):
-    return SequenceCollection(
-        list(_fastq_to_generator(fh, variant=variant,
-                                 phred_offset=phred_offset,
-                                 constructor=constructor, **kwargs)))
-
+@fastq.reader(TabularMSA)
+def _fastq_to_tabular_msa(fh, variant=None, phred_offset=None,
+                          constructor=None, **kwargs):
+    if constructor is None:
+        raise ValueError("Must provide `constructor`.")
 
-@fastq.reader(Alignment)
-def _fastq_to_alignment(fh, variant=None, phred_offset=None,
-                        constructor=Sequence, **kwargs):
-    return Alignment(
-        list(_fastq_to_generator(fh, variant=variant,
-                                 phred_offset=phred_offset,
-                                 constructor=constructor, **kwargs)))
+    return TabularMSA(
+        _fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
+                            constructor=constructor, **kwargs))
 
 
 @fastq.writer(None)
@@ -422,59 +417,45 @@ def _generator_to_fastq(obj, fh, variant=None, phred_offset=None,
 
 
 @fastq.writer(Sequence)
-def _biological_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
-                                  id_whitespace_replacement='_',
-                                  description_newline_replacement=' '):
+def _sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
+                       id_whitespace_replacement='_',
+                       description_newline_replacement=' ', lowercase=None):
     _sequences_to_fastq([obj], fh, variant, phred_offset,
                         id_whitespace_replacement,
-                        description_newline_replacement)
+                        description_newline_replacement, lowercase=lowercase)
 
 
 @fastq.writer(DNA)
-def _dna_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
-                           id_whitespace_replacement='_',
-                           description_newline_replacement=' ',
-                           lowercase=None):
+def _dna_to_fastq(obj, fh, variant=None, phred_offset=None,
+                  id_whitespace_replacement='_',
+                  description_newline_replacement=' ', lowercase=None):
     _sequences_to_fastq([obj], fh, variant, phred_offset,
                         id_whitespace_replacement,
                         description_newline_replacement, lowercase=lowercase)
 
 
 @fastq.writer(RNA)
-def _rna_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
-                           id_whitespace_replacement='_',
-                           description_newline_replacement=' ',
-                           lowercase=None):
+def _rna_to_fastq(obj, fh, variant=None, phred_offset=None,
+                  id_whitespace_replacement='_',
+                  description_newline_replacement=' ', lowercase=None):
     _sequences_to_fastq([obj], fh, variant, phred_offset,
                         id_whitespace_replacement,
                         description_newline_replacement, lowercase=lowercase)
 
 
 @fastq.writer(Protein)
-def _protein_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
-                               id_whitespace_replacement='_',
-                               description_newline_replacement=' ',
-                               lowercase=None):
+def _protein_to_fastq(obj, fh, variant=None, phred_offset=None,
+                      id_whitespace_replacement='_',
+                      description_newline_replacement=' ', lowercase=None):
     _sequences_to_fastq([obj], fh, variant, phred_offset,
                         id_whitespace_replacement,
                         description_newline_replacement, lowercase=lowercase)
 
 
-@fastq.writer(SequenceCollection)
-def _sequence_collection_to_fastq(obj, fh, variant=None, phred_offset=None,
-                                  id_whitespace_replacement='_',
-                                  description_newline_replacement=' ',
-                                  lowercase=None):
-    _sequences_to_fastq(obj, fh, variant, phred_offset,
-                        id_whitespace_replacement,
-                        description_newline_replacement, lowercase=lowercase)
-
-
-@fastq.writer(Alignment)
-def _alignment_to_fastq(obj, fh, variant=None, phred_offset=None,
-                        id_whitespace_replacement='_',
-                        description_newline_replacement=' ',
-                        lowercase=None):
+@fastq.writer(TabularMSA)
+def _tabular_msa_to_fastq(obj, fh, variant=None, phred_offset=None,
+                          id_whitespace_replacement='_',
+                          description_newline_replacement=' ', lowercase=None):
     _sequences_to_fastq(obj, fh, variant, phred_offset,
                         id_whitespace_replacement,
                         description_newline_replacement, lowercase=lowercase)
diff --git a/skbio/io/format/genbank.py b/skbio/io/format/genbank.py
new file mode 100644
index 0000000..7d3ec5d
--- /dev/null
+++ b/skbio/io/format/genbank.py
@@ -0,0 +1,890 @@
+"""
+GenBank format (:mod:`skbio.io.format.genbank`)
+===============================================
+
+.. currentmodule:: skbio.io.format.genbank
+
+GenBank format (GenBank Flat File Format) stores a sequence and its annotation
+together. The start of the annotation section is marked by a line beginning
+with the word "LOCUS". The start of the sequence section is marked by a line
+beginning with the word "ORIGIN", and the end of the section is marked by a
+line containing only "//".
+
+A GenBank file usually has the extension .gb, or sometimes .gbk. The GenBank
+format for proteins has been renamed GenPept; GenBank (for nucleotides) and
+GenPept are essentially the same format.
+
+An example of a GenBank file can be seen here:
+<http://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html>
+
+Format Support
+--------------
+**Has Sniffer: Yes**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|Yes   |Yes   |:mod:`skbio.sequence.Sequence`                                 |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.DNA`                                      |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.RNA`                                      |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.Protein`                                  |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |generator of :mod:`skbio.sequence.Sequence` objects            |
++------+------+---------------------------------------------------------------+
+
+Format Specification
+--------------------
+**State: Experimental as of 0.4.1.**
+
+The International Nucleotide Sequence Database Collaboration (INSDC,
+http://www.insdc.org/) is a foundational initiative between the DDBJ, EMBL,
+and GenBank. These organisations all use the same "Feature Table" layout in
+their plain-text flat file formats.
+
+However, the header and sequence sections of an EMBL file are very different
+in layout from those produced by GenBank/DDBJ.
+
+Feature Table Documentation:
+http://www.insdc.org/files/feature_table.html
+ftp://ftp.ncbi.nih.gov/genbank/docs/FTv10_3.html
+
+The sequence in the ``'ORIGIN'`` section is always in lowercase in GenBank
+files downloaded from NCBI. For RNA molecules, ``'t'`` (thymine) is used in
+the sequence instead of ``'u'`` (uracil). All GenBank writers follow these
+conventions when writing GenBank files.
+
+All the sections before ``'FEATURES'`` are read into the ``metadata`` of
+``Sequence`` or its sub-class. Each section's header and content are stored
+as a key-value pair in ``metadata``. The value of the ``'REFERENCE'`` section
+is stored as a list, as there are often multiple reference sections in one
+GenBank record.
+
+The information in the ``'FEATURES'`` section is stored in both the
+``metadata`` and ``positional_metadata`` of ``Sequence`` or its sub-class.
+Each feature's location is stored as a boolean column in
+``positional_metadata``; its other qualifiers are stored as a ``dict`` in the
+``list`` ``metadata['FEATURES']``. The ``dict`` of qualifiers contains a few
+extra keys, which end with ``'_'``, including:
+
+    1. ``'index_'``: the column index into ``positional_metadata`` where the
+       location of the current feature is stored.
+
+    2. ``'left_partial_'``: whether the exact lower boundary point of the
+       feature is unknown.
+
+    3. ``'right_partial_'``: whether the exact upper boundary point of the
+       feature is unknown.
+
+    4. ``'type_'``: the type of the feature. Its value is taken from the
+       header line of the feature.
+
+Format Parameters
+-----------------
+
+Reader-specific Parameters
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+The ``constructor`` parameter can be used with the ``Sequence`` generator
+reader to specify the in-memory type of each GenBank record that is parsed.
+``constructor`` should be ``Sequence`` or a sub-class of ``Sequence``. If
+``constructor`` is not supplied, the type is detected from the unit label on
+the LOCUS line: if it is ``'bp'``, the record will be read into ``DNA``; if
+it is ``'aa'``, it will be read into ``Protein``; otherwise, it will be read
+into ``Sequence``. Setting ``constructor`` overrides this default behavior.
+
+``lowercase`` is another parameter available for all GenBank readers.
+By default, it is set to ``True`` to read in the ``'ORIGIN'`` sequence
+as lowercase letters. This parameter is passed to ``Sequence`` or
+its sub-class constructor.
+
+``seq_num`` is a parameter used with the ``Sequence``, ``DNA``, ``RNA``, and
+``Protein`` GenBank readers. It specifies which GenBank record to read from
+a GenBank file with multiple records in it.
+
+Examples
+--------
+
+Reading and Writing GenBank Files
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Suppose we have the following GenBank file (example modified from [1]_)::
+
+    LOCUS       3K1V_A       34 bp    RNA     linear   SYN 10-OCT-2012
+    DEFINITION  Chain A, Structure Of A Mutant Class-I Preq1.
+    ACCESSION   3K1V_A
+    VERSION     3K1V_A  GI:260656459
+    KEYWORDS    .
+    SOURCE      synthetic construct
+      ORGANISM  synthetic construct
+                other sequences; artificial sequences.
+    REFERENCE   1  (bases 1 to 34)
+      AUTHORS   Klein,D.J., Edwards,T.E. and Ferre-D'Amare,A.R.
+      TITLE     Cocrystal structure of a class I preQ1 riboswitch
+      JOURNAL   Nat. Struct. Mol. Biol. 16 (3), 343-344 (2009)
+       PUBMED   19234468
+    COMMENT     SEQRES.
+    FEATURES             Location/Qualifiers
+         source          1..34
+                         /organism="synthetic construct"
+                         /mol_type="other RNA"
+                         /db_xref="taxon:32630"
+    ORIGIN
+            1 agaggttcta gcacatccct ctataaaaaa ctaa
+    //
+
+>>> gb = ['LOCUS       3K1V_A     34 bp   RNA    linear   SYN 10-OCT-2012\\n',
+...       'DEFINITION  Chain A, Structure Of A Mutant Class-I Preq1.\\n',
+...       'ACCESSION   3K1V_A\\n',
+...       'VERSION     3K1V_A  GI:260656459\\n',
+...       'KEYWORDS    .\\n',
+...       'SOURCE      synthetic construct\\n',
+...       '  ORGANISM  synthetic construct\\n',
+...       '            other sequences; artificial sequences.\\n',
+...       'REFERENCE   1  (bases 1 to 34)\\n',
+...       "  AUTHORS   Klein,D.J., Edwards,T.E. and Ferre-D'Amare,A.R.\\n",
+...       '  TITLE     Cocrystal structure of a class I preQ1 riboswitch\\n',
+...       '  JOURNAL   Nat. Struct. Mol. Biol. 16 (3), 343-344 (2009)\\n',
+...       '   PUBMED   19234468\\n',
+...       'COMMENT     SEQRES.\\n',
+...       'FEATURES             Location/Qualifiers\\n',
+...       '     source          1..34\\n',
+...       '                     /organism="synthetic construct"\\n',
+...       '                     /mol_type="other RNA"\\n',
+...       '                     /db_xref="taxon:32630"\\n',
+...       'ORIGIN\\n',
+...       '        1 agaggttcta gcacatccct ctataaaaaa ctaa\\n',
+...       '//\\n']
+
+Now we can read it into a ``DNA`` object:
+
+>>> from skbio import DNA, RNA, Sequence
+>>> dna_seq = DNA.read(gb)
+>>> dna_seq
+DNA
+-----------------------------------------------------------------
+Metadata:
+    'ACCESSION': '3K1V_A'
+    'COMMENT': 'SEQRES.'
+    'DEFINITION': 'Chain A, Structure Of A Mutant Class-I Preq1.'
+    'FEATURES': <class 'list'>
+    'KEYWORDS': '.'
+    'LOCUS': <class 'dict'>
+    'REFERENCE': <class 'list'>
+    'SOURCE': <class 'dict'>
+    'VERSION': '3K1V_A  GI:260656459'
+Positional metadata:
+    0: <dtype: bool>
+Stats:
+    length: 34
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 35.29%
+-----------------------------------------------------------------
+0 AGAGGTTCTA GCACATCCCT CTATAAAAAA CTAA
+
+
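+The parsed LOCUS line is stored as a ``dict`` under ``metadata['LOCUS']``. As
+a quick check (``'size'`` and ``'unit'`` hold the sequence length and its
+unit):
+
+>>> dna_seq.metadata['LOCUS']['size']
+34
+>>> dna_seq.metadata['LOCUS']['unit'] == 'bp'
+True
+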
+Since this is a riboswitch molecule, we may want to read it as ``RNA``. As
+GenBank files usually have ``'t'`` instead of ``'u'`` in the sequence, reading
+it as ``RNA`` converts ``'t'`` to ``'u'``:
+
+>>> rna_seq = RNA.read(gb)
+>>> rna_seq
+RNA
+-----------------------------------------------------------------
+Metadata:
+    'ACCESSION': '3K1V_A'
+    'COMMENT': 'SEQRES.'
+    'DEFINITION': 'Chain A, Structure Of A Mutant Class-I Preq1.'
+    'FEATURES': <class 'list'>
+    'KEYWORDS': '.'
+    'LOCUS': <class 'dict'>
+    'REFERENCE': <class 'list'>
+    'SOURCE': <class 'dict'>
+    'VERSION': '3K1V_A  GI:260656459'
+Positional metadata:
+    0: <dtype: bool>
+Stats:
+    length: 34
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 35.29%
+-----------------------------------------------------------------
+0 AGAGGUUCUA GCACAUCCCU CUAUAAAAAA CUAA
+
+>>> rna_seq == dna_seq.transcribe()
+True
+
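+The feature table is stored as a list with one ``dict`` per feature (the keys
+ending with ``'_'`` are described above). As a quick check:
+
+>>> len(dna_seq.metadata['FEATURES'])
+1
+>>> dna_seq.metadata['FEATURES'][0]['type_'] == 'source'
+True
+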
+>>> from io import StringIO
+>>> with StringIO() as fh:
+...     print(dna_seq.write(fh, format='genbank').getvalue())
+LOCUS       3K1V_A   34 bp   RNA   linear   SYN   10-OCT-2012
+DEFINITION  Chain A, Structure Of A Mutant Class-I Preq1.
+ACCESSION   3K1V_A
+VERSION     3K1V_A  GI:260656459
+KEYWORDS    .
+SOURCE      synthetic construct
+  ORGANISM  synthetic construct
+            other sequences; artificial sequences.
+REFERENCE   1  (bases 1 to 34)
+  AUTHORS   Klein,D.J., Edwards,T.E. and Ferre-D'Amare,A.R.
+  TITLE     Cocrystal structure of a class I preQ1 riboswitch
+  JOURNAL   Nat. Struct. Mol. Biol. 16 (3), 343-344 (2009)
+  PUBMED    19234468
+COMMENT     SEQRES.
+FEATURES             Location/Qualifiers
+       source        1..34
+                     /db_xref="taxon:32630"
+                     /mol_type="other RNA"
+                     /organism="synthetic construct"
+ORIGIN
+        1 agaggttcta gcacatccct ctataaaaaa ctaa
+//
+<BLANKLINE>
+
+References
+----------
+.. [1] http://www.ncbi.nlm.nih.gov/nuccore/3K1V_A
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+from future.builtins import range, zip
+
+import re
+import numpy as np
+import pandas as pd
+from datetime import datetime
+from functools import partial
+
+from skbio.io import create_format, GenBankFormatError
+from skbio.io.format._base import (
+    _get_nth_sequence, _line_generator, _too_many_blanks)
+from skbio.util._misc import chunk_str
+from skbio.sequence import Sequence, DNA, RNA, Protein
+
+
+genbank = create_format('genbank')
+
+# date format in locus line of genbank record
+_TIME_FORMAT = '%d-%b-%Y'
+# This list is ordered; it is used to read and write GenBank files.
+_HEADERS = ['LOCUS',
+            'DEFINITION',
+            'ACCESSION',
+            'VERSION',
+            'DBSOURCE',
+            'DBLINK',
+            'KEYWORDS',
+            'SOURCE',
+            'REFERENCE',
+            'COMMENT',
+            'FEATURES',
+            'ORIGIN']
+
+
+@genbank.sniffer()
+def _genbank_sniffer(fh):
+    # check the 1st real line is a valid LOCUS line
+    if _too_many_blanks(fh, 5):
+        return False, {}
+    try:
+        line = next(_line_generator(fh, skip_blanks=True, strip=False))
+    except StopIteration:
+        return False, {}
+
+    try:
+        _parse_locus([line])
+    except GenBankFormatError:
+        return False, {}
+    return True, {}
+
+
+@genbank.reader(None)
+def _genbank_to_generator(fh, constructor=None, **kwargs):
+    for record in _parse_genbanks(fh):
+        yield _construct(record, constructor, **kwargs)
+
+
+@genbank.reader(Sequence)
+def _genbank_to_sequence(fh, seq_num=1, **kwargs):
+    record = _get_nth_sequence(_parse_genbanks(fh), seq_num)
+    return _construct(record, Sequence, **kwargs)
+
+
+@genbank.reader(DNA)
+def _genbank_to_dna(fh, seq_num=1, **kwargs):
+    record = _get_nth_sequence(_parse_genbanks(fh), seq_num)
+    return _construct(record, DNA, **kwargs)
+
+
+@genbank.reader(RNA)
+def _genbank_to_rna(fh, seq_num=1, **kwargs):
+    record = _get_nth_sequence(_parse_genbanks(fh), seq_num)
+    return _construct(record, RNA, **kwargs)
+
+
+@genbank.reader(Protein)
+def _genbank_to_protein(fh, seq_num=1, **kwargs):
+    record = _get_nth_sequence(_parse_genbanks(fh), seq_num)
+    return _construct(record, Protein, **kwargs)
+
+
+@genbank.writer(None)
+def _generator_to_genbank(obj, fh):
+    for obj_i in obj:
+        _serialize_single_genbank(obj_i, fh)
+
+
+@genbank.writer(Sequence)
+def _sequence_to_genbank(obj, fh):
+    _serialize_single_genbank(obj, fh)
+
+
+@genbank.writer(DNA)
+def _dna_to_genbank(obj, fh):
+    _serialize_single_genbank(obj, fh)
+
+
+@genbank.writer(RNA)
+def _rna_to_genbank(obj, fh):
+    _serialize_single_genbank(obj, fh)
+
+
+@genbank.writer(Protein)
+def _protein_to_genbank(obj, fh):
+    _serialize_single_genbank(obj, fh)
+
+
+def _construct(record, constructor=None, **kwargs):
+    '''Construct a Sequence, DNA, RNA, or Protein object.
+    '''
+    seq, md, pmd = record
+    if 'lowercase' not in kwargs:
+        kwargs['lowercase'] = True
+    if constructor is None:
+        unit = md['LOCUS']['unit']
+        if unit == 'bp':
+            # RNA mol type has T instead of U in GenBank files from NCBI
+            constructor = DNA
+        elif unit == 'aa':
+            constructor = Protein
+
+    if constructor == RNA:
+        return DNA(
+            seq, metadata=md, positional_metadata=pmd, **kwargs).transcribe()
+    else:
+        return constructor(
+            seq, metadata=md, positional_metadata=pmd, **kwargs)
+
+
+def _parse_genbanks(fh):
+    data_chunks = []
+    for line in _line_generator(fh, skip_blanks=True, strip=False):
+        if line.startswith('//'):
+            yield _parse_single_genbank(data_chunks)
+            data_chunks = []
+        else:
+            data_chunks.append(line)
+
+
+def _parse_single_genbank(chunks):
+    metadata = {}
+    positional_metadata = None
+    sequence = ''
+    # each section starts with a HEADER without indent.
+    section_splitter = _yield_section(
+        lambda x: not x[0].isspace(), strip=False)
+    for section in section_splitter(chunks):
+        header = section[0].split(None, 1)[0]
+        parser = _PARSER_TABLE.get(
+            header, _parse_section_default)
+
+        if header == 'FEATURES':
+            # This requires the 'LOCUS' line to be parsed before 'FEATURES',
+            # which should be true and is implicitly checked by the sniffer.
+            parser = partial(
+                parser, length=metadata['LOCUS']['size'])
+
+        parsed = parser(section)
+
+        # reference can appear multiple times
+        if header == 'REFERENCE':
+            if header in metadata:
+                metadata[header].append(parsed)
+            else:
+                metadata[header] = [parsed]
+        elif header == 'ORIGIN':
+            sequence = parsed
+        elif header == 'FEATURES':
+            metadata[header] = parsed[0]
+            positional_metadata = pd.concat(parsed[1], axis=1)
+        else:
+            metadata[header] = parsed
+    return sequence, metadata, positional_metadata
+
+
+def _serialize_single_genbank(obj, fh):
+    '''Write a GenBank record.
+
+    Always write it in the NCBI canonical way:
+    1. sequence in lowercase
+    2. 'u' written as 't', even in RNA molecules.
+    '''
+    md = obj.metadata
+    for header in _HEADERS:
+        if header in md:
+            serializer = _SERIALIZER_TABLE.get(
+                header, _serialize_section_default)
+            out = serializer(header, md[header])
+            # test if 'out' is an iterator.
+            # cf. Effective Python Item 17
+            if iter(out) is iter(out):
+                for s in out:
+                    fh.write(s)
+            else:
+                fh.write(out)
+    # always write RNA seq as DNA
+    if isinstance(obj, RNA):
+        obj = obj.reverse_transcribe()
+
+    # always write in lowercase
+    seq_str = str(obj).lower()
+
+    for s in _serialize_origin(seq_str):
+        fh.write(s)
+    fh.write('//\n')
+
+
+def _parse_locus(lines):
+    '''Parse the LOCUS line.
+
+    Format:
+    #    Positions  Contents
+    #    ---------  --------
+    #    00:06      LOCUS
+    #    06:12      spaces
+    #    12:??      Locus name
+    #    ??:??      space
+    #    ??:29      Length of sequence, right-justified
+    #    29:33      space, bp/aa/rc, space
+    #    33:41      molecule type (can be blank): DNA, ssDNA, dsRNA, tRNA, etc.
+    #    41:42      space
+    #    42:51      Blank (implies linear), linear or circular
+    #    51:52      space
+    #    52:55      The division code (e.g. BCT, VRL, INV)
+    #    55:62      space
+    #    62:73      Date, in the form dd-MMM-yyyy (e.g., 15-MAR-1991)
+    '''
+    line = lines[0]
+    pattern = (r'LOCUS'
+               ' +([^\s]+)'
+               ' +([0-9]+)'
+               ' +(bp|aa|rc)'
+               ' +(.*DNA|.*RNA)?'
+               ' +(linear|circular)?'
+               ' +(PRI|ROD|MAM|VRT|INV|PLN|BCT|VRL|PHG|'
+               'SYN|UNA|EST|PAT|STS|GSS|HTG|HTC|ENV|CON)'
+               ' +([0-9]{2}-[A-Z]{3}-[0-9]{4})')
+    matches = re.match(pattern, line)
+
+    try:
+        res = dict(zip(
+            ['locus_name', 'size', 'unit', 'mol_type',
+             'shape', 'division', 'date'],
+            matches.groups()))
+    except AttributeError:
+        # `matches` is None if the regex did not match the LOCUS line.
+        raise GenBankFormatError(
+            "Could not parse the LOCUS line:\n%s" % line)
+
+    res['size'] = int(res['size'])
+    res['date'] = datetime.strptime(res['date'], _TIME_FORMAT)
+    return res
+
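+# Illustrative example (not executed): given the LOCUS line
+#   'LOCUS       3K1V_A     34 bp   RNA    linear   SYN 10-OCT-2012'
+# _parse_locus returns {'locus_name': '3K1V_A', 'size': 34, 'unit': 'bp',
+# 'mol_type': 'RNA', 'shape': 'linear', 'division': 'SYN',
+# 'date': datetime(2012, 10, 10, 0, 0)}.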
+
+def _serialize_locus(header, obj, indent=12):
+    '''Serialize the LOCUS line.
+
+    Parameters
+    ----------
+    obj : dict
+    '''
+    # use 'or' to convert None to ''
+    kwargs = {k: v or '' for k, v in obj.items()}
+    # convert datetime to str
+    kwargs['date'] = kwargs['date'].strftime(_TIME_FORMAT).upper()
+
+    return ('{header:<{indent}}{locus_name}   {size} {unit}'
+            '   {mol_type}   {shape}   {division}   {date}\n').format(
+                header=header, indent=indent, **kwargs)
+
+
+def _parse_reference(lines):
+    '''Parse single REFERENCE field.
+    '''
+    res = {}
+    # magic number 11: the non keyworded lines in REFERENCE
+    # are at least indented with 11 spaces.
+    feature_indent = ' ' * 11
+    section_splitter = _yield_section(
+        lambda x: not x.startswith(feature_indent),
+        skip_blanks=True, strip=False)
+    for section in section_splitter(lines):
+        label, data = _parse_section_default(
+            section, join_delimitor=' ', return_label=True)
+        res[label] = data
+    return res
+
+
+def _serialize_reference(header, obj, indent=12):
+    '''Serialize REFERENCE.
+
+    Parameters
+    ----------
+    obj : list
+    '''
+    padding = '  '
+    sort_order = {'REFERENCE': 0, 'AUTHORS': 1,
+                  'TITLE': 2, 'JOURNAL': 3, 'PUBMED': 4}
+    for obj_i in obj:
+        ref_i = []
+        for h in sorted(obj_i, key=lambda k: sort_order.get(k, 100)):
+            if h == header:
+                s = '{h:<{indent}}{ref}'.format(
+                    h=h, indent=indent, ref=obj_i[h])
+            else:
+                s = '{h:<{indent}}{value}'.format(
+                    h=padding + h, indent=indent, value=obj_i[h])
+            ref_i.append(s)
+        yield '%s\n' % '\n'.join(ref_i)
+
+
+def _parse_source(lines):
+    '''Parse SOURCE field.
+    '''
+    res = {}
+    # magic number 11: the non keyworded lines in SOURCE
+    # are at least indented with 11 spaces.
+    feature_indent = ' ' * 11
+    section_splitter = _yield_section(
+        lambda x: not x.startswith(feature_indent),
+        skip_blanks=True, strip=False)
+    # SOURCE line is not informative; skip it
+    _, organism = list(section_splitter(lines))
+
+    res['ORGANISM'] = organism[0].split(None, 1)[1].strip()
+    res['taxonomy'] = ' '.join([i.strip() for i in organism[1:]])
+    return res
+
+
+def _serialize_source(header, obj, indent=12):
+    '''Serialize SOURCE.
+
+    Parameters
+    ----------
+    obj : dict
+    '''
+    s = ('{header:<{indent}}{organism}\n'
+         '{h:<{indent}}{organism}\n'
+         '{space}{taxonomy}\n').format(
+             header=header, indent=indent,
+             h='  ORGANISM', organism=obj['ORGANISM'],
+             space=' ' * 12, taxonomy=obj['taxonomy'])
+    return s
+
+
+def _parse_features(lines, length):
+    '''Parse FEATURES field.
+    '''
+    features = []
+    positional_metadata = []
+    # skip the 1st FEATURES line
+    if lines[0].startswith('FEATURES'):
+        lines = lines[1:]
+    # magic number 20: the lines following header of each feature
+    # are at least indented with 20 spaces.
+    feature_indent = ' ' * 20
+    section_splitter = _yield_section(
+        lambda x: not x.startswith(feature_indent),
+        skip_blanks=True, strip=False)
+    for i, section in enumerate(section_splitter(lines)):
+        feature, pmd = _parse_single_feature(section, length, i)
+        features.append(feature)
+        positional_metadata.append(pmd)
+    return features, positional_metadata
+
+
+def _serialize_features(header, obj, indent=21):
+    first = True
+    for feature in obj:
+        if first:
+            first = False
+            yield '{header:<{indent}}Location/Qualifiers\n{feat}'.format(
+                header=header, indent=indent,
+                feat=_serialize_single_feature(feature, indent))
+        else:
+            yield _serialize_single_feature(feature, indent)
+
+
+def _parse_single_feature(lines, length, index):
+    '''Parse a feature.
+
+    Returns
+    -------
+    tuple
+        Tuple of a dict of `metadata` and a pandas.Series of
+        `positional_metadata` for the feature.
+
+    '''
+    feature = {}
+    feature['index_'] = index
+    # each component of a feature starts with '/', except the 1st
+    # component, which is the location.
+    section_splitter = _yield_section(
+        lambda x: x.startswith('/'), strip=True)
+    first = True
+    for section in section_splitter(lines):
+        if first:
+            # first section is the Location string
+            first = False
+            type, location = _parse_section_default(
+                section, join_delimitor='', return_label=True)
+            feature['type_'] = type
+            feature['location'] = location
+            loc, loc_pmd = _parse_loc_str(location, length)
+            feature.update(loc)
+        else:
+            # following sections are Qualifiers
+            k, v = _parse_section_default(
+                section, label_delimitor='=',
+                join_delimitor=' ', return_label=True)
+            k = k[1:]
+
+            # some Qualifiers can appear multiple times
+            if k in feature:
+                if not isinstance(feature[k], list):
+                    feature[k] = [feature[k]]
+                feature[k].append(v)
+            else:
+                feature[k] = v
+    return feature, loc_pmd
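+
+# Usage sketch (illustrative): for a 'source' feature like the one in the
+# single-record test file,
+#
+#     lines = ['       source        1..63',
+#              '                     /db_xref="taxon:562"',
+#              '                     /organism="Escherichia coli"']
+#     feature, pmd = _parse_single_feature(lines, 63, 0)
+#
+# ``feature`` is expected to contain {'index_': 0, 'type_': 'source',
+# 'location': '1..63', 'rc_': False, 'left_partial_': False,
+# 'right_partial_': False, 'db_xref': '"taxon:562"',
+# 'organism': '"Escherichia coli"'}, and ``pmd`` is a boolean Series of
+# length 63 that is True over the whole 1..63 span.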
+
+
+def _serialize_single_feature(obj, indent=21):
+    padding = ' ' * 8
+    qualifiers = []
+    for k in sorted(obj):
+        if k.endswith('_') or k in ('location', 'type'):
+            continue
+        v = obj[k]
+        if isinstance(v, list):
+            for vi in v:
+                qualifiers.append(_serialize_qualifier(k, vi))
+        else:
+            qualifiers.append(_serialize_qualifier(k, v))
+
+    qualifiers = [' ' * indent + i for i in qualifiers]
+    return '{header:>{indent}}{loc}\n{qualifiers}\n'.format(
+        header=obj['type_'] + padding, loc=obj['location'],
+        indent=indent, qualifiers='\n'.join(qualifiers))
+
+
+def _serialize_qualifier(key, value):
+    '''Serialize a Qualifier in a feature.
+
+    Parameters
+    ----------
+    value : int, str
+    '''
+    # if value is empty
+    if not value:
+        return '/%s' % key
+
+    return '/{k}={v}'.format(k=key, v=value)
+
+
+def _parse_loc_str(loc_str, length):
+    '''Parse location string.
+
+    Warning: This converts coordinates from the 1-based convention
+    used in GenBank format to 0-based.
+
+    The location descriptor can be one of the following:
+    (a) a single base number. e.g. 467
+    (b) a site between two indicated adjoining bases. e.g. 123^124
+    (c) a single base chosen from within a specified range of bases (not
+        allowed for new entries). e.g. 102.110
+    (d) the base numbers delimiting a sequence span. e.g. 340..565
+    (e) a remote entry identifier followed by a local location
+        descriptor (i.e., a-d). e.g. J00194.1:100..202
+
+    TODO:
+    handle (b), (c), (e) cases correctly
+    '''
+    pmd = np.zeros(length, dtype=bool)
+    res = {'rc_': False,
+           'left_partial_': False,
+           'right_partial_': False}
+    items = re.split('[(),]+', loc_str)
+    operators = ['join', 'complement', 'order']
+    if 'complement' in items:
+        res['rc_'] = True
+    for i in items:
+        i = i.strip()
+        if i in operators or not i:
+            continue
+        elif ':' in i:  # (e)
+            index = []
+        elif '..' in i:  # (d)
+            beg, end = i.split('..')
+            if beg.startswith('<'):
+                beg = beg[1:]
+                res['left_partial_'] = True
+            if end.startswith('>'):
+                end = end[1:]
+                res['right_partial_'] = True
+            beg = int(beg)
+            end = int(end)
+            index = range(beg-1, end)
+        elif '.' in i:  # (c)
+            index = []
+        elif i.isdigit():  # (a)
+            index = int(i) - 1
+        elif '^' in i:  # (b)
+            index = []
+        else:
+            raise GenBankFormatError(
+                'Could not parse location string: "%s"' %
+                loc_str)
+        pmd[index] = True
+
+    return res, pd.Series(pmd)
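+
+# Usage sketch (illustrative): for a location string like the one in the
+# multi-record test file,
+#
+#     res, pmd = _parse_loc_str('complement(<2..>8)', 9)
+#
+# ``res`` is expected to be {'rc_': True, 'left_partial_': True,
+# 'right_partial_': True} and ``pmd`` a boolean Series of length 9 that is
+# True at positions 2-8 (1-based) and False elsewhere.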
+
+
+def _parse_origin(lines):
+    '''Parse the ORIGIN section for sequence.
+    '''
+    sequence = []
+    for line in lines:
+        if line.startswith('ORIGIN'):
+            continue
+        # remove the number at the beginning of each line
+        items = line.split()
+        sequence.append(''.join(items[1:]))
+    return ''.join(sequence)
+
+
+def _serialize_origin(seq, indent=9):
+    '''Serialize seq to ORIGIN.
+
+    Parameters
+    ----------
+    seq : str
+    '''
+    n = 1
+    line_size = 60
+    frag_size = 10
+    for i in range(0, len(seq), line_size):
+        line = seq[i:i+line_size]
+        s = '{n:>{indent}} {s}\n'.format(
+            n=n, indent=indent, s=chunk_str(line, frag_size, ' '))
+        if n == 1:
+            s = 'ORIGIN\n' + s
+        n = n + line_size
+        yield s
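+
+# Usage sketch (illustrative): joining the generator output for a
+# 63-character sequence,
+#
+#     ''.join(_serialize_origin('a' * 63))
+#
+# is expected to produce numbered lines of 10-character chunks:
+#
+#     ORIGIN
+#             1 aaaaaaaaaa aaaaaaaaaa ... (six 10-character chunks)
+#            61 aaa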
+
+
+def _parse_section_default(
+        lines, label_delimitor=None, join_delimitor=' ', return_label=False):
+    '''Parse a section in the default way.
+
+    This does two things:
+        1. splits the first line on label_delimitor to obtain the label
+        2. joins all the lines into one string with join_delimitor.
+    '''
+    data = []
+    first = True
+    label = None
+    for line in lines:
+        if first:
+            items = line.split(label_delimitor, 1)
+
+            if len(items) == 2:
+                label, section = items
+            else:
+                label = items[0]
+                section = ""
+            data.append(section)
+            first = False
+        else:
+            data.append(line)
+    data = join_delimitor.join(i.strip() for i in data)
+    if return_label:
+        return label, data
+    else:
+        return data
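+
+# Usage sketch (illustrative): for a two-line DEFINITION section,
+#
+#     _parse_section_default(
+#         ['DEFINITION  alkaline phosphatase signal mRNA,',
+#          "            5' end."], return_label=True)
+#
+# is expected to return
+# ('DEFINITION', "alkaline phosphatase signal mRNA, 5' end.").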
+
+
+def _serialize_section_default(header, obj, indent=12):
+    return '{header:<{indent}}{obj}\n'.format(
+        header=header, obj=obj, indent=indent)
+
+
+def _yield_section(is_another_section, **kwargs):
+    '''Return a function that yields successive sections from a file.
+
+    Parameters
+    ----------
+    is_another_section : callable
+        Takes a line (string) as input and returns a boolean
+        indicating whether a new section starts at that line.
+    kwargs : dict, optional
+        Keyword arguments will be passed to `_line_generator`.
+
+    Returns
+    -------
+    function
+        A function that accepts a list of lines as input and returns
+        a generator yielding sections one by one.
+    '''
+    def parser(lines):
+        curr = []
+        for line in _line_generator(lines, **kwargs):
+            # if we find another section, yield the previous one
+            if is_another_section(line):
+                if curr:
+                    yield curr
+                    curr = []
+            curr.append(line)
+        # don't forget to yield the last section in the file
+        if curr:
+            yield curr
+    return parser
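+
+# Usage sketch (illustrative): a splitter that starts a new section at every
+# non-indented line,
+#
+#     splitter = _yield_section(lambda line: not line.startswith(' '),
+#                               skip_blanks=True, strip=False)
+#     list(splitter(['KEYWORDS    .',
+#                    'SOURCE      Bacteria',
+#                    '  ORGANISM  Bacteria']))
+#
+# is expected to return [['KEYWORDS    .'],
+#                        ['SOURCE      Bacteria', '  ORGANISM  Bacteria']].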
+
+
+_PARSER_TABLE = {
+    'LOCUS': _parse_locus,
+    'SOURCE': _parse_source,
+    'REFERENCE': _parse_reference,
+    'FEATURES': _parse_features,
+    'ORIGIN': _parse_origin}
+
+
+_SERIALIZER_TABLE = {
+    'LOCUS': _serialize_locus,
+    'SOURCE': _serialize_source,
+    'REFERENCE': _serialize_reference,
+    'FEATURES': _serialize_features}
diff --git a/skbio/io/format/newick.py b/skbio/io/format/newick.py
index 27c5c48..3796c87 100644
--- a/skbio/io/format/newick.py
+++ b/skbio/io/format/newick.py
@@ -173,10 +173,10 @@ Examples
 --------
 This is a simple Newick string.
 
->>> from StringIO import StringIO
+>>> from io import StringIO
 >>> from skbio import read
 >>> from skbio.tree import TreeNode
->>> f = StringIO(u"((D, E)B, (F, G)C)A;")
+>>> f = StringIO("((D, E)B, (F, G)C)A;")
 >>> tree = read(f, format="newick", into=TreeNode)
 >>> f.close()
 >>> print(tree.ascii_art())
@@ -190,7 +190,7 @@ This is a simple Newick string.
 
 This is a complex Newick string.
 
->>> f = StringIO(u"[example](a:0.1, 'b_b''':0.2, (c:0.3, d_d:0.4)e:0.5)f:0.0;")
+>>> f = StringIO("[example](a:0.1, 'b_b''':0.2, (c:0.3, d_d:0.4)e:0.5)f:0.0;")
 >>> tree = read(f, format="newick", into=TreeNode)
 >>> f.close()
 >>> print(tree.ascii_art())
diff --git a/skbio/io/format/ordination.py b/skbio/io/format/ordination.py
index 422a29c..465be22 100644
--- a/skbio/io/format/ordination.py
+++ b/skbio/io/format/ordination.py
@@ -139,8 +139,8 @@ ordination results in ``ordination`` format::
 
 Load the ordination results from the file:
 
->>> from StringIO import StringIO
->>> from skbio.stats.ordination import OrdinationResults
+>>> from io import StringIO
+>>> from skbio import OrdinationResults
 >>> or_f = StringIO(
 ...  "Eigvals\t4\n"
 ...  "0.36\t0.18\t0.07\t0.08\n"
@@ -191,8 +191,9 @@ from __future__ import (absolute_import, division, print_function,
 from future.builtins import zip
 
 import numpy as np
+import pandas as pd
 
-from skbio.stats.ordination import OrdinationResults
+from skbio._base import OrdinationResults
 from skbio.io import create_format, OrdinationFormatError
 
 ordination = create_format('ordination')
@@ -232,32 +233,32 @@ def _ordination_to_ordination_results(fh):
                                   'proportion explained values')
     _check_empty_line(fh)
 
-    species, species_ids = _parse_array_section(fh, 'Species')
+    species = _parse_array_section(fh, 'Species')
     _check_length_against_eigvals(species, eigvals,
                                   'coordinates per species')
     _check_empty_line(fh)
 
-    site, site_ids = _parse_array_section(fh, 'Site')
+    site = _parse_array_section(fh, 'Site')
     _check_length_against_eigvals(site, eigvals,
                                   'coordinates per site')
     _check_empty_line(fh)
 
     # biplot does not have ids to parse (the other arrays do)
-    biplot, _ = _parse_array_section(fh, 'Biplot', has_ids=False)
+    biplot = _parse_array_section(fh, 'Biplot', has_ids=False)
     _check_empty_line(fh)
 
-    cons, cons_ids = _parse_array_section(fh, 'Site constraints')
+    cons = _parse_array_section(fh, 'Site constraints')
 
-    if cons_ids is not None and site_ids is not None:
-        if cons_ids != site_ids:
+    if cons is not None and site is not None:
+        if not np.array_equal(cons.index, site.index):
             raise OrdinationFormatError(
                 "Site constraints ids and site ids must be equal: %s != %s" %
-                (cons_ids, site_ids))
+                (cons.index, site.index))
 
     return OrdinationResults(
-        eigvals=eigvals, species=species, site=site, biplot=biplot,
-        site_constraints=cons, proportion_explained=prop_expl,
-        species_ids=species_ids, site_ids=site_ids)
+        short_method_name='', long_method_name='', eigvals=eigvals,
+        features=species, samples=site, biplot_scores=biplot,
+        sample_constraints=cons, proportion_explained=prop_expl)
 
 
 def _parse_header(fh, header_id, num_dimensions):
@@ -311,7 +312,8 @@ def _parse_vector_section(fh, header_id):
             raise OrdinationFormatError(
                 "Reached end of file while looking for line containing values "
                 "for %s section." % header_id)
-        vals = np.asarray(line.strip().split('\t'), dtype=np.float64)
+        vals = pd.Series(np.asarray(line.strip().split('\t'),
+                                    dtype=np.float64))
         if len(vals) != num_vals:
             raise OrdinationFormatError(
                 "Expected %d values in %s section, but found %d." %
@@ -362,18 +364,20 @@ def _parse_array_section(fh, header_id, has_ids=True):
                     "Expected %d values, but found %d in row %d." %
                     (cols, len(vals), i + 1))
             data[i, :] = np.asarray(vals, dtype=np.float64)
-    return data, ids
+        data = pd.DataFrame(data, index=ids)
+
+    return data
 
 
 @ordination.writer(OrdinationResults)
 def _ordination_results_to_ordination(obj, fh):
     _write_vector_section(fh, 'Eigvals', obj.eigvals)
     _write_vector_section(fh, 'Proportion explained', obj.proportion_explained)
-    _write_array_section(fh, 'Species', obj.species, obj.species_ids)
-    _write_array_section(fh, 'Site', obj.site, obj.site_ids)
-    _write_array_section(fh, 'Biplot', obj.biplot)
-    _write_array_section(fh, 'Site constraints', obj.site_constraints,
-                         obj.site_ids, include_section_separator=False)
+    _write_array_section(fh, 'Species', obj.features)
+    _write_array_section(fh, 'Site', obj.samples)
+    _write_array_section(fh, 'Biplot', obj.biplot_scores, has_ids=False)
+    _write_array_section(fh, 'Site constraints', obj.sample_constraints,
+                         include_section_separator=False)
 
 
 def _write_vector_section(fh, header_id, vector):
@@ -384,11 +388,11 @@ def _write_vector_section(fh, header_id, vector):
     fh.write("%s\t%d\n" % (header_id, shape))
 
     if vector is not None:
-        fh.write(_format_vector(vector))
+        fh.write(_format_vector(vector.values))
     fh.write("\n")
 
 
-def _write_array_section(fh, header_id, data, ids=None,
+def _write_array_section(fh, header_id, data, has_ids=True,
                          include_section_separator=True):
     # write section header
     if data is None:
@@ -399,11 +403,11 @@ def _write_array_section(fh, header_id, data, ids=None,
 
     # write section data
     if data is not None:
-        if ids is None:
-            for vals in data:
+        if not has_ids:
+            for vals in data.values:
                 fh.write(_format_vector(vals))
         else:
-            for id_, vals in zip(ids, data):
+            for id_, vals in zip(data.index, data.values):
                 fh.write(_format_vector(vals, id_))
 
     if include_section_separator:
diff --git a/skbio/io/format/phylip.py b/skbio/io/format/phylip.py
index 197f116..3e056c9 100644
--- a/skbio/io/format/phylip.py
+++ b/skbio/io/format/phylip.py
@@ -27,12 +27,12 @@ An example PHYLIP-formatted file taken from [3]_::
 
 Format Support
 --------------
-**Has Sniffer: No**
+**Has Sniffer: Yes**
 
 +------+------+---------------------------------------------------------------+
 |Reader|Writer|                          Object Class                         |
 +======+======+===============================================================+
-|No    |Yes   |:mod:`skbio.alignment.Alignment`                               |
+|Yes   |Yes   |:mod:`skbio.alignment.TabularMSA`                              |
 +------+------+---------------------------------------------------------------+
 
 Format Specification
@@ -50,10 +50,9 @@ relax this rule to allow for longer sequence identifiers. See the
 The format described here is "sequential" format. The original PHYLIP format
 specification [3]_ describes both sequential and interleaved formats.
 
-.. note:: scikit-bio currently only supports writing strict, sequential
-   PHYLIP-formatted files from an ``skbio.alignment.Alignment``. It does not
-   yet support reading PHYLIP-formatted files, nor does it support relaxed or
-   interleaved PHYLIP formats.
+.. note:: scikit-bio currently supports reading and writing strict, sequential
+   PHYLIP-formatted files. Relaxed and/or interleaved PHYLIP formats are not
+   supported.
 
 Header Section
 ^^^^^^^^^^^^^^
@@ -82,13 +81,16 @@ must have spaces appended to them to reach the 10 character fixed width. Within
 an ID, all characters except newlines are supported, including spaces,
 underscores, and numbers.
 
-.. note:: While not explicitly stated in the original PHYLIP format
-   description, scikit-bio only supports writing unique sequence identifiers
-   (i.e., duplicates are not allowed). Uniqueness is required because an
-   ``skbio.alignment.Alignment`` cannot be created with duplicate IDs.
+.. note:: When reading a PHYLIP-formatted file into an
+   ``skbio.alignment.TabularMSA`` object, sequence identifiers/labels are
+   stored as ``TabularMSA`` index labels (``index`` property).
+
+   When writing an ``skbio.alignment.TabularMSA`` object as a PHYLIP-formatted
+   file, ``TabularMSA`` index labels will be converted to strings and written
+   as sequence identifiers/labels.
 
    scikit-bio supports the empty string (``''``) as a valid sequence ID. An
-   empty ID will be padded with 10 spaces.
+   empty ID will be padded with 10 spaces when writing.
 
 Sequence characters immediately follow the sequence ID. They *must* start at
 the 11th character in the line, as the first 10 characters are reserved for the
@@ -100,15 +102,16 @@ PHYLIP specification uses ``-`` as a gap character, though older versions also
 supported ``.``. The sequence characters may contain optional spaces (e.g., to
 improve readability), and both upper and lower case characters are supported.
 
-.. note:: scikit-bio will write a PHYLIP-formatted file even if the alignment's
-   sequence characters are not valid IUPAC characters. This differs from the
-   PHYLIP specification, which states that a PHYLIP-formatted file can only
-   contain valid IUPAC characters. To check whether all characters are valid
-   before writing, the user can call ``Alignment.is_valid()``.
+.. note:: scikit-bio will read/write a PHYLIP-formatted file as long as the
+   alignment's sequence characters are valid for the type of in-memory sequence
+   object being read into or written from. This differs from the PHYLIP
+   specification, which states that a PHYLIP-formatted file can only contain
+   valid IUPAC characters. See the ``constructor`` format parameter below for
+   details.
 
    Since scikit-bio supports both ``-`` and ``.`` as gap characters (e.g., in
-   ``skbio.alignment.Alignment``), both are supported when writing a
-   PHYLIP-formatted file.
+   ``DNA``, ``RNA``, and ``Protein`` sequence objects), both are supported when
+   reading/writing a PHYLIP-formatted file.
 
    When writing a PHYLIP-formatted file, scikit-bio will split up each sequence
    into chunks that are 10 characters long. Each chunk will be separated by a
@@ -116,26 +119,46 @@ improve readability), and both upper and lower case characters are supported.
    format). It will *not* be wrapped across multiple lines. Sequences are
    chunked in this manner for improved readability, and because most example
    PHYLIP files are chunked in a similar way (e.g., see the example file
-   above). Note that this chunking is not required by the PHYLIP format.
+   above). Note that this chunking is not required when reading
+   PHYLIP-formatted files, nor by the PHYLIP format specification itself.
+
+Format Parameters
+-----------------
+The only supported format parameter is ``constructor``, which specifies the
+type of in-memory sequence object to read each aligned sequence into. This must
+be a subclass of ``IUPACSequence`` (e.g., ``DNA``, ``RNA``, ``Protein``) and is
+a required format parameter. For example, if you know that the PHYLIP file
+you're reading contains DNA sequences, you would pass ``constructor=DNA`` to
+the reader call.
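+
+For example, assuming ``fh`` is an open file handle (or a file path) pointing
+at a PHYLIP-formatted file of DNA sequences, the reader can be invoked along
+these lines:
+
+>>> from skbio import TabularMSA, DNA
+>>> msa = TabularMSA.read(fh, constructor=DNA)  # doctest: +SKIP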
 
 Examples
 --------
-Let's create an alignment with three DNA sequences of equal length:
+Let's create a ``TabularMSA`` with three DNA sequences:
 
->>> from skbio import Alignment, DNA
+>>> from skbio import TabularMSA, DNA
 >>> seqs = [DNA('ACCGTTGTA-GTAGCT', metadata={'id':'seq1'}),
 ...         DNA('A--GTCGAA-GTACCT', metadata={'id':'sequence-2'}),
 ...         DNA('AGAGTTGAAGGTATCT', metadata={'id':'3'})]
->>> aln = Alignment(seqs)
->>> aln
-<Alignment: n=3; mean +/- std length=16.00 +/- 0.00>
-
-Now let's write the alignment to file in PHYLIP format, and take a look at the
-output:
+>>> msa = TabularMSA(seqs, minter='id')
+>>> msa
+TabularMSA[DNA]
+----------------------
+Stats:
+    sequence count: 3
+    position count: 16
+----------------------
+ACCGTTGTA-GTAGCT
+A--GTCGAA-GTACCT
+AGAGTTGAAGGTATCT
+>>> msa.index
+Index(['seq1', 'sequence-2', '3'], dtype='object')
+
+Now let's write the ``TabularMSA`` to file in PHYLIP format and take a look at
+the output:
 
 >>> from io import StringIO
 >>> fh = StringIO()
->>> print(aln.write(fh, format='phylip').getvalue())
+>>> print(msa.write(fh, format='phylip').getvalue())
 3 16
 seq1      ACCGTTGTA- GTAGCT
 sequence-2A--GTCGAA- GTACCT
@@ -148,40 +171,36 @@ each sequence appears on a single line (sequential format). Also note that each
 sequence ID is padded with spaces to 10 characters in order to produce a fixed
 width column.
 
-If the sequence IDs in an alignment surpass the 10-character limit, an error
-will be raised when we try to write a PHYLIP file:
+If the index labels in a ``TabularMSA`` surpass the 10-character limit, an
+error will be raised when writing:
 
->>> long_id_seqs = [DNA('ACCGT', metadata={'id':'seq1'}),
-...                 DNA('A--GT', metadata={'id':'long-sequence-2'}),
-...                 DNA('AGAGT', metadata={'id':'seq3'})]
->>> long_id_aln = Alignment(long_id_seqs)
+>>> msa.index = ['seq1', 'long-sequence-2', 'seq3']
 >>> fh = StringIO()
->>> long_id_aln.write(fh, format='phylip')
+>>> msa.write(fh, format='phylip')
 Traceback (most recent call last):
     ...
-PhylipFormatError: Alignment can only be written in PHYLIP format if all \
-sequence IDs have 10 or fewer characters. Found sequence with ID \
-'long-sequence-2' that exceeds this limit. Use Alignment.update_ids to assign \
-shorter IDs.
+skbio.io._exception.PhylipFormatError: ``TabularMSA`` can only be written in \
+PHYLIP format if all sequence index labels have 10 or fewer characters. Found \
+sequence with index label 'long-sequence-2' that exceeds this limit. Use \
+``TabularMSA.reassign_index`` to assign shorter index labels.
 >>> fh.close()
 
-One way to work around this is to update the IDs to be shorter. The recommended
-way of accomplishing this is via ``Alignment.update_ids``, which provides a
-flexible way of creating a new ``Alignment`` with updated IDs. For example, to
-remap each of the IDs to integer-based IDs:
+One way to work around this is to assign shorter index labels. The recommended
+way to do this is via ``TabularMSA.reassign_index``. For example, to reassign
+default integer index labels:
 
->>> short_id_aln, _ = long_id_aln.update_ids()
->>> short_id_aln.ids()
-['1', '2', '3']
+>>> msa.reassign_index()
+>>> msa.index
+Int64Index([0, 1, 2], dtype='int64')
 
-We can now write the new alignment in PHYLIP format:
+We can now write the ``TabularMSA`` in PHYLIP format:
 
 >>> fh = StringIO()
->>> print(short_id_aln.write(fh, format='phylip').getvalue())
-3 5
-1         ACCGT
-2         A--GT
-3         AGAGT
+>>> print(msa.write(fh, format='phylip').getvalue())
+3 16
+0         ACCGTTGTA- GTAGCT
+1         A--GTCGAA- GTACCT
+2         AGAGTTGAAG GTATCT
 <BLANKLINE>
 >>> fh.close()
 
@@ -207,41 +226,135 @@ References
 from __future__ import (absolute_import, division, print_function,
                         unicode_literals)
 
-from skbio.alignment import Alignment
+from skbio.alignment import TabularMSA
 from skbio.io import create_format, PhylipFormatError
 from skbio.util._misc import chunk_str
 
-phylip = create_format('phylip')
 
+phylip = create_format('phylip')
 
- at phylip.writer(Alignment)
-def _alignment_to_phylip(obj, fh):
 
-    if obj.is_empty():
+ at phylip.sniffer()
+def _phylip_sniffer(fh):
+    # Strategy:
+    #   Read the header and a single sequence; verify that the sequence length
+    #   matches the header information.  Do not verify that the total number of
+    #   lines matches the header information, since that would require reading
+    #   the whole file.
+    try:
+        header = next(_line_generator(fh))
+        _, seq_len = _validate_header(header)
+        line = next(_line_generator(fh))
+        _validate_line(line, seq_len)
+    except (StopIteration, PhylipFormatError):
+        return False, {}
+    return True, {}
+
+
+ at phylip.reader(TabularMSA)
+def _phylip_to_tabular_msa(fh, constructor=None):
+    if constructor is None:
+        raise ValueError("Must provide `constructor`.")
+
+    seqs = []
+    index = []
+    for seq, ID in _parse_phylip_raw(fh):
+        seqs.append(constructor(seq))
+        index.append(ID)
+    return TabularMSA(seqs, index=index)
+
+
+ at phylip.writer(TabularMSA)
+def _tabular_msa_to_phylip(obj, fh):
+    sequence_count = obj.shape.sequence
+    if sequence_count < 1:
         raise PhylipFormatError(
-            "Alignment can only be written in PHYLIP format if there is at "
+            "TabularMSA can only be written in PHYLIP format if there is at "
             "least one sequence in the alignment.")
 
-    sequence_length = obj.sequence_length()
-    if sequence_length == 0:
+    sequence_length = obj.shape.position
+    if sequence_length < 1:
         raise PhylipFormatError(
-            "Alignment can only be written in PHYLIP format if there is at "
+            "TabularMSA can only be written in PHYLIP format if there is at "
             "least one position in the alignment.")
 
     chunk_size = 10
-    for id_ in obj.ids():
-        if len(id_) > chunk_size:
+    labels = [str(label) for label in obj.index]
+    for label in labels:
+        if len(label) > chunk_size:
             raise PhylipFormatError(
-                "Alignment can only be written in PHYLIP format if all "
-                "sequence IDs have %d or fewer characters. Found sequence "
-                "with ID '%s' that exceeds this limit. Use "
-                "Alignment.update_ids to assign shorter IDs." %
-                (chunk_size, id_))
+                "``TabularMSA`` can only be written in PHYLIP format if all "
+                "sequence index labels have %d or fewer characters. Found "
+                "sequence with index label '%s' that exceeds this limit. Use "
+                "``TabularMSA.reassign_index`` to assign shorter index labels."
+                % (chunk_size, label))
 
-    sequence_count = obj.sequence_count()
     fh.write('{0:d} {1:d}\n'.format(sequence_count, sequence_length))
 
     fmt = '{0:%d}{1}\n' % chunk_size
-    for seq in obj:
+    for label, seq in zip(labels, obj):
         chunked_seq = chunk_str(str(seq), chunk_size, ' ')
-        fh.write(fmt.format(seq.metadata['id'], chunked_seq))
+        fh.write(fmt.format(label, chunked_seq))
+
+
+def _validate_header(header):
+    header_vals = header.split()
+    try:
+        n_seqs, seq_len = [int(x) for x in header_vals]
+        if n_seqs < 1 or seq_len < 1:
+            raise PhylipFormatError(
+                'The number of sequences and the length must be positive.')
+    except ValueError:
+        raise PhylipFormatError(
+            'Found non-header line when attempting to read the 1st record '
+            '(header line should have two space-separated integers): '
+            '"%s"' % header)
+    return n_seqs, seq_len
+
+
+def _validate_line(line, seq_len):
+    if not line:
+        raise PhylipFormatError("Empty lines are not allowed.")
+    ID = line[:10].strip()
+    seq = line[10:].replace(' ', '')
+    if len(seq) != seq_len:
+        raise PhylipFormatError(
+            "The length of sequence %s is not %s as specified in the header."
+            % (ID, seq_len))
+    return (seq, ID)
+
+
+def _parse_phylip_raw(fh):
+    """Raw parser for PHYLIP files.
+
+    Returns a list of raw (seq, id) values.  It is the responsibility of the
+    caller to construct the correct in-memory object to hold the data.
+
+    """
+    # Note: this returns the full data instead of yielding each sequence,
+    # because the header specifies the number of sequences, so the file cannot
+    # be validated until it's read completely.
+
+    # File should have a single header on the first line.
+    try:
+        header = next(_line_generator(fh))
+    except StopIteration:
+        raise PhylipFormatError("This file is empty.")
+    n_seqs, seq_len = _validate_header(header)
+
+    # All following lines should be ID+sequence. No blank lines are allowed.
+    data = []
+    for line in _line_generator(fh):
+        data.append(_validate_line(line, seq_len))
+    if len(data) != n_seqs:
+        raise PhylipFormatError(
+            "The number of sequences is not %s " % n_seqs +
+            "as specified in the header.")
+    return data
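+
+# Usage sketch (illustrative; any iterable of lines, such as an open file
+# handle, works here):
+#
+#     lines = ['2 20',
+#              'foo       ..ACC-GTTG G..AATGC.C',
+#              'bar       TTACCGGT-G GCCTA-GCAT']
+#     _parse_phylip_raw(lines)
+#
+# is expected to return [('..ACC-GTTGG..AATGC.C', 'foo'),
+#                        ('TTACCGGT-GGCCTA-GCAT', 'bar')].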
+
+
+def _line_generator(fh):
+    """Just remove linebreak characters and yield lines.
+    """
+    for line in fh:
+        yield line.rstrip('\n')
diff --git a/skbio/io/format/qseq.py b/skbio/io/format/qseq.py
index dc4d472..6604bef 100644
--- a/skbio/io/format/qseq.py
+++ b/skbio/io/format/qseq.py
@@ -17,8 +17,6 @@ Format Support
 +======+======+===============================================================+
 |Yes   |No    |generator of :mod:`skbio.sequence.Sequence` objects            |
 +------+------+---------------------------------------------------------------+
-|Yes   |No    |:mod:`skbio.alignment.SequenceCollection`                      |
-+------+------+---------------------------------------------------------------+
 |Yes   |No    |:mod:`skbio.sequence.Sequence`                                 |
 +------+------+---------------------------------------------------------------+
 |Yes   |No    |:mod:`skbio.sequence.DNA`                                      |
@@ -51,9 +49,8 @@ For more details please refer to the CASAVA documentation [1]_.
    `metadata` attribute is automatically populated with data corresponding
    to the names above.
 
-.. note:: `lowercase` functionality is supported when reading QSeq files,
-   depending on the object type the file is being read into. Refer to
-   specific object constructor documentation for details.
+.. note:: `lowercase` functionality is supported when reading QSeq files.
+   Refer to specific object constructor documentation for details.
 
 .. note:: scikit-bio allows for the filter field to be omitted, but it is not
    clear if this is part of the original format specification.
@@ -72,8 +69,8 @@ The following additional parameters are the same as in FASTA format
 - ``constructor``: see ``constructor`` parameter in FASTA format
 - ``seq_num``: see ``seq_num`` parameter in FASTA format
 
-SequenceCollection and Generators Only
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Generators Only
+^^^^^^^^^^^^^^^
 - ``filter``: If `True`, excludes sequences that did not pass filtering
   (i.e., filter field is 0). Default is `True`.
 
@@ -90,7 +87,7 @@ Let's define this file in-memory as a ``StringIO``, though this could be a real
 file path, file handle, or anything that's supported by scikit-bio's I/O
 registry in practice:
 
->>> from StringIO import StringIO
+>>> from io import StringIO
 >>> fs = '\n'.join([
 ...     'illumina\t1\t3\t34\t-30\t30\t0\t1\tACG....ACGTAC\truBBBBrBCEFGH\t1',
 ...     'illumina\t1\t3\t34\t30\t-30\t0\t1\tCGGGCATTGCA\tCGGGCasdGCA\t0',
@@ -99,12 +96,50 @@ registry in practice:
 ... ])
 >>> fh = StringIO(fs)
 
-To load the sequences into a ``SequenceCollection``, we run:
-
->>> from skbio import SequenceCollection
->>> sc = SequenceCollection.read(fh, variant='illumina1.3')
->>> sc
-<SequenceCollection: n=2; mean +/- std length=13.00 +/- 0.00>
+To iterate over the sequences using the generator reader, we run:
+
+>>> import skbio.io
+>>> for seq in skbio.io.read(fh, format='qseq', variant='illumina1.3'):
+...     seq
+...     print('')
+Sequence
+--------------------------------------
+Metadata:
+    'id': 'illumina_1:3:34:-30:30#0/1'
+    'index': 0
+    'lane_number': 3
+    'machine_name': 'illumina'
+    'read_number': 1
+    'run_number': 1
+    'tile_number': 34
+    'x': -30
+    'y': 30
+Positional metadata:
+    'quality': <dtype: uint8>
+Stats:
+    length: 13
+--------------------------------------
+0 ACG....ACG TAC
+<BLANKLINE>
+Sequence
+--------------------------------------
+Metadata:
+    'id': 'illumina_1:3:35:-30:30#0/2'
+    'index': 0
+    'lane_number': 3
+    'machine_name': 'illumina'
+    'read_number': 2
+    'run_number': 1
+    'tile_number': 35
+    'x': -30
+    'y': 30
+Positional metadata:
+    'quality': <dtype: uint8>
+Stats:
+    length: 13
+--------------------------------------
+0 ACGTA.AATA AAC
+<BLANKLINE>
 
 Note that only two sequences were loaded because the QSeq reader filters out
 sequences whose filter field is 0 (unless ``filter=False`` is supplied).
@@ -130,7 +165,6 @@ from future.builtins import zip, range
 
 from skbio.io import create_format, QSeqFormatError
 from skbio.io.format._base import _decode_qual_to_phred, _get_nth_sequence
-from skbio.alignment import SequenceCollection
 from skbio.sequence import Sequence, DNA, RNA, Protein
 
 _default_phred_offset = None
@@ -176,29 +210,17 @@ def _qseq_to_generator(fh, constructor=Sequence, filter=_will_filter,
                               **kwargs)
 
 
- at qseq.reader(SequenceCollection)
-def _qseq_to_sequence_collection(fh, constructor=Sequence,
-                                 filter=_will_filter,
-                                 phred_offset=_default_phred_offset,
-                                 variant=_default_variant):
-    return SequenceCollection(list(_qseq_to_generator(
-        fh, constructor=constructor, filter=filter, phred_offset=phred_offset,
-        variant=variant)))
-
-
 @qseq.reader(Sequence)
-def _qseq_to_biological_sequence(fh, seq_num=1,
-                                 phred_offset=_default_phred_offset,
-                                 variant=_default_variant):
+def _qseq_to_sequence(fh, seq_num=1, phred_offset=_default_phred_offset,
+                      variant=_default_variant, **kwargs):
     return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
                              phred_offset=phred_offset, variant=variant,
-                             constructor=Sequence), seq_num)
+                             constructor=Sequence, **kwargs), seq_num)
 
 
 @qseq.reader(DNA)
-def _qseq_to_dna_sequence(fh, seq_num=1,
-                          phred_offset=_default_phred_offset,
-                          variant=_default_variant, **kwargs):
+def _qseq_to_dna(fh, seq_num=1, phred_offset=_default_phred_offset,
+                 variant=_default_variant, **kwargs):
     return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
                              phred_offset=phred_offset, variant=variant,
                              constructor=DNA, **kwargs),
@@ -206,9 +228,8 @@ def _qseq_to_dna_sequence(fh, seq_num=1,
 
 
 @qseq.reader(RNA)
-def _qseq_to_rna_sequence(fh, seq_num=1,
-                          phred_offset=_default_phred_offset,
-                          variant=_default_variant, **kwargs):
+def _qseq_to_rna(fh, seq_num=1, phred_offset=_default_phred_offset,
+                 variant=_default_variant, **kwargs):
     return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
                              phred_offset=phred_offset, variant=variant,
                              constructor=RNA, **kwargs),
@@ -216,9 +237,8 @@ def _qseq_to_rna_sequence(fh, seq_num=1,
 
 
 @qseq.reader(Protein)
-def _qseq_to_protein_sequence(fh, seq_num=1,
-                              phred_offset=_default_phred_offset,
-                              variant=_default_variant, **kwargs):
+def _qseq_to_protein(fh, seq_num=1, phred_offset=_default_phred_offset,
+                     variant=_default_variant, **kwargs):
     return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
                              phred_offset=phred_offset, variant=variant,
                              constructor=Protein, **kwargs),
diff --git a/skbio/io/format/tests/data/blast6_custom_minimal b/skbio/io/format/tests/data/blast6_custom_minimal
new file mode 100644
index 0000000..073319a
--- /dev/null
+++ b/skbio/io/format/tests/data/blast6_custom_minimal
@@ -0,0 +1 @@
+subject2
diff --git a/skbio/io/format/tests/data/blast6_custom_mixed_nans b/skbio/io/format/tests/data/blast6_custom_mixed_nans
new file mode 100644
index 0000000..7c5d106
--- /dev/null
+++ b/skbio/io/format/tests/data/blast6_custom_mixed_nans
@@ -0,0 +1,2 @@
+N/A	PAAWWWWW	8	1	100.00	N/A	0
+query1	N/A	8	1	N/A	8	0
diff --git a/skbio/io/format/tests/data/blast6_custom_multi_line b/skbio/io/format/tests/data/blast6_custom_multi_line
new file mode 100644
index 0000000..5d28b82
--- /dev/null
+++ b/skbio/io/format/tests/data/blast6_custom_multi_line
@@ -0,0 +1,3 @@
+subject2	32	0	100	N/A	subject2	query1
+subject2	18	0	100	N/A	subject2	query1
+subject1	19	0	70	N/A	subject1	query2
diff --git a/skbio/io/format/tests/data/blast6_custom_single_line b/skbio/io/format/tests/data/blast6_custom_single_line
new file mode 100644
index 0000000..9593a7e
--- /dev/null
+++ b/skbio/io/format/tests/data/blast6_custom_single_line
@@ -0,0 +1 @@
+query1	PAAWWWWW	8	1	100.00	8	0
diff --git a/skbio/io/format/tests/data/blast6_default_multi_line b/skbio/io/format/tests/data/blast6_default_multi_line
new file mode 100644
index 0000000..930b5c7
--- /dev/null
+++ b/skbio/io/format/tests/data/blast6_default_multi_line
@@ -0,0 +1,3 @@
+query1	subject2	100.00	8	0	0	1	8	3	10	9e-05	16.9
+query1	subject2	75.00	8	2	0	1	8	2	9	0.060	11.5
+query2	subject1	71.43	7	2	0	1	7	1	7	0.044	11.9
diff --git a/skbio/io/format/tests/data/blast6_default_single_line b/skbio/io/format/tests/data/blast6_default_single_line
new file mode 100644
index 0000000..f30f0ae
--- /dev/null
+++ b/skbio/io/format/tests/data/blast6_default_single_line
@@ -0,0 +1 @@
+query1	subject2	75.00	8	2	0	1	8	2	9	0.060	11.5
diff --git a/skbio/io/format/tests/data/blast6_invalid_column_types b/skbio/io/format/tests/data/blast6_invalid_column_types
new file mode 100644
index 0000000..147c8c1
--- /dev/null
+++ b/skbio/io/format/tests/data/blast6_invalid_column_types
@@ -0,0 +1,3 @@
+1.2523	subject2	abcd	8	0	0	1	8	3	10	9e-05	16.9
+query1	subject2	75.00	8	2	0	r	8	2	9	0.060	11.5
+query2	subject1	71.43	7	2	0	1	7	1	7	G24TL	11.9
diff --git a/skbio/io/format/tests/data/blast6_invalid_number_of_columns b/skbio/io/format/tests/data/blast6_invalid_number_of_columns
new file mode 100644
index 0000000..1cd96eb
--- /dev/null
+++ b/skbio/io/format/tests/data/blast6_invalid_number_of_columns
@@ -0,0 +1 @@
+query1	subject2	75.00	8	2	0	1	8	2	9
diff --git a/skbio/io/format/tests/data/blast6_invalid_type_in_column b/skbio/io/format/tests/data/blast6_invalid_type_in_column
new file mode 100644
index 0000000..0b4bd6d
--- /dev/null
+++ b/skbio/io/format/tests/data/blast6_invalid_type_in_column
@@ -0,0 +1,2 @@
+query1	subject2	string	8	2	0	1	8	2	9	0.060	11.5
+query1	subject2	75.00	8	2	0	1	8	2	9	0.060	11.5
diff --git a/skbio/io/format/tests/data/blast7_custom_minimal b/skbio/io/format/tests/data/blast7_custom_minimal
new file mode 100644
index 0000000..e3ec0c3
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_custom_minimal
@@ -0,0 +1,6 @@
+# BLAST 2.2.31+
+# Query: query1
+# Subject: subject1
+# Fields: query id
+# 1 hits found
+query1
diff --git a/skbio/io/format/tests/data/blast7_custom_mixed_nans b/skbio/io/format/tests/data/blast7_custom_mixed_nans
new file mode 100644
index 0000000..ba50b5a
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_custom_mixed_nans
@@ -0,0 +1,16 @@
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject1
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject2
+# Fields: query gi, subject gi, query length, subject length, query frame, sbjct frame, query id, subject id
+# 1 hits found
+0	N/A	8	13	1	1	N/A	subject2
+N/A	0	8	N/A	1	1	query1	N/A
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject4
+# 0 hits found
+# BLAST processed 2 queries
diff --git a/skbio/io/format/tests/data/blast7_custom_multi_line b/skbio/io/format/tests/data/blast7_custom_multi_line
new file mode 100644
index 0000000..8de495e
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_custom_multi_line
@@ -0,0 +1,16 @@
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject2
+# Fields: q. start, q. end, s. start, s. end, identical, mismatches, sbjct frame, query acc.ver, subject acc.ver
+# 3 hits found
+1	8	3	10	8	0	1	query1	subject2
+2	5	2	15	8	0	2	query1	subject2
+1	6	2	12	8	0	1	query1	subject2
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject3
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject4
+# 0 hits found
diff --git a/skbio/io/format/tests/data/blast7_custom_single_line b/skbio/io/format/tests/data/blast7_custom_single_line
new file mode 100644
index 0000000..75eb536
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_custom_single_line
@@ -0,0 +1,15 @@
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject1
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject2
+# Fields: query id, % positives, % identity, alignment length, subject gi, bit score, q. end, query seq
+# 1 hits found
+query1	100.00	100.00	8	0	16.9	8	PAAWWWWW
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject3
+# 0 hits found
+# BLAST processed 2 queries
diff --git a/skbio/io/format/tests/data/blast7_default_multi_line b/skbio/io/format/tests/data/blast7_default_multi_line
new file mode 100644
index 0000000..aafa66b
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_default_multi_line
@@ -0,0 +1,12 @@
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject1
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject2
+# Fields: query id, subject id, % identity, alignment length, mismatches, gap opens, q. start, q. end, s. start, s. end, evalue, bit score
+# 3 hits found
+query1	subject2	70.00	5	0	0	7	60	3	100	9e-05	10.5
+query1	subject2	30.00	8	0	0	6	15	1	100	0.053	12.0
+query1	subject2	90.00	2	0	0	9	35	2	100	0.002	8.3
diff --git a/skbio/io/format/tests/data/blast7_default_single_line b/skbio/io/format/tests/data/blast7_default_single_line
new file mode 100644
index 0000000..f36be87
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_default_single_line
@@ -0,0 +1,10 @@
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject1
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject2
+# Fields: query id, subject id, % identity, alignment length, mismatches, gap opens, q. start, q. end, s. start, s. end, evalue, bit score
+# 1 hits found
+query1	subject2	100.00	8	0	0	1	8	3	10	9e-05	16.9
diff --git a/skbio/io/format/tests/data/blast7_invalid_differing_fields b/skbio/io/format/tests/data/blast7_invalid_differing_fields
new file mode 100644
index 0000000..d3961e4
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_invalid_differing_fields
@@ -0,0 +1,21 @@
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject1
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject3
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject1
+# Fields: query id, subject id, q. start
+# 1 hits found
+query1	subject1	4
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject1
+# Fields: query id, subject id, score
+# 1 hits found
+query2	subject2	4
+# BLAST processed 2 queries
diff --git a/skbio/io/format/tests/data/blast7_invalid_for_sniffer b/skbio/io/format/tests/data/blast7_invalid_for_sniffer
new file mode 100644
index 0000000..df6e3bb
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_invalid_for_sniffer
@@ -0,0 +1,4 @@
+# BLASTP 2.2.31+
+asdf
+# Subject: subject1
+# 0 hits found
diff --git a/skbio/io/format/tests/data/blast7_invalid_for_sniffer_2 b/skbio/io/format/tests/data/blast7_invalid_for_sniffer_2
new file mode 100644
index 0000000..751803e
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_invalid_for_sniffer_2
@@ -0,0 +1,4 @@
+# BLASTP 2.2.31+
+# Query: query1
+asdf
+# 0 hits found
diff --git a/skbio/io/format/tests/data/blast7_invalid_gibberish b/skbio/io/format/tests/data/blast7_invalid_gibberish
new file mode 100644
index 0000000..b2ffaff
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_invalid_gibberish
@@ -0,0 +1,3 @@
+aphanbrnaweoowehapivaborebaweo;aweifh[fapofh29r-2u-23
+adjapignqp9rghp4hq2hrq93phq9hqwhpehoweawe
+aidgpuhp0eh9q2yrq02r9q9euq9rhg93fhqefjwphwpahw]
diff --git a/skbio/io/format/tests/data/blast7_invalid_no_data b/skbio/io/format/tests/data/blast7_invalid_no_data
new file mode 100644
index 0000000..b3d2a59
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_invalid_no_data
@@ -0,0 +1,24 @@
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject1
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject2
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject3
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject1
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject2
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject3
+# 0 hits found
diff --git a/skbio/io/format/tests/data/blast7_invalid_too_many_columns b/skbio/io/format/tests/data/blast7_invalid_too_many_columns
new file mode 100644
index 0000000..c7a8f57
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_invalid_too_many_columns
@@ -0,0 +1,21 @@
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject1
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject3
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject1
+# Fields: query id, subject id
+# 1 hits found
+query1	subject1	subject2	query2
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject1
+# Fields: query id, subject id
+# 1 hits found
+query2	subject2
+# BLAST processed 2 queries
diff --git a/skbio/io/format/tests/data/blast7_invalid_unrecognized_field b/skbio/io/format/tests/data/blast7_invalid_unrecognized_field
new file mode 100644
index 0000000..3255fda
--- /dev/null
+++ b/skbio/io/format/tests/data/blast7_invalid_unrecognized_field
@@ -0,0 +1,10 @@
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject1
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject1
+# Fields: query id, sallid
+# 1 hits found
+query1	subject1
diff --git a/skbio/io/format/tests/data/fasta_sequence_collection_different_type b/skbio/io/format/tests/data/fasta_tabular_msa_different_type
similarity index 100%
rename from skbio/io/format/tests/data/fasta_sequence_collection_different_type
rename to skbio/io/format/tests/data/fasta_tabular_msa_different_type
diff --git a/skbio/io/format/tests/data/genbank_5_blanks_start_of_file b/skbio/io/format/tests/data/genbank_5_blanks_start_of_file
new file mode 100644
index 0000000..4bc4da3
--- /dev/null
+++ b/skbio/io/format/tests/data/genbank_5_blanks_start_of_file
@@ -0,0 +1,6 @@
+
+
+
+
+
+LOCUS       NC_000932             154478 bp    DNA     circular PLN 15-APR-2009
diff --git a/skbio/io/format/tests/data/genbank_6_blanks_start_of_file b/skbio/io/format/tests/data/genbank_6_blanks_start_of_file
new file mode 100644
index 0000000..d91d2d3
--- /dev/null
+++ b/skbio/io/format/tests/data/genbank_6_blanks_start_of_file
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+LOCUS       NC_000932             154478 bp    DNA     circular PLN 15-APR-2009
diff --git a/skbio/io/format/tests/data/genbank_missing_locus_name b/skbio/io/format/tests/data/genbank_missing_locus_name
new file mode 100644
index 0000000..1ad43f8
--- /dev/null
+++ b/skbio/io/format/tests/data/genbank_missing_locus_name
@@ -0,0 +1 @@
+LOCUS                    154478 bp    DNA     circular PLN 15-APR-2009
diff --git a/skbio/io/format/tests/data/genbank_multi_records b/skbio/io/format/tests/data/genbank_multi_records
new file mode 100644
index 0000000..a4d9808
--- /dev/null
+++ b/skbio/io/format/tests/data/genbank_multi_records
@@ -0,0 +1,46 @@
+LOCUS       AAB29917   9 aa      linear   BCT   23-SEP-1994
+DEFINITION  L-carnitine amidase {N-terminal}
+ACCESSION   AAB29917
+VERSION     AAB29917.1  GI:545426
+DBSOURCE    accession AAB29917.1
+KEYWORDS    .
+SOURCE      Bacteria
+  ORGANISM  Bacteria
+            Unclassified.
+REFERENCE   1  (residues 1 to 9)
+  AUTHORS   Joeres,U. and Kula,M.R.
+  TITLE     a microbial L-carnitine amidase
+  JOURNAL   AMB 40 (5), 606-610 (1994)
+  PUBMED    7764422
+  REMARK    from the original journal article.
+REFERENCE   1  (residues 1 to 9)
+  AUTHORS   Joeres,U. and Kula,M.R.
+  TITLE     a microbial L-carnitine amidase
+  JOURNAL   AMB 40 (5), 606-610 (1994)
+  PUBMED    7764422
+COMMENT     Method: direct peptide sequencing.
+FEATURES             Location/Qualifiers
+       source        1..9
+                     /organism="Bacteria"
+      Protein        1..>9
+                     /product="L-carnitine amidase"
+ORIGIN
+        1 gsreildfk
+//
+LOCUS       HQ018078   9 bp   DNA   linear   ENV   29-AUG-2010
+DEFINITION  Uncultured Xylanimonas sp.16S, partial
+ACCESSION   HQ018078
+VERSION     HQ018078.1  GI:304421728
+KEYWORDS    ENV.
+SOURCE      uncultured Xylanimonas sp.
+  ORGANISM  uncultured Xylanimonas sp.
+            Bacteria; Actinobacteria; Micrococcales; Promicromonosporaceae; Xylanimonas; environmental samples.
+FEATURES             Location/Qualifiers
+       source        1..9
+                     /country="Brazil: Parana, Paranavai"
+                     /environmental_sample
+         rRNA        complement(<2..>8)
+                     /product="16S ribosomal RNA"
+ORIGIN
+        1 catgcaggc
+//
diff --git a/skbio/io/format/tests/data/genbank_single_record b/skbio/io/format/tests/data/genbank_single_record
new file mode 100644
index 0000000..cf1bf02
--- /dev/null
+++ b/skbio/io/format/tests/data/genbank_single_record
@@ -0,0 +1,27 @@
+LOCUS       ECOALKP   63 bp   mRNA   linear   BCT   26-APR-1993
+DEFINITION  alkaline phosphatase signal mRNA, 5' end.
+ACCESSION   M14399
+VERSION     M14399.1  GI:145229
+KEYWORDS    alkaline phosphatase; signal peptide.
+SOURCE      Escherichia coli
+  ORGANISM  Escherichia coli
+            Bacteria; Proteobacteria; Gammaproteobacteria; Enterobacteriales; Enterobacteriaceae; Escherichia.
+COMMENT     Original source text: E.coli, cDNA to mRNA.
+FEATURES             Location/Qualifiers
+       source        1..63
+                     /db_xref="taxon:562"
+                     /mol_type="mRNA"
+                     /organism="Escherichia coli"
+          CDS        1..>63
+                     /codon_start=1
+                     /db_xref="GI:145230"
+                     /db_xref="taxon:562"
+                     /db_xref="taxon:561"
+                     /note="alkaline phosphatase signal peptide"
+                     /protein_id="AAA23431.1"
+                     /transl_table=11
+                     /translation="MKQSTIALAVLPLLFTPVTKA"
+ORIGIN
+        1 gtgaaacaaa gcactattgc actggctgtc ttaccgttac tgtttacccc tgtgacaaaa
+       61 gcc
+//
diff --git a/skbio/io/format/tests/data/genbank_single_record_lower b/skbio/io/format/tests/data/genbank_single_record_lower
new file mode 100644
index 0000000..5708ef2
--- /dev/null
+++ b/skbio/io/format/tests/data/genbank_single_record_lower
@@ -0,0 +1,4 @@
+LOCUS       AAB29917   9 aa      linear   BCT   23-SEP-1994
+ORIGIN
+        1 gsreildfk
+//
diff --git a/skbio/io/format/tests/data/genbank_single_record_upper b/skbio/io/format/tests/data/genbank_single_record_upper
new file mode 100644
index 0000000..a2a73b7
--- /dev/null
+++ b/skbio/io/format/tests/data/genbank_single_record_upper
@@ -0,0 +1,4 @@
+LOCUS       AAB29917   9 aa      linear   BCT   23-SEP-1994
+ORIGIN
+        1 GSREILDFK
+//
diff --git a/skbio/io/format/tests/data/genbank_w_beginning_whitespace b/skbio/io/format/tests/data/genbank_w_beginning_whitespace
new file mode 100644
index 0000000..3d2ebb8
--- /dev/null
+++ b/skbio/io/format/tests/data/genbank_w_beginning_whitespace
@@ -0,0 +1 @@
+  LOCUS           NC_000932         154478 bp    DNA     circular PLN 15-APR-2009
diff --git a/skbio/io/format/tests/data/legacy9_and_blast7_default b/skbio/io/format/tests/data/legacy9_and_blast7_default
new file mode 100644
index 0000000..c21055f
--- /dev/null
+++ b/skbio/io/format/tests/data/legacy9_and_blast7_default
@@ -0,0 +1,12 @@
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject2
+# Fields: query id, subject id, % identity, alignment length, mismatches, gap opens, q. start, q. end, s. start, s. end, evalue, bit score
+# 1 hits found
+query2	subject2	100.00	8	0	1	0	9	3	10	2e-05	9.8
+# BLASTN 2.2.3 [May-13-2002]
+# Database: other_vertebrate
+# Query: query2
+# Fields: 
+Query id,Subject id,% identity,alignment length,mismatches,gap opens,q. start,q. end,s. start,s. end,evalue,bit score
+query2	subject1	70.00	9	1	0	1	8	4	9	0.025	11.7
diff --git a/skbio/io/format/tests/data/legacy9_invalid_differing_fields b/skbio/io/format/tests/data/legacy9_invalid_differing_fields
new file mode 100644
index 0000000..bdd5b1a
--- /dev/null
+++ b/skbio/io/format/tests/data/legacy9_invalid_differing_fields
@@ -0,0 +1,20 @@
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject1
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query1
+# Subject: subject3
+# 0 hits found
+# BLASTP 2.2.31+
+# Query: query2
+# Subject: subject1
+# Fields: query id, subject id, q. start
+# 1 hits found
+query1	subject1	4
+# BLASTP 2.2.3 [May-13-2002]
+# Database: other_vertebrate
+# Subject: subject1
+# Fields: 
+Query id,Subject id,subject ids
+query2	subject1	N/A
diff --git a/skbio/io/format/tests/data/legacy9_invalid_too_many_columns b/skbio/io/format/tests/data/legacy9_invalid_too_many_columns
new file mode 100644
index 0000000..42989c7
--- /dev/null
+++ b/skbio/io/format/tests/data/legacy9_invalid_too_many_columns
@@ -0,0 +1,6 @@
+# BLASTN 2.2.3 [May-13-2002]
+# Database: other_vertebrate
+# Query: AF178033
+# Fields: 
+Query id,Subject id,% identity,alignment length,mismatches,gap openings,q. start,q. end,s. start,s. end,e-value,bit score
+query1	subject1	80.00	7	2	0	0	9	4	9	0.023	14.4	0.145
diff --git a/skbio/io/format/tests/data/legacy9_mixed_nans b/skbio/io/format/tests/data/legacy9_mixed_nans
new file mode 100644
index 0000000..550710b
--- /dev/null
+++ b/skbio/io/format/tests/data/legacy9_mixed_nans
@@ -0,0 +1,7 @@
+# BLASTN 2.2.3 [May-13-2002]
+# Database: other_vertebrate
+# Query: query2
+# Fields: 
+Query id,Subject id,% identity,alignment length,mismatches,gap opens,q. start,q. end,s. start,s. end,evalue,bit score
+N/A	subject1	N/A	7	1	0	N/A	8	4	10	N/A	15.5
+query2	subject1	90.00	8	N/A	0	0	8	N/A	9	1e-05	N/A
diff --git a/skbio/io/format/tests/data/legacy9_multi_line b/skbio/io/format/tests/data/legacy9_multi_line
new file mode 100644
index 0000000..af859fc
--- /dev/null
+++ b/skbio/io/format/tests/data/legacy9_multi_line
@@ -0,0 +1,8 @@
+# BLASTN 2.2.3 [May-13-2002]
+# Database: other_vertebrate
+# Query: query1
+# Fields: 
+Query id,Subject id,% identity,alignment length,mismatches,gap opens,q. start,q. end,s. start,s. end,evalue,bit score
+query1	subject1	90.00	7	1	0	0	8	4	10	1e-05	15.5
+query1	subject1	70.00	8	0	1	0	9	5	7	0.231	7.8
+query1	subject1	90.00	5	1	1	0	0	2	10	0.022	13.0
diff --git a/skbio/io/format/tests/data/legacy9_single_line b/skbio/io/format/tests/data/legacy9_single_line
new file mode 100644
index 0000000..98bc64b
--- /dev/null
+++ b/skbio/io/format/tests/data/legacy9_single_line
@@ -0,0 +1,6 @@
+# BLASTN 2.2.3 [May-13-2002]
+# Database: other_vertebrate
+# Query: query1
+# Fields: 
+Query id,Subject id,% identity,alignment length,mismatches,gap opens,q. start,q. end,s. start,s. end,evalue,bit score
+query1	subject1	90.00	7	1	0	0	8	4	10	1e-05	15.5
diff --git a/skbio/io/format/tests/data/phylip_invalid_empty_line_after_header b/skbio/io/format/tests/data/phylip_invalid_empty_line_after_header
new file mode 100644
index 0000000..d7c9175
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_empty_line_after_header
@@ -0,0 +1,4 @@
+2 20
+
+foo       ..ACC-GTTG G..AATGC.C
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_invalid_empty_line_before_header b/skbio/io/format/tests/data/phylip_invalid_empty_line_before_header
new file mode 100644
index 0000000..e1a71b5
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_empty_line_before_header
@@ -0,0 +1,4 @@
+
+2 20
+foo       ..ACC-GTTG G..AATGC.C
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_invalid_empty_line_between_seqs b/skbio/io/format/tests/data/phylip_invalid_empty_line_between_seqs
new file mode 100644
index 0000000..03226db
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_empty_line_between_seqs
@@ -0,0 +1,4 @@
+2 20
+foo       ..ACC-GTTG G..AATGC.C
+
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_invalid_header_too_long b/skbio/io/format/tests/data/phylip_invalid_header_too_long
new file mode 100644
index 0000000..49f6bee
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_header_too_long
@@ -0,0 +1,3 @@
+2 20 extra_text
+foo       ..ACC-GTTG G..AATGC.C
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_invalid_header_too_short b/skbio/io/format/tests/data/phylip_invalid_header_too_short
new file mode 100644
index 0000000..0020694
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_header_too_short
@@ -0,0 +1,3 @@
+ 20
+foo       ..ACC-GTTG G..AATGC.C
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_invalid_no_header b/skbio/io/format/tests/data/phylip_invalid_no_header
new file mode 100644
index 0000000..a310a3d
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_no_header
@@ -0,0 +1,2 @@
+foo       ..ACC-GTTG G..AATGC.C
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_invalid_seq_too_long b/skbio/io/format/tests/data/phylip_invalid_seq_too_long
new file mode 100644
index 0000000..395844d
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_seq_too_long
@@ -0,0 +1,3 @@
+2 20
+foo       ..ACC-GTTG G..AATGC.CGA
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_invalid_seq_too_short b/skbio/io/format/tests/data/phylip_invalid_seq_too_short
new file mode 100644
index 0000000..b8c317e
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_seq_too_short
@@ -0,0 +1,3 @@
+2 20
+foo       ..ACC-GTTG G..AATGC
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_invalid_too_few_seqs b/skbio/io/format/tests/data/phylip_invalid_too_few_seqs
new file mode 100644
index 0000000..3dedb46
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_too_few_seqs
@@ -0,0 +1,2 @@
+2 20
+foo       ..ACC-GTTG G..AATGC.C
diff --git a/skbio/io/format/tests/data/phylip_invalid_too_many_seqs b/skbio/io/format/tests/data/phylip_invalid_too_many_seqs
new file mode 100644
index 0000000..a36540d
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_too_many_seqs
@@ -0,0 +1,3 @@
+1 20
+foo       ..ACC-GTTG G..AATGC.C
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_invalid_zero_seq_len b/skbio/io/format/tests/data/phylip_invalid_zero_seq_len
new file mode 100644
index 0000000..d3e1fd6
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_zero_seq_len
@@ -0,0 +1,2 @@
+1 0
+          
diff --git a/skbio/io/format/tests/data/phylip_invalid_zero_seqs b/skbio/io/format/tests/data/phylip_invalid_zero_seqs
new file mode 100644
index 0000000..6e8183b
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_invalid_zero_seqs
@@ -0,0 +1 @@
+0 1
diff --git a/skbio/io/format/tests/data/phylip_variable_length_ids b/skbio/io/format/tests/data/phylip_variable_length_ids
index d658257..b403ea6 100644
--- a/skbio/io/format/tests/data/phylip_variable_length_ids
+++ b/skbio/io/format/tests/data/phylip_variable_length_ids
@@ -1,7 +1,7 @@
 6 6
-          .-ACGU
-a         UGCA-.
-bb        .ACGU-
-1         ugca-.
-abcdefghijAaAaAa
+          .-ACGT
+a         TGCA-.
+bb        .ACGT-
+1         TGCA-.
+abcdefghijAAAAAA
 ab def42ijGGGGGG
diff --git a/skbio/io/format/tests/data/phylip_varied_whitespace_in_seqs b/skbio/io/format/tests/data/phylip_varied_whitespace_in_seqs
new file mode 100644
index 0000000..d905129
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_varied_whitespace_in_seqs
@@ -0,0 +1,3 @@
+2 20
+foo       ..ACC   -GTTG G..AA     TGC.C
+bar         TTACCGGT-G GCCTA-GCAT  
diff --git a/skbio/io/format/tests/data/phylip_whitespace_in_header_1 b/skbio/io/format/tests/data/phylip_whitespace_in_header_1
new file mode 100644
index 0000000..a4d33db
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_whitespace_in_header_1
@@ -0,0 +1,3 @@
+                                     2 20
+foo       ..ACC-GTTG G..AATGC.C
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_whitespace_in_header_2 b/skbio/io/format/tests/data/phylip_whitespace_in_header_2
new file mode 100644
index 0000000..f31613a
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_whitespace_in_header_2
@@ -0,0 +1,3 @@
+      2            20
+foo       ..ACC-GTTG G..AATGC.C
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/phylip_whitespace_in_header_3 b/skbio/io/format/tests/data/phylip_whitespace_in_header_3
new file mode 100644
index 0000000..05e338a
--- /dev/null
+++ b/skbio/io/format/tests/data/phylip_whitespace_in_header_3
@@ -0,0 +1,3 @@
+2        20                          
+foo       ..ACC-GTTG G..AATGC.C
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/format/tests/data/qual_sequence_collection_different_type b/skbio/io/format/tests/data/qual_tabular_msa_different_type
similarity index 100%
rename from skbio/io/format/tests/data/qual_sequence_collection_different_type
rename to skbio/io/format/tests/data/qual_tabular_msa_different_type
diff --git a/skbio/io/format/tests/test_blast6.py b/skbio/io/format/tests/test_blast6.py
new file mode 100644
index 0000000..4f0b78d
--- /dev/null
+++ b/skbio/io/format/tests/test_blast6.py
@@ -0,0 +1,123 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+from six import assertRaisesRegex
+
+import unittest
+
+import pandas as pd
+import numpy as np
+
+from skbio.util import get_data_path, assert_data_frame_almost_equal
+from skbio.io.format.blast6 import _blast6_to_data_frame
+
+
+class TestBlast6Reader(unittest.TestCase):
+    def test_default_valid_single_line(self):
+        fp = get_data_path('blast6_default_single_line')
+        df = _blast6_to_data_frame(fp, default_columns=True)
+        exp = pd.DataFrame([['query1', 'subject2', 75.0, 8.0, 2.0, 0.0, 1.0,
+                             8.0, 2.0, 9.0, 0.06, 11.5]],
+                           columns=['qseqid', 'sseqid', 'pident', 'length',
+                                    'mismatch', 'gapopen', 'qstart', 'qend',
+                                    'sstart', 'send', 'evalue', 'bitscore'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_default_valid_multi_line(self):
+        fp = get_data_path('blast6_default_multi_line')
+        df = _blast6_to_data_frame(fp, default_columns=True)
+        exp = pd.DataFrame([['query1', 'subject2', 100.00, 8.0, 0.0, 0.0, 1.0,
+                             8.0, 3.0, 10.0, 9e-05, 16.9],
+                            ['query1', 'subject2', 75.00, 8.0, 2.0, 0.0, 1.0,
+                             8.0, 2.0, 9.0, 0.060, 11.5],
+                            ['query2', 'subject1', 71.43, 7.0, 2.0, 0.0, 1.0,
+                            7.0, 1.0, 7.0, 0.044, 11.9]],
+                           columns=['qseqid', 'sseqid', 'pident', 'length',
+                                    'mismatch', 'gapopen', 'qstart', 'qend',
+                                    'sstart', 'send', 'evalue', 'bitscore'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_custom_valid_single_line(self):
+        fp = get_data_path('blast6_custom_single_line')
+        df = _blast6_to_data_frame(fp, columns=['qacc', 'qseq', 'btop',
+                                                'sframe', 'ppos',
+                                                'positive', 'gaps'])
+        exp = pd.DataFrame([['query1', 'PAAWWWWW', 8.0, 1.0, 100.00, 8.0,
+                             0.0]], columns=['qacc', 'qseq', 'btop', 'sframe',
+                                             'ppos', 'positive', 'gaps'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_custom_valid_multi_line(self):
+        fp = get_data_path('blast6_custom_multi_line')
+        df = _blast6_to_data_frame(fp, columns=['sacc', 'score', 'gapopen',
+                                                'qcovs', 'sblastnames',
+                                                'sallacc', 'qaccver'])
+        exp = pd.DataFrame([['subject2', 32.0, 0.0, 100.0, np.nan, 'subject2',
+                             'query1'], ['subject2', 18.0, 0.0, 100.0, np.nan,
+                                         'subject2', 'query1'],
+                            ['subject1', 19.0, 0.0, 70.0, np.nan, 'subject1',
+                             'query2']], columns=['sacc', 'score', 'gapopen',
+                                                  'qcovs', 'sblastnames',
+                                                  'sallacc', 'qaccver'])
+        exp['sblastnames'] = exp['sblastnames'].astype(object)
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_valid_nan_handling(self):
+        fp = get_data_path('blast6_custom_mixed_nans')
+        df = _blast6_to_data_frame(fp, columns=['qacc', 'qseq', 'btop',
+                                                'sframe', 'ppos', 'positive',
+                                                'gaps'])
+        exp = pd.DataFrame([[np.nan, 'PAAWWWWW', 8.0, 1.0, 100.00, np.nan,
+                             0.0], ['query1', np.nan, 8.0, 1.0, np.nan, 8.0,
+                                    0.0]], columns=['qacc', 'qseq', 'btop',
+                                                    'sframe', 'ppos',
+                                                    'positive', 'gaps'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_valid_minimal(self):
+        fp = get_data_path('blast6_custom_minimal')
+        df = _blast6_to_data_frame(fp, columns=['sacc'])
+        exp = pd.DataFrame([['subject2']], columns=['sacc'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_custom_and_default_passed_error(self):
+        fp = get_data_path('blast6_default_single_line')
+        with assertRaisesRegex(self, ValueError,
+                               "`columns` and `default_columns`"):
+            _blast6_to_data_frame(fp, columns=['qseqid'], default_columns=True)
+
+    def test_no_columns_passed_error(self):
+        fp = get_data_path('blast6_default_single_line')
+        with assertRaisesRegex(self, ValueError,
+                               "Either `columns` or `default_columns`"):
+            _blast6_to_data_frame(fp)
+
+    def test_wrong_amount_of_columns_error(self):
+        fp = get_data_path('blast6_invalid_number_of_columns')
+        with assertRaisesRegex(self, ValueError,
+                               "Specified number of columns \(12\).*\(10\)"):
+            _blast6_to_data_frame(fp, default_columns=True)
+
+    def test_different_data_in_same_column(self):
+        fp = get_data_path('blast6_invalid_type_in_column')
+        with self.assertRaises(ValueError):
+            _blast6_to_data_frame(fp, default_columns=True)
+
+    def test_wrong_column_name_error(self):
+        fp = get_data_path('blast6_default_single_line')
+        with assertRaisesRegex(self, ValueError,
+                               "Unrecognized column.*'abcd'"):
+            _blast6_to_data_frame(fp, columns=['qseqid', 'sseqid', 'pident',
+                                               'length', 'mismatch', 'gapopen',
+                                               'qstart', 'qend', 'sstart',
+                                               'send', 'abcd', 'bitscore'])
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/io/format/tests/test_blast7.py b/skbio/io/format/tests/test_blast7.py
new file mode 100644
index 0000000..f323077
--- /dev/null
+++ b/skbio/io/format/tests/test_blast7.py
@@ -0,0 +1,212 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+from six import assertRaisesRegex
+
+import unittest
+
+import pandas as pd
+import numpy as np
+
+from skbio.util import get_data_path, assert_data_frame_almost_equal
+from skbio.io import BLAST7FormatError
+from skbio.io.format.blast7 import _blast7_to_data_frame, _blast7_sniffer
+
+
+class TestBLAST7Sniffer(unittest.TestCase):
+    def setUp(self):
+        self.positives = [get_data_path(e) for e in [
+            'blast7_default_single_line',
+            'blast7_default_multi_line',
+            'blast7_custom_minimal',
+            'blast7_custom_single_line',
+            'blast7_custom_multi_line',
+            'blast7_custom_mixed_nans',
+            'blast7_invalid_differing_fields',
+            'blast7_invalid_no_data',
+            'blast7_invalid_too_many_columns',
+            'legacy9_and_blast7_default',
+            'legacy9_invalid_too_many_columns',
+            'legacy9_mixed_nans',
+            'legacy9_multi_line',
+            'legacy9_single_line']]
+
+        self.negatives = [get_data_path(e) for e in [
+            'blast7_invalid_gibberish',
+            'blast7_invalid_for_sniffer',
+            'blast7_invalid_for_sniffer_2',
+            'empty']]
+
+    def test_positives(self):
+        for fp in self.positives:
+            self.assertEqual(_blast7_sniffer(fp), (True, {}))
+
+    def test_negatives(self):
+        for fp in self.negatives:
+            self.assertEqual(_blast7_sniffer(fp), (False, {}))
+
+
+class TestBlast7Reader(unittest.TestCase):
+    def test_default_valid_single_line(self):
+        fp = get_data_path('blast7_default_single_line')
+        df = _blast7_to_data_frame(fp)
+        exp = pd.DataFrame([['query1', 'subject2', 100.00, 8.0, 0.0, 0.0, 1.0,
+                             8.0, 3.0, 10.0, 9e-05, 16.9]],
+                           columns=['qseqid', 'sseqid', 'pident', 'length',
+                                    'mismatch', 'gapopen', 'qstart', 'qend',
+                                    'sstart', 'send', 'evalue', 'bitscore'])
+        assert_data_frame_almost_equal(df, exp)
+
+        fp = get_data_path('legacy9_single_line')
+        df = _blast7_to_data_frame(fp)
+        exp = pd.DataFrame([['query1', 'subject1', 90.00, 7.0, 1.0, 0.0, 0.0,
+                             8.0, 4.0, 10.0, 1e-05, 15.5]],
+                           columns=['qseqid', 'sseqid', 'pident', 'length',
+                                    'mismatch', 'gapopen', 'qstart', 'qend',
+                                    'sstart', 'send', 'evalue', 'bitscore'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_default_valid_multi_line(self):
+        fp = get_data_path('blast7_default_multi_line')
+        df = _blast7_to_data_frame(fp)
+        exp = pd.DataFrame([['query1', 'subject2', 70.00, 5.0, 0.0, 0.0, 7.0,
+                             60.0, 3.0, 100.0, 9e-05, 10.5],
+                            ['query1', 'subject2', 30.00, 8.0, 0.0, 0.0, 6.0,
+                             15.0, 1.0, 100.0, 0.053, 12.0],
+                            ['query1', 'subject2', 90.00, 2.0, 0.0, 0.0, 9.0,
+                             35.0, 2.0, 100.0, 0.002, 8.3]],
+                           columns=['qseqid', 'sseqid', 'pident', 'length',
+                                    'mismatch', 'gapopen', 'qstart', 'qend',
+                                    'sstart', 'send', 'evalue', 'bitscore'])
+        assert_data_frame_almost_equal(df, exp)
+
+        fp = get_data_path('legacy9_multi_line')
+        df = _blast7_to_data_frame(fp)
+        exp = pd.DataFrame([['query1', 'subject1', 90.00, 7.0, 1.0, 0.0, 0.0,
+                             8.0, 4.0, 10.0, 1e-05, 15.5],
+                            ['query1', 'subject1', 70.00, 8.0, 0.0, 1.0, 0.0,
+                             9.0, 5.0, 7.0, 0.231, 7.8],
+                            ['query1', 'subject1', 90.00, 5.0, 1.0, 1.0, 0.0,
+                             0.0, 2.0, 10.0, 0.022, 13.0]],
+                           columns=['qseqid', 'sseqid', 'pident', 'length',
+                                    'mismatch', 'gapopen', 'qstart', 'qend',
+                                    'sstart', 'send', 'evalue', 'bitscore'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_default_valid_mixed_output(self):
+        fp = get_data_path('legacy9_and_blast7_default')
+        df = _blast7_to_data_frame(fp)
+        exp = pd.DataFrame([['query2', 'subject2', 100.00, 8.0, 0.0, 1.0, 0.0,
+                             9.0, 3.0, 10.0, 2e-05, 9.8],
+                            ['query2', 'subject1', 70.00, 9.0, 1.0, 0.0, 1.0,
+                             8.0, 4.0, 9.0, 0.025, 11.7]],
+                           columns=['qseqid', 'sseqid', 'pident', 'length',
+                                    'mismatch', 'gapopen', 'qstart', 'qend',
+                                    'sstart', 'send', 'evalue', 'bitscore'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_custom_valid_minimal(self):
+        fp = get_data_path("blast7_custom_minimal")
+        df = _blast7_to_data_frame(fp)
+        exp = pd.DataFrame([['query1']], columns=['qseqid'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_custom_valid_single_line(self):
+        fp = get_data_path("blast7_custom_single_line")
+        df = _blast7_to_data_frame(fp)
+        exp = pd.DataFrame([['query1', 100.00, 100.00, 8.0, 0.0, 16.9, 8.0,
+                             'PAAWWWWW']],
+                           columns=['qseqid', 'ppos', 'pident', 'length',
+                                    'sgi', 'bitscore', 'qend', 'qseq'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_custom_valid_multi_line(self):
+        fp = get_data_path("blast7_custom_multi_line")
+        df = _blast7_to_data_frame(fp)
+        exp = pd.DataFrame([[1.0, 8.0, 3.0, 10.0, 8.0, 0.0, 1.0, 'query1',
+                             'subject2'],
+                            [2.0, 5.0, 2.0, 15.0, 8.0, 0.0, 2.0, 'query1',
+                             'subject2'],
+                            [1.0, 6.0, 2.0, 12.0, 8.0, 0.0, 1.0, 'query1',
+                             'subject2']],
+                           columns=['qstart', 'qend', 'sstart', 'send',
+                                    'nident', 'mismatch', 'sframe',
+                                    'qaccver', 'saccver'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_custom_valid_mixed_nans(self):
+        fp = get_data_path("blast7_custom_mixed_nans")
+        df = _blast7_to_data_frame(fp)
+        exp = pd.DataFrame([[0.0, np.nan, 8.0, 13.0, 1.0, 1.0, np.nan,
+                             'subject2'],
+                            [np.nan, 0.0, 8.0, np.nan, 1.0, 1.0, 'query1',
+                            np.nan]],
+                           columns=['qgi', 'sgi', 'qlen', 'slen', 'qframe',
+                                    'sframe', 'qseqid', 'sseqid'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_legacy9_valid_mixed_nans(self):
+        fp = get_data_path("legacy9_mixed_nans")
+        df = _blast7_to_data_frame(fp)
+        exp = pd.DataFrame([[np.nan, 'subject1', np.nan, 7.0, 1.0, 0.0, np.nan,
+                             8.0, 4.0, 10.0, np.nan, 15.5],
+                            ['query2', 'subject1', 90.00, 8.0, np.nan, 0.0,
+                             0.0, 8.0, np.nan, 9.0, 1e-05, np.nan]],
+                           columns=['qseqid', 'sseqid', 'pident', 'length',
+                                    'mismatch', 'gapopen', 'qstart', 'qend',
+                                    'sstart', 'send', 'evalue', 'bitscore'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_differing_fields_error(self):
+        fp = get_data_path("blast7_invalid_differing_fields")
+        with assertRaisesRegex(self, BLAST7FormatError,
+                               "Fields \[.*'qseqid', .*'sseqid', .*'qstart'\]"
+                               " do.*\[.*'qseqid', .*'sseqid', .*'score'\]"):
+            _blast7_to_data_frame(fp)
+        fp = get_data_path("legacy9_invalid_differing_fields")
+        with assertRaisesRegex(self, BLAST7FormatError,
+                               "Fields \[.*'qseqid', .*'sseqid', .*'qstart'\]"
+                               " do.*\[.*'qseqid', .*'sseqid', "
+                               ".*'sallseqid'\]"):
+            _blast7_to_data_frame(fp)
+
+    def test_no_data_error(self):
+        fp = get_data_path("blast7_invalid_gibberish")
+        with assertRaisesRegex(self, BLAST7FormatError,
+                               "File contains no"):
+            _blast7_to_data_frame(fp)
+        fp = get_data_path("blast7_invalid_no_data")
+        with assertRaisesRegex(self, BLAST7FormatError,
+                               "File contains no"):
+            _blast7_to_data_frame(fp)
+        fp = get_data_path("empty")
+        with assertRaisesRegex(self, BLAST7FormatError,
+                               "File contains no"):
+            _blast7_to_data_frame(fp)
+
+    def test_wrong_amount_of_columns_error(self):
+        fp = get_data_path("blast7_invalid_too_many_columns")
+        with assertRaisesRegex(self, BLAST7FormatError,
+                               "Number of fields.*\(2\)"):
+            _blast7_to_data_frame(fp)
+        fp = get_data_path("legacy9_invalid_too_many_columns")
+        with assertRaisesRegex(self, BLAST7FormatError,
+                               "Number of fields.*\(12\)"):
+            _blast7_to_data_frame(fp)
+
+    def test_unrecognized_field_error(self):
+        fp = get_data_path("blast7_invalid_unrecognized_field")
+        with assertRaisesRegex(self, BLAST7FormatError,
+                               "Unrecognized field \(.*'sallid'\)"):
+            _blast7_to_data_frame(fp)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/io/format/tests/test_clustal.py b/skbio/io/format/tests/test_clustal.py
index 32fe0a5..ac91338 100644
--- a/skbio/io/format/tests/test_clustal.py
+++ b/skbio/io/format/tests/test_clustal.py
@@ -8,17 +8,40 @@
 
 from __future__ import absolute_import, division, print_function
 
+import string
 from io import StringIO
 from unittest import TestCase, main
 
+import six
+
+from skbio import TabularMSA
+from skbio.sequence._iupac_sequence import IUPACSequence
+from skbio.util._decorator import classproperty, overrides
 from skbio.io.format.clustal import (
-    _clustal_to_alignment, _alignment_to_clustal, _clustal_sniffer,
+    _clustal_to_tabular_msa, _tabular_msa_to_clustal, _clustal_sniffer,
     _is_clustal_seq_line, _delete_trailing_number, _check_length,
     _label_line_parser)
 
 from skbio.io import ClustalFormatError
 
 
+class CustomSequence(IUPACSequence):
+    @classproperty
+    @overrides(IUPACSequence)
+    def gap_chars(cls):
+        return set('-.')
+
+    @classproperty
+    @overrides(IUPACSequence)
+    def nondegenerate_chars(cls):
+        return set(string.ascii_letters)
+
+    @classproperty
+    @overrides(IUPACSequence)
+    def degenerate_map(cls):
+        return {}
+
+
 class ClustalHelperTests(TestCase):
     def test_label_line_parser(self):
         self.assertEqual(_label_line_parser(StringIO(u'abc\tucag')),
@@ -205,32 +228,33 @@ CGAUCAGUCAGUCGAU---------- 34
 UGCUGCAUCA---------------- 33
 *     ***""")]
 
-    def test_alignment_to_clustal_with_empty_input(self):
-        result = _clustal_to_alignment(StringIO())
+    def test_tabular_msa_to_clustal_with_empty_input(self):
+        result = _clustal_to_tabular_msa(StringIO(),
+                                         constructor=CustomSequence)
         self.assertEqual(dict(result), {})
 
-    def test_alignment_to_clustal_with_bad_input(self):
+    def test_tabular_msa_to_clustal_with_bad_input(self):
         BAD = StringIO(u'\n'.join(['dshfjsdfhdfsj', 'hfsdjksdfhjsdf']))
-        result = _clustal_to_alignment(BAD, strict=False)
-        self.assertEqual(dict(result), {})
-        # should fail unless we turned strict processing off
+
         with self.assertRaises(ClustalFormatError):
-            BAD.seek(0)
-            dict(_clustal_to_alignment(BAD))
+            dict(_clustal_to_tabular_msa(BAD, constructor=CustomSequence))
 
-    def test_valid_alignment_to_clustal_and_clustal_to_alignment(self):
+    def test_valid_tabular_msa_to_clustal_and_clustal_to_tabular_msa(self):
         for valid_out in self.valid_clustal_out:
-            result_before = _clustal_to_alignment(valid_out)
+            result_before = _clustal_to_tabular_msa(
+                    valid_out, constructor=CustomSequence)
             with StringIO() as fh:
-                _alignment_to_clustal(result_before, fh)
+                _tabular_msa_to_clustal(result_before, fh)
                 fh.seek(0)
-                result_after = _clustal_to_alignment(fh)
+                result_after = _clustal_to_tabular_msa(
+                        fh, constructor=CustomSequence)
             self.assertEqual(result_before, result_after)
 
-    def test_invalid_alignment_to_clustal_and_clustal_to_alignment(self):
+    def test_invalid_tabular_msa_to_clustal_and_clustal_to_tabular_msa(self):
         for invalid_out in self.invalid_clustal_out:
             with self.assertRaises(ClustalFormatError):
-                dict(_clustal_to_alignment(invalid_out, strict=True))
+                dict(_clustal_to_tabular_msa(invalid_out,
+                                             constructor=CustomSequence))
 
     def test_clustal_sniffer_valid_files(self):
         for valid_out in self.valid_clustal_out:
@@ -243,5 +267,28 @@ UGCUGCAUCA---------------- 33
         # in self.invalid_clustal_out since an empty file is a valid output)
         self.assertEqual(_clustal_sniffer(StringIO()), (False, {}))
 
+    def test_no_constructor(self):
+        with six.assertRaisesRegex(self, ValueError, "`constructor`"):
+            _clustal_to_tabular_msa(self.valid_clustal_out[0])
+
+    def test_duplicate_labels(self):
+        msa = TabularMSA([CustomSequence('foo'),
+                          CustomSequence('bar')], index=['a', 'a'])
+
+        with six.assertRaisesRegex(self, ClustalFormatError, "index.*unique"):
+            with StringIO() as fh:
+                _tabular_msa_to_clustal(msa, fh)
+
+    def test_invalid_lengths(self):
+        fh = StringIO(
+            u"CLUSTAL\n"
+            "\n\n"
+            "abc             GCAU\n"
+            "def             -----\n")
+
+        with six.assertRaisesRegex(self, ClustalFormatError, "not aligned"):
+            _clustal_to_tabular_msa(fh, constructor=CustomSequence)
+
+
 if __name__ == '__main__':
     main()
diff --git a/skbio/io/format/tests/test_fasta.py b/skbio/io/format/tests/test_fasta.py
index 285cc51..413053f 100644
--- a/skbio/io/format/tests/test_fasta.py
+++ b/skbio/io/format/tests/test_fasta.py
@@ -11,21 +11,40 @@ from future.builtins import map, range, zip
 import six
 
 import io
+import string
 from unittest import TestCase, main
 from functools import partial
 
 import numpy as np
 
-from skbio import (Sequence, DNA, RNA, Protein, SequenceCollection, Alignment)
+from skbio import Sequence, DNA, RNA, Protein, TabularMSA
 from skbio.io import FASTAFormatError, QUALFormatError
 from skbio.io.format.fasta import (
-    _fasta_sniffer, _fasta_to_generator, _fasta_to_biological_sequence,
-    _fasta_to_dna_sequence, _fasta_to_rna_sequence, _fasta_to_protein_sequence,
-    _fasta_to_sequence_collection, _fasta_to_alignment, _generator_to_fasta,
-    _biological_sequence_to_fasta, _dna_sequence_to_fasta,
-    _rna_sequence_to_fasta, _protein_sequence_to_fasta,
-    _sequence_collection_to_fasta, _alignment_to_fasta)
+    _fasta_sniffer, _fasta_to_generator, _fasta_to_sequence,
+    _fasta_to_dna, _fasta_to_rna, _fasta_to_protein,
+    _fasta_to_tabular_msa, _generator_to_fasta,
+    _sequence_to_fasta, _dna_to_fasta, _rna_to_fasta, _protein_to_fasta,
+    _tabular_msa_to_fasta)
+from skbio.sequence._iupac_sequence import IUPACSequence
 from skbio.util import get_data_path
+from skbio.util._decorator import classproperty, overrides
+
+
+class CustomSequence(IUPACSequence):
+    @classproperty
+    @overrides(IUPACSequence)
+    def gap_chars(cls):
+        return set('-.')
+
+    @classproperty
+    @overrides(IUPACSequence)
+    def nondegenerate_chars(cls):
+        return set(string.ascii_letters)
+
+    @classproperty
+    @overrides(IUPACSequence)
+    def degenerate_map(cls):
+        return {}
 
 
 class SnifferTests(TestCase):
@@ -52,7 +71,7 @@ class SnifferTests(TestCase):
             'fasta_prot_seqs_odd_labels',
             'fasta_single_seq',
             'fasta_id_whitespace_replacement_empty_str',
-            'fasta_sequence_collection_different_type',
+            'fasta_tabular_msa_different_type',
             'fasta_id_whitespace_replacement_multi_char',
             'fasta_single_bio_seq_defaults',
             'fasta_single_prot_seq_defaults',
@@ -110,7 +129,7 @@ class SnifferTests(TestCase):
             'qual_multi_seq',
             'qual_multi_seq_roundtrip',
             'qual_prot_seqs_odd_labels',
-            'qual_sequence_collection_different_type',
+            'qual_tabular_msa_different_type',
             'qual_single_bio_seq_non_defaults',
             'qual_single_dna_seq_non_defaults',
             'qual_single_prot_seq_non_defaults',
@@ -243,10 +262,8 @@ class ReaderTests(TestCase):
             list(map(get_data_path, ['qual_prot_seqs_odd_labels']))
         )
 
-        # sequences that can be loaded into a SequenceCollection or Alignment.
-        # they are also a different type than Sequence in order to
-        # exercise the constructor parameter
-        self.sequence_collection_different_type = (
+        # sequences that can be loaded into a TabularMSA
+        self.tabular_msa_different_type = (
             [RNA('aUG',
                  metadata={'id': '', 'description': ''},
                  positional_metadata={'quality':
@@ -265,9 +282,9 @@ class ReaderTests(TestCase):
                  lowercase='introns')],
             {'constructor': partial(RNA, lowercase='introns')},
             list(map(get_data_path,
-                     ['fasta_sequence_collection_different_type'])),
+                     ['fasta_tabular_msa_different_type'])),
             list(map(get_data_path,
-                     ['qual_sequence_collection_different_type']))
+                     ['qual_tabular_msa_different_type']))
         )
 
         self.lowercase_seqs = (
@@ -400,7 +417,7 @@ class ReaderTests(TestCase):
              {'qual': get_data_path('qual_3_seqs_defaults_length_mismatch')},
              ValueError,
              'Number of positional metadata values \(3\) must match the '
-             'number of characters in the sequence \(4\)\.'),
+             'positional metadata axis length \(4\)\.'),
 
             # invalid qual scores (string value can't be converted to integer)
             ('fasta_3_seqs_defaults',
@@ -441,7 +458,7 @@ class ReaderTests(TestCase):
     def test_fasta_to_generator_valid_files(self):
         test_cases = (self.empty, self.single, self.multi,
                       self.odd_labels_different_type,
-                      self.sequence_collection_different_type,
+                      self.tabular_msa_different_type,
                       self.lowercase_seqs)
 
         # Strategy:
@@ -480,19 +497,19 @@ class ReaderTests(TestCase):
 
     def test_fasta_to_any_sequence(self):
         for constructor, reader_fn in ((Sequence,
-                                        _fasta_to_biological_sequence),
+                                        _fasta_to_sequence),
                                        (partial(DNA, validate=False,
                                                 lowercase='introns'),
-                                        partial(_fasta_to_dna_sequence,
+                                        partial(_fasta_to_dna,
                                                 validate=False,
                                                 lowercase='introns')),
                                        (partial(RNA, validate=False,
                                                 lowercase='introns'),
-                                        partial(_fasta_to_rna_sequence,
+                                        partial(_fasta_to_rna,
                                                 validate=False,
                                                 lowercase='introns')),
                                        (partial(Protein, lowercase='introns'),
-                                        partial(_fasta_to_protein_sequence,
+                                        partial(_fasta_to_protein,
                                                 validate=False,
                                                 lowercase='introns'))):
 
@@ -605,32 +622,37 @@ class ReaderTests(TestCase):
                                                '`seq_num`=0'):
                         reader_fn(fasta_fp, seq_num=0, qual=qual_fp)
 
-    def test_fasta_to_sequence_collection_and_alignment(self):
+    def test_fasta_to_tabular_msa(self):
         test_cases = (self.empty, self.single,
-                      self.sequence_collection_different_type,
+                      self.tabular_msa_different_type,
                       self.lowercase_seqs)
 
-        for constructor, reader_fn in ((SequenceCollection,
-                                        _fasta_to_sequence_collection),
-                                       (Alignment,
-                                        _fasta_to_alignment)):
-            # see comment in test_fasta_to_generator_valid_files (above) for
-            # testing strategy
-            for exp_list, kwargs, fasta_fps, qual_fps in test_cases:
-                exp = constructor(exp_list)
+        # see comment in test_fasta_to_generator_valid_files (above) for
+        # testing strategy
+        for exp_list, kwargs, fasta_fps, qual_fps in test_cases:
+            if 'constructor' not in kwargs:
+                kwargs['constructor'] = CustomSequence
+                exp_list = [CustomSequence(seq) for seq in exp_list]
 
-                for fasta_fp in fasta_fps:
-                    obs = reader_fn(fasta_fp, **kwargs)
+            exp = TabularMSA(exp_list)
 
-                    self.assertEqual(len(obs), len(exp))
-                    for o, e in zip(obs, exp):
-                        e = e.copy()
-                        del e.positional_metadata['quality']
-                        self.assertEqual(o, e)
+            for fasta_fp in fasta_fps:
+                obs = _fasta_to_tabular_msa(fasta_fp, **kwargs)
+
+                self.assertEqual(len(obs), len(exp))
+                for o, e in zip(obs, exp):
+                    e = e.copy()
+                    del e.positional_metadata['quality']
+                    self.assertEqual(o, e)
+
+                for qual_fp in qual_fps:
+                    obs = _fasta_to_tabular_msa(fasta_fp, qual=qual_fp,
+                                                **kwargs)
+                    self.assertEqual(obs, exp)
 
-                    for qual_fp in qual_fps:
-                        obs = reader_fn(fasta_fp, qual=qual_fp, **kwargs)
-                        self.assertEqual(obs, exp)
+    def test_fasta_to_tabular_msa_no_constructor(self):
+        with six.assertRaisesRegex(self, ValueError, '`constructor`'):
+            _fasta_to_tabular_msa(get_data_path('fasta_single_seq'))
 
 
 class WriterTests(TestCase):
@@ -669,22 +691,22 @@ class WriterTests(TestCase):
             lowercase='introns')
 
         seqs = [
-            RNA('UUUU',
+            CustomSequence(
+                'UUUU',
                 metadata={'id': 's\te\tq\t1', 'description': 'desc\n1'},
                 positional_metadata={'quality': [1234, 0, 0, 2]},
                 lowercase='introns'),
-            Sequence(
+            CustomSequence(
                 'CATC',
                 metadata={'id': 's\te\tq\t2', 'description': 'desc\n2'},
                 positional_metadata={'quality': [1, 11, 111, 11112]}),
-            Protein('sits',
-                    metadata={'id': 's\te\tq\t3', 'description': 'desc\n3'},
-                    positional_metadata={'quality': [12345, 678909, 999999,
-                                                     4242424242]},
-                    validate=False)
+            CustomSequence(
+                'sits',
+                metadata={'id': 's\te\tq\t3', 'description': 'desc\n3'},
+                positional_metadata={'quality': [12345, 678909, 999999,
+                                                 4242424242]})
         ]
-        self.seq_coll = SequenceCollection(seqs)
-        self.align = Alignment(seqs)
+        self.msa = TabularMSA(seqs)
 
         def empty_gen():
             raise StopIteration()
@@ -858,17 +880,6 @@ class WriterTests(TestCase):
                 _generator_to_fasta(obj, fh, **kwargs)
             fh.close()
 
-    def test_generator_to_fasta_sequence_lowercase_exception(self):
-        seq = Sequence('ACgt', metadata={'id': ''})
-        fh = io.StringIO()
-        with six.assertRaisesRegex(self, AttributeError,
-                                   "lowercase specified but class Sequence "
-                                   "does not support lowercase "
-                                   "functionality"):
-            _generator_to_fasta(SequenceCollection([seq]), fh,
-                                lowercase='introns')
-        fh.close()
-
     # light testing of object -> fasta writers to ensure interface is present
     # and kwargs are passed through. extensive testing of underlying writer is
     # performed above
@@ -879,27 +890,28 @@ class WriterTests(TestCase):
         id_ = 'f o o'
         desc = 'b\na\nr'
         test_data = (
-            (_biological_sequence_to_fasta,
+            (partial(_sequence_to_fasta, lowercase='introns'),
              Sequence('ACgt', metadata={'id': id_, 'description': desc},
-                      positional_metadata={'quality': range(1, 5)}),
+                      positional_metadata={'quality': range(1, 5)},
+                      lowercase='introns'),
              ('fasta_single_bio_seq_defaults',
               'fasta_single_bio_seq_non_defaults',
               'qual_single_bio_seq_non_defaults')),
-            (partial(_dna_sequence_to_fasta, lowercase='introns'),
+            (partial(_dna_to_fasta, lowercase='introns'),
              DNA('TAcg', metadata={'id': id_, 'description': desc},
                  positional_metadata={'quality': range(4)},
                  lowercase='introns'),
              ('fasta_single_dna_seq_defaults',
               'fasta_single_dna_seq_non_defaults',
               'qual_single_dna_seq_non_defaults')),
-            (partial(_rna_sequence_to_fasta, lowercase='introns'),
+            (partial(_rna_to_fasta, lowercase='introns'),
              RNA('uaCG', metadata={'id': id_, 'description': desc},
                  positional_metadata={'quality': range(2, 6)},
                  lowercase='introns'),
              ('fasta_single_rna_seq_defaults',
               'fasta_single_rna_seq_non_defaults',
               'qual_single_rna_seq_non_defaults')),
-            (partial(_protein_sequence_to_fasta, lowercase='introns'),
+            (partial(_protein_to_fasta, lowercase='introns'),
              Protein('PqQ', metadata={'id': id_, 'description': desc},
                      positional_metadata={'quality': [42, 41, 40]},
                      lowercase='introns'),
@@ -940,56 +952,36 @@ class WriterTests(TestCase):
             self.assertEqual(obs_qual, exp_qual)
 
     def test_any_sequences_to_fasta(self):
-        for fn, obj in ((_sequence_collection_to_fasta, self.seq_coll),
-                        (_alignment_to_fasta, self.align)):
-            # test writing with default parameters
-            fh = io.StringIO()
-            fn(obj, fh)
-            obs = fh.getvalue()
-            fh.close()
-
-            with io.open(get_data_path('fasta_3_seqs_defaults')) as fh:
-                exp = fh.read()
+        # test writing with default parameters
+        fh = io.StringIO()
+        _tabular_msa_to_fasta(self.msa, fh)
+        obs = fh.getvalue()
+        fh.close()
 
-            self.assertEqual(obs, exp)
+        with io.open(get_data_path('fasta_3_seqs_defaults')) as fh:
+            exp = fh.read()
 
-            # test writing with non-defaults
-            fasta_fh = io.StringIO()
-            qual_fh = io.StringIO()
-            fn(obj, fasta_fh, id_whitespace_replacement='*',
-               description_newline_replacement='+', max_width=3, qual=qual_fh)
-            obs_fasta = fasta_fh.getvalue()
-            obs_qual = qual_fh.getvalue()
-            fasta_fh.close()
-            qual_fh.close()
+        self.assertEqual(obs, exp)
 
-            with io.open(get_data_path('fasta_3_seqs_non_defaults')) as fh:
-                exp_fasta = fh.read()
-            with io.open(get_data_path('qual_3_seqs_non_defaults')) as fh:
-                exp_qual = fh.read()
+        # test writing with non-defaults
+        fasta_fh = io.StringIO()
+        qual_fh = io.StringIO()
+        _tabular_msa_to_fasta(self.msa, fasta_fh,
+                              id_whitespace_replacement='*',
+                              description_newline_replacement='+', max_width=3,
+                              qual=qual_fh)
+        obs_fasta = fasta_fh.getvalue()
+        obs_qual = qual_fh.getvalue()
+        fasta_fh.close()
+        qual_fh.close()
 
-            self.assertEqual(obs_fasta, exp_fasta)
-            self.assertEqual(obs_qual, exp_qual)
+        with io.open(get_data_path('fasta_3_seqs_non_defaults')) as fh:
+            exp_fasta = fh.read()
+        with io.open(get_data_path('qual_3_seqs_non_defaults')) as fh:
+            exp_qual = fh.read()
 
-            fh2 = io.StringIO()
-            with six.assertRaisesRegex(self, AttributeError,
-                                       "lowercase specified but class "
-                                       "Sequence does not support lowercase "
-                                       "functionality"):
-                fn(obj, fh2, lowercase='introns')
-            fh2.close()
-
-            fasta_fh2 = io.StringIO()
-            qual_fh2 = io.StringIO()
-            with six.assertRaisesRegex(self, AttributeError,
-                                       "lowercase specified but class "
-                                       "Sequence does not support lowercase "
-                                       "functionality"):
-                fn(obj, fasta_fh2, id_whitespace_replacement='*',
-                   description_newline_replacement='+', max_width=3,
-                   qual=qual_fh2, lowercase='introns')
-            fasta_fh2.close()
-            qual_fh2.close()
+        self.assertEqual(obs_fasta, exp_fasta)
+        self.assertEqual(obs_qual, exp_qual)
 
 
 class RoundtripTests(TestCase):
@@ -1019,52 +1011,50 @@ class RoundtripTests(TestCase):
             self.assertEqual(obs_fasta, exp_fasta)
             self.assertEqual(obs_qual, exp_qual)
 
-    def test_roundtrip_sequence_collections_and_alignments(self):
+    def test_roundtrip_tabular_msa(self):
         fps = list(map(lambda e: list(map(get_data_path, e)),
                        [('empty', 'empty'),
-                        ('fasta_sequence_collection_different_type',
-                         'qual_sequence_collection_different_type')]))
+                        ('fasta_tabular_msa_different_type',
+                         'qual_tabular_msa_different_type')]))
 
-        for reader, writer in ((_fasta_to_sequence_collection,
-                                _sequence_collection_to_fasta),
-                               (_fasta_to_alignment,
-                                _alignment_to_fasta)):
-            for fasta_fp, qual_fp in fps:
-                # read
-                obj1 = reader(fasta_fp, qual=qual_fp)
+        reader = partial(_fasta_to_tabular_msa, constructor=CustomSequence)
+        writer = _tabular_msa_to_fasta
+        for fasta_fp, qual_fp in fps:
+            # read
+            obj1 = reader(fasta_fp, qual=qual_fp)
 
-                # write
-                fasta_fh = io.StringIO()
-                qual_fh = io.StringIO()
-                writer(obj1, fasta_fh, qual=qual_fh)
-                fasta_fh.seek(0)
-                qual_fh.seek(0)
+            # write
+            fasta_fh = io.StringIO()
+            qual_fh = io.StringIO()
+            writer(obj1, fasta_fh, qual=qual_fh)
+            fasta_fh.seek(0)
+            qual_fh.seek(0)
 
-                # read
-                obj2 = reader(fasta_fh, qual=qual_fh)
-                fasta_fh.close()
-                qual_fh.close()
+            # read
+            obj2 = reader(fasta_fh, qual=qual_fh)
+            fasta_fh.close()
+            qual_fh.close()
 
-                self.assertEqual(obj1, obj2)
+            self.assertEqual(obj1, obj2)
 
     def test_roundtrip_biological_sequences(self):
         fps = list(map(lambda e: list(map(get_data_path, e)),
                        [('fasta_multi_seq_roundtrip',
                          'qual_multi_seq_roundtrip'),
-                        ('fasta_sequence_collection_different_type',
-                         'qual_sequence_collection_different_type')]))
+                        ('fasta_tabular_msa_different_type',
+                         'qual_tabular_msa_different_type')]))
 
-        for reader, writer in ((_fasta_to_biological_sequence,
-                                _biological_sequence_to_fasta),
-                               (partial(_fasta_to_dna_sequence,
+        for reader, writer in ((_fasta_to_sequence,
+                                _sequence_to_fasta),
+                               (partial(_fasta_to_dna,
                                         validate=False),
-                                _dna_sequence_to_fasta),
-                               (partial(_fasta_to_rna_sequence,
+                                _dna_to_fasta),
+                               (partial(_fasta_to_rna,
                                         validate=False),
-                                _rna_sequence_to_fasta),
-                               (partial(_fasta_to_protein_sequence,
+                                _rna_to_fasta),
+                               (partial(_fasta_to_protein,
                                         validate=False),
-                                _protein_sequence_to_fasta)):
+                                _protein_to_fasta)):
             for fasta_fp, qual_fp in fps:
                 # read
                 obj1 = reader(fasta_fp, qual=qual_fp)
diff --git a/skbio/io/format/tests/test_fastq.py b/skbio/io/format/tests/test_fastq.py
index f575478..cd7bab2 100644
--- a/skbio/io/format/tests/test_fastq.py
+++ b/skbio/io/format/tests/test_fastq.py
@@ -11,19 +11,19 @@ from future.builtins import zip
 import six
 
 import io
+import string
 import unittest
 import warnings
 from functools import partial
 
-from skbio import (read, write, Sequence, DNA, RNA, Protein,
-                   SequenceCollection, Alignment)
+from skbio import read, write, Sequence, DNA, RNA, Protein, TabularMSA
 from skbio.io import FASTQFormatError
 from skbio.io.format.fastq import (
-    _fastq_sniffer, _fastq_to_generator, _fastq_to_sequence_collection,
-    _fastq_to_alignment, _generator_to_fastq, _sequence_collection_to_fastq,
-    _alignment_to_fastq)
-
+    _fastq_sniffer, _fastq_to_generator, _fastq_to_tabular_msa,
+    _generator_to_fastq, _tabular_msa_to_fastq)
+from skbio.sequence._iupac_sequence import IUPACSequence
 from skbio.util import get_data_path
+from skbio.util._decorator import classproperty, overrides
 
 import numpy as np
 
@@ -299,12 +299,9 @@ class TestReaders(unittest.TestCase):
                     _drop_kwargs(observed_kwargs, 'seq_num')
                     constructor = observed_kwargs.get('constructor', Sequence)
 
-                    # Can't use partials for this because the read
-                    # function below can't operate on partials
                     expected_kwargs = {}
-                    if hasattr(constructor, 'lowercase'):
-                        expected_kwargs['lowercase'] = 'introns'
-                        observed_kwargs['lowercase'] = 'introns'
+                    expected_kwargs['lowercase'] = 'introns'
+                    observed_kwargs['lowercase'] = 'introns'
 
                     expected = [constructor(c[2],
                                             metadata={'id': c[0],
@@ -376,11 +373,8 @@ class TestReaders(unittest.TestCase):
 
                         _drop_kwargs(observed_kwargs, 'constructor')
 
-                        # Can't use partials for this because the read
-                        # function below can't operate on partials
-                        if hasattr(constructor, 'lowercase'):
-                            expected_kwargs['lowercase'] = 'introns'
-                            observed_kwargs['lowercase'] = 'introns'
+                        expected_kwargs['lowercase'] = 'introns'
+                        observed_kwargs['lowercase'] = 'introns'
 
                         seq_num = observed_kwargs.get('seq_num', 1)
                         c = components[seq_num - 1]
@@ -397,47 +391,36 @@ class TestReaders(unittest.TestCase):
                                         **observed_kwargs)
                         self.assertEqual(observed, expected)
 
-    def test_fastq_to_sequence_collection(self):
-        for valid_files, kwargs, components in self.valid_configurations:
-            for valid in valid_files:
-                for observed_kwargs in kwargs:
-                    _drop_kwargs(observed_kwargs, 'seq_num')
-                    constructor = observed_kwargs.get('constructor', Sequence)
-
-                    # Can't use partials for this because the read
-                    # function below can't operate on partials
-                    expected_kwargs = {}
-                    if hasattr(constructor, 'lowercase'):
-                        expected_kwargs['lowercase'] = 'introns'
-                        observed_kwargs['lowercase'] = 'introns'
+    def test_fastq_to_tabular_msa(self):
+        class CustomSequence(IUPACSequence):
+            @classproperty
+            @overrides(IUPACSequence)
+            def gap_chars(cls):
+                return set('-.')
 
-                    expected = SequenceCollection(
-                        [constructor(
-                            c[2], metadata={'id': c[0], 'description': c[1]},
-                            positional_metadata={'quality': np.array(c[3],
-                                                 np.uint8)},
-                            **expected_kwargs)
-                         for c in components])
+            @classproperty
+            @overrides(IUPACSequence)
+            def nondegenerate_chars(cls):
+                return set(string.ascii_letters)
 
-                    observed = _fastq_to_sequence_collection(valid,
-                                                             **observed_kwargs)
-                    self.assertEqual(observed, expected)
+            @classproperty
+            @overrides(IUPACSequence)
+            def degenerate_map(cls):
+                return {}
 
-    def test_fastq_to_alignment(self):
         for valid_files, kwargs, components in self.valid_configurations:
             for valid in valid_files:
                 for observed_kwargs in kwargs:
                     _drop_kwargs(observed_kwargs, 'seq_num')
-                    constructor = observed_kwargs.get('constructor', Sequence)
+                    if 'constructor' not in observed_kwargs:
+                        observed_kwargs['constructor'] = CustomSequence
+                    constructor = observed_kwargs['constructor']
 
-                    # Can't use partials for this because the read
-                    # function below can't operate on partials
                     expected_kwargs = {}
-                    if hasattr(constructor, 'lowercase'):
-                        expected_kwargs['lowercase'] = 'introns'
-                        observed_kwargs['lowercase'] = 'introns'
+                    expected_kwargs['lowercase'] = 'introns'
+                    observed_kwargs['lowercase'] = 'introns'
 
-                    expected = Alignment(
+                    expected = TabularMSA(
                         [constructor(
                             c[2], metadata={'id': c[0],
                                             'description': c[1]},
@@ -446,9 +429,13 @@ class TestReaders(unittest.TestCase):
                             **expected_kwargs)
                          for c in components])
 
-                    observed = _fastq_to_alignment(valid, **observed_kwargs)
+                    observed = _fastq_to_tabular_msa(valid, **observed_kwargs)
                     self.assertEqual(observed, expected)
 
+    def test_fastq_to_tabular_msa_no_constructor(self):
+        with six.assertRaisesRegex(self, ValueError, '`constructor`'):
+            _fastq_to_tabular_msa(get_data_path('fastq_multi_seq_sanger'))
+
 
 class TestWriters(unittest.TestCase):
     def setUp(self):
@@ -507,11 +494,8 @@ class TestWriters(unittest.TestCase):
                     if constructor is RNA:
                         observed_kwargs['validate'] = False
 
-                    # Can't use partials for this because the read
-                    # function below can't operate on partials
-                    if hasattr(constructor, 'lowercase'):
-                        expected_kwargs['lowercase'] = 'introns'
-                        observed_kwargs['lowercase'] = 'introns'
+                    expected_kwargs['lowercase'] = 'introns'
+                    observed_kwargs['lowercase'] = 'introns'
 
                     fh = io.StringIO()
                     for c in components:
@@ -530,30 +514,10 @@ class TestWriters(unittest.TestCase):
 
                     self.assertEqual(observed, expected)
 
-    def test_sequence_collection_to_fastq_kwargs_passed(self):
-        for components, kwargs_expected_fp in self.valid_files:
-            for kwargs, expected_fp in kwargs_expected_fp:
-                obj = SequenceCollection([
-                    DNA(c[2], metadata={'id': c[0], 'description': c[1]},
-                        positional_metadata={'quality': c[3]},
-                        lowercase='introns')
-                    for c in components])
-
-                fh = io.StringIO()
-                kwargs['lowercase'] = 'introns'
-                _sequence_collection_to_fastq(obj, fh, **kwargs)
-                observed = fh.getvalue()
-                fh.close()
-
-                with io.open(expected_fp) as f:
-                    expected = f.read()
-
-                self.assertEqual(observed, expected)
-
-    def test_alignment_to_fastq_kwargs_passed(self):
+    def test_tabular_msa_to_fastq_kwargs_passed(self):
         for components, kwargs_expected_fp in self.valid_files:
             for kwargs, expected_fp in kwargs_expected_fp:
-                obj = Alignment([
+                obj = TabularMSA([
                     Protein(c[2], metadata={'id': c[0], 'description': c[1]},
                             positional_metadata={'quality': c[3]},
                             lowercase='introns')
@@ -561,7 +525,7 @@ class TestWriters(unittest.TestCase):
 
                 fh = io.StringIO()
                 kwargs['lowercase'] = 'introns'
-                _alignment_to_fastq(obj, fh, **kwargs)
+                _tabular_msa_to_fastq(obj, fh, **kwargs)
                 observed = fh.getvalue()
                 fh.close()
 
diff --git a/skbio/io/format/tests/test_genbank.py b/skbio/io/format/tests/test_genbank.py
new file mode 100644
index 0000000..81d89bb
--- /dev/null
+++ b/skbio/io/format/tests/test_genbank.py
@@ -0,0 +1,503 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import map, zip
+
+import io
+import numpy as np
+import pandas as pd
+import numpy.testing as npt
+import six
+from unittest import TestCase, main
+from datetime import datetime
+
+from skbio import Protein, DNA, RNA, Sequence
+from skbio.util import get_data_path
+from skbio.io import GenBankFormatError
+from skbio.io.format.genbank import (
+    _genbank_sniffer,
+    _genbank_to_generator, _genbank_to_sequence,
+    _genbank_to_dna, _genbank_to_rna, _genbank_to_protein,
+    _parse_locus, _parse_reference,
+    _parse_loc_str, _parse_section_default,
+    _generator_to_genbank, _sequence_to_genbank,
+    _protein_to_genbank, _rna_to_genbank, _dna_to_genbank,
+    _serialize_locus)
+
+
+class SnifferTests(TestCase):
+    def setUp(self):
+        self.positive_fps = list(map(get_data_path, [
+            'genbank_5_blanks_start_of_file',
+            'genbank_single_record_upper',
+            'genbank_single_record_lower',
+            'genbank_multi_records']))
+
+        self.negative_fps = list(map(get_data_path, [
+            'empty',
+            'whitespace_only',
+            'genbank_6_blanks_start_of_file',
+            'genbank_w_beginning_whitespace',
+            'genbank_missing_locus_name']))
+
+    def test_positives(self):
+        for fp in self.positive_fps:
+            self.assertEqual(_genbank_sniffer(fp), (True, {}))
+
+    def test_negatives(self):
+        for fp in self.negative_fps:
+            self.assertEqual(_genbank_sniffer(fp), (False, {}))
+
+
+class GenBankIOTests(TestCase):
+    # parent class that sets up test data shared by the child classes below
+    def setUp(self):
+        # test locus line
+        self.locus = (
+            (['LOCUS       NC_005816   9609 bp   '
+              'DNA   circular   CON   07-FEB-2015'],
+             {'division': 'CON', 'mol_type': 'DNA', 'shape': 'circular',
+              'locus_name': 'NC_005816', 'date': datetime(2015, 2, 7, 0, 0),
+              'unit': 'bp', 'size': 9609}),
+            (['LOCUS       SCU49845   5028 bp   '
+              'DNA      PLN   21-JUN-1999'],
+             {'division': 'PLN', 'mol_type': 'DNA', 'shape': None,
+             'locus_name': 'SCU49845', 'date': datetime(1999, 6, 21, 0, 0),
+              'unit': 'bp', 'size': 5028}),
+            (['LOCUS       NP_001832   360 aa      '
+              'linear   PRI   18-DEC-2001'],
+             {'division': 'PRI', 'mol_type': None, 'shape': 'linear',
+              'locus_name': 'NP_001832', 'date': datetime(2001, 12, 18, 0, 0),
+              'unit': 'aa', 'size': 360}))
+
+        # test a single record and reading an uppercase sequence
+        self.single_upper_fp = get_data_path('genbank_single_record_upper')
+        self.single_lower_fp = get_data_path('genbank_single_record_lower')
+        self.single = (
+            'GSREILDFK',
+            {'LOCUS': {'date': datetime(1994, 9, 23, 0, 0),
+                       'division': 'BCT',
+                       'locus_name': 'AAB29917',
+                       'mol_type': None,
+                       'shape': 'linear',
+                       'size': 9,
+                       'unit': 'aa'}},
+            None,
+            Protein)
+
+        self.single_rna_fp = get_data_path('genbank_single_record')
+        self.single_rna = (
+            'gugaaacaaagcacuauugcacuggcugucuuaccguuacuguuuaccccugugacaaaagcc',
+            {'ACCESSION': 'M14399',
+             'COMMENT': 'Original source text: E.coli, cDNA to mRNA.',
+             'DEFINITION': u"alkaline phosphatase signal mRNA, 5' end.",
+             'FEATURES': [{'db_xref': '"taxon:562"',
+                           'index_': 0,
+                           'left_partial_': False,
+                           'location': '1..63',
+                           'mol_type': '"mRNA"',
+                           'organism': '"Escherichia coli"',
+                           'rc_': False,
+                           'right_partial_': False,
+                           'type_': 'source'},
+                          {'codon_start': '1',
+                           'db_xref': [
+                               '"GI:145230"', '"taxon:562"', '"taxon:561"'],
+                           'index_': 1,
+                           'left_partial_': False,
+                           'location': '1..>63',
+                           'note': '"alkaline phosphatase signal peptide"',
+                           'protein_id': '"AAA23431.1"',
+                           'rc_': False,
+                           'right_partial_': True,
+                           'transl_table': '11',
+                           'translation': '"MKQSTIALAVLPLLFTPVTKA"',
+                           'type_': 'CDS'}],
+             'KEYWORDS': 'alkaline phosphatase; signal peptide.',
+             'LOCUS': {'date': datetime(1993, 4, 26, 0, 0),
+                       'division': 'BCT',
+                       'locus_name': 'ECOALKP',
+                       'mol_type': 'mRNA',
+                       'shape': 'linear',
+                       'size': 63,
+                       'unit': 'bp'},
+             'SOURCE': {'ORGANISM': 'Escherichia coli',
+                        'taxonomy': 'Bacteria; Proteobacteria; '
+                        'Gammaproteobacteria; Enterobacteriales; '
+                        'Enterobacteriaceae; Escherichia.'},
+             'VERSION': 'M14399.1  GI:145229'},
+            pd.DataFrame({0: np.ones(63, dtype=bool),
+                          1: np.ones(63, dtype=bool)}),
+            RNA)
+
+        # test:
+        # 1. multiple records in one file
+        # 2. lowercase sequence
+        # 3. DNA, RNA, Protein type
+        # 4. variation of formats
+        self.multi_fp = get_data_path('genbank_multi_records')
+        self.multi = (
+            ('gsreildfk',
+             {'ACCESSION': 'AAB29917',
+              'COMMENT': 'Method: direct peptide sequencing.',
+              'DBSOURCE': 'accession AAB29917.1',
+              'DEFINITION': 'L-carnitine amidase {N-terminal}',
+              'FEATURES': [{'index_': 0,
+                            'left_partial_': False,
+                            'location': '1..9',
+                            'organism': '"Bacteria"',
+                            'rc_': False,
+                            'right_partial_': False,
+                            'type_': 'source'},
+                           {'index_': 1,
+                            'left_partial_': False,
+                            'location': '1..>9',
+                            'product': '"L-carnitine amidase"',
+                            'rc_': False,
+                            'right_partial_': True,
+                            'type_': 'Protein'}],
+              'KEYWORDS': '.',
+              'LOCUS': {'date': datetime(1994, 9, 23, 0, 0),
+                        'division': 'BCT',
+                        'locus_name': 'AAB29917',
+                        'mol_type': None,
+                        'shape': 'linear',
+                        'size': 9,
+                        'unit': 'aa'},
+              'REFERENCE': [{'AUTHORS': 'Joeres,U. and Kula,M.R.',
+                             'JOURNAL': 'AMB 40 (5), 606-610 (1994)',
+                             'PUBMED': '7764422',
+                             'REFERENCE': '1  (residues 1 to 9)',
+                             'REMARK': 'from the original journal article.',
+                             'TITLE': 'a microbial L-carnitine amidase'},
+                            {'AUTHORS': 'Joeres,U. and Kula,M.R.',
+                             'JOURNAL': 'AMB 40 (5), 606-610 (1994)',
+                             'PUBMED': '7764422',
+                             'REFERENCE': '1  (residues 1 to 9)',
+                             'TITLE': 'a microbial L-carnitine amidase'}],
+              'SOURCE': {'ORGANISM': 'Bacteria',
+                         'taxonomy': 'Unclassified.'},
+              'VERSION': 'AAB29917.1  GI:545426'},
+             pd.DataFrame({0: np.ones(9, dtype=bool),
+                           1: np.ones(9, dtype=bool)}),
+             Protein),
+
+            ('catgcaggc',
+             {'ACCESSION': 'HQ018078',
+              'DEFINITION': 'Uncultured Xylanimonas sp.16S, partial',
+              'FEATURES': [{'country': '"Brazil: Parana, Paranavai"',
+                            'environmental_sample': '',
+                            'index_': 0,
+                            'left_partial_': False,
+                            'location': '1..9',
+                            'rc_': False,
+                            'right_partial_': False,
+                            'type_': 'source'},
+                           {'index_': 1,
+                            'left_partial_': True,
+                            'location': 'complement(<2..>8)',
+                            'product': '"16S ribosomal RNA"',
+                            'rc_': True,
+                            'right_partial_': True,
+                            'type_': 'rRNA'}],
+              'KEYWORDS': 'ENV.',
+              'LOCUS': {'date': datetime(2010, 8, 29, 0, 0),
+                        'division': 'ENV',
+                        'locus_name': 'HQ018078',
+                        'mol_type': 'DNA',
+                        'shape': 'linear',
+                        'size': 9,
+                        'unit': 'bp'},
+              'SOURCE': {'ORGANISM': 'uncultured Xylanimonas sp.',
+                         'taxonomy': 'Bacteria; Actinobacteria; '
+                         'Micrococcales; Promicromonosporaceae; '
+                         'Xylanimonas; environmental samples.'},
+              'VERSION': 'HQ018078.1  GI:304421728'},
+             pd.DataFrame({0: [True] * 9,
+                           1: [False] + [True] * 7 + [False]}),
+             DNA))
+
+
+class ReaderTests(GenBankIOTests):
+    def test_parse_reference(self):
+        lines = '''
+REFERENCE   1  (bases 1 to 154478)
+  AUTHORS   Sato,S., Nakamura,Y., Kaneko,T., and Tabata,S.
+  TITLE     Complete structure of the chloroplast genome of
+            Arabidopsis thaliana
+  JOURNAL   DNA Res. 6 (5), 283-290 (1999)
+   PUBMED   10574454'''.split('\n')
+
+        exp = {'AUTHORS': 'Sato,S., Nakamura,Y., Kaneko,T., and Tabata,S.',
+               'JOURNAL': 'DNA Res. 6 (5), 283-290 (1999)',
+               'PUBMED': '10574454',
+               'REFERENCE': '1  (bases 1 to 154478)',
+               'TITLE': ('Complete structure of the chloroplast genome of'
+                         ' Arabidopsis thaliana')}
+        self.assertEqual(_parse_reference(lines), exp)
+
+    def test_parse_locus(self):
+        for serialized, parsed in self.locus:
+            self.assertEqual(_parse_locus(serialized), parsed)
+
+    def test_parse_locus_invalid(self):
+        lines = [
+            # missing unit
+            ['LOCUS       NC_005816               9609 '
+             '    DNA     circular CON 07-FEB-2015'],
+            # missing division
+            ['LOCUS       SCU49845     5028 bp'
+             '    DNA                    21-JUN-1999'],
+            # wrong date format
+            ['LOCUS       NP_001832                360 aa'
+             '            linear   PRI 2001-12-18']]
+        for line in lines:
+            with six.assertRaisesRegex(self, GenBankFormatError,
+                                       'Could not parse the LOCUS line:.*'):
+                _parse_locus(line)
+
+    def test_parse_section_default(self):
+        lines = [
+            ['FOO  blah blah',
+             '     blah'],
+            ['FOO=blah',
+             '    blah'],
+            ['FOO']]
+        kwargs = [{'join_delimitor': '=', 'return_label': False},
+                  {'label_delimitor': '=', 'join_delimitor': '',
+                   'return_label': True},
+                  {'label_delimitor': '=', 'join_delimitor': '=',
+                   'return_label': True}]
+        expects = ['blah blah=blah',
+                   ('FOO', 'blahblah'),
+                   ('FOO', '')]
+        for i, j, k in zip(lines, kwargs, expects):
+            self.assertEqual(k, _parse_section_default(i, **j))
+
+    def test_parse_loc_str(self):
+        length = 12
+
+        examples = [
+            '',
+            '9',  # a single base in the presented sequence
+            '3..8',
+            '<3..8',
+            '1..>8',
+            'complement(3..8)',
+            'complement(join(3..5,7..9))',
+            'join(3..5,7..9)',
+            'J00194.1:1..9',
+            '1.9',
+            '1^9']
+
+        expects = [
+            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
+             np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=bool)),
+            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
+             np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], dtype=bool)),
+            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
+             np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
+            ({'right_partial_': False, 'left_partial_': True, 'rc_': False},
+             np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
+            ({'right_partial_': True, 'left_partial_': False, 'rc_': False},
+             np.array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
+            ({'right_partial_': False, 'left_partial_': False, 'rc_': True},
+             np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
+            ({'right_partial_': False, 'left_partial_': False, 'rc_': True},
+             np.array([0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0], dtype=bool)),
+            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
+             np.array([0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0], dtype=bool)),
+            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
+             np.zeros(length, dtype=bool)),
+            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
+             np.zeros(length, dtype=bool)),
+            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
+             np.zeros(length, dtype=bool))]
+        for example, expect in zip(examples, expects):
+            parsed = _parse_loc_str(example, length)
+            self.assertDictEqual(parsed[0], expect[0])
+            npt.assert_equal(parsed[1], expect[1])
+
+    def test_parse_loc_str_invalid(self):
+        length = 12
+        examples = [
+            'abc',
+            '3-8']
+        for example in examples:
+            with six.assertRaisesRegex(self, GenBankFormatError,
+                                       'Could not parse location string: '
+                                       '"%s"' % example):
+                _parse_loc_str(example, length)
+
+    def test_genbank_to_generator_single(self):
+        # test single record and uppercase sequence
+        for c in [Sequence, Protein]:
+            obs = next(_genbank_to_generator(
+                self.single_upper_fp, constructor=c))
+            exp = c(self.single[0], metadata=self.single[1],
+                    positional_metadata=self.single[2])
+            self.assertEqual(exp, obs)
+
+    def test_genbank_to_generator(self):
+        for i, obs in enumerate(_genbank_to_generator(self.multi_fp)):
+            seq, md, pmd, constructor = self.multi[i]
+            exp = constructor(seq, metadata=md, lowercase=True,
+                              positional_metadata=pmd)
+            self.assertEqual(exp, obs)
+
+    def test_genbank_to_sequence(self):
+        for i, exp in enumerate(self.multi):
+            obs = _genbank_to_sequence(self.multi_fp, seq_num=i+1)
+            exp = Sequence(exp[0], metadata=exp[1], lowercase=True,
+                           positional_metadata=exp[2])
+            self.assertEqual(exp, obs)
+
+    def test_genbank_to_rna(self):
+        seq, md, pmd, constructor = self.single_rna
+        obs = _genbank_to_rna(self.single_rna_fp)
+        exp = constructor(seq, metadata=md,
+                          lowercase=True, positional_metadata=pmd)
+        self.assertEqual(exp, obs)
+
+    def test_genbank_to_dna(self):
+        i = 1
+        exp = self.multi[i]
+        obs = _genbank_to_dna(self.multi_fp, seq_num=i+1)
+        exp = DNA(exp[0], metadata=exp[1], lowercase=True,
+                  positional_metadata=exp[2])
+        self.assertEqual(exp, obs)
+
+    def test_genbank_to_protein(self):
+        i = 0
+        exp = self.multi[i]
+        obs = _genbank_to_protein(self.multi_fp, seq_num=i+1)
+        exp = Protein(exp[0], metadata=exp[1],
+                      lowercase=True, positional_metadata=exp[2])
+        self.assertEqual(exp, obs)
+
+
+class WriterTests(GenBankIOTests):
+    def test_serialize_locus(self):
+        for serialized, parsed in self.locus:
+            self.assertEqual(
+                _serialize_locus('LOCUS', parsed), serialized[0] + '\n')
+
+    def test_generator_to_genbank(self):
+        seq, md, pmd, constructor = self.single
+        obj = constructor(seq, md, pmd)
+        fh = io.StringIO()
+        _generator_to_genbank([obj], fh)
+        obs = fh.getvalue()
+        fh.close()
+
+        with io.open(self.single_lower_fp) as fh:
+            exp = fh.read()
+
+        self.assertEqual(obs, exp)
+
+    def test_sequence_to_genbank(self):
+        fh = io.StringIO()
+        for i, (seq, md, pmd, constructor) in enumerate(self.multi):
+            obj = Sequence(seq, md, pmd, lowercase=True)
+            _sequence_to_genbank(obj, fh)
+        obs = fh.getvalue()
+        fh.close()
+
+        with io.open(self.multi_fp) as fh:
+            exp = fh.read()
+        self.assertEqual(obs, exp)
+
+    def test_dna_protein_to_genbank(self):
+        writers = [_protein_to_genbank,
+                   _dna_to_genbank]
+        fh = io.StringIO()
+        for i, (seq, md, pmd, constructor) in enumerate(self.multi):
+            obj = constructor(seq, md, pmd, lowercase=True)
+            writers[i](obj, fh)
+        obs = fh.getvalue()
+        fh.close()
+
+        with io.open(self.multi_fp) as fh:
+            exp = fh.read()
+
+        self.assertEqual(obs, exp)
+
+    def test_rna_to_genbank(self):
+        fh = io.StringIO()
+        seq, md, pmd, constructor = self.single_rna
+        obj = constructor(seq, md, pmd, lowercase=True)
+        _rna_to_genbank(obj, fh)
+        obs = fh.getvalue()
+        fh.close()
+
+        with io.open(self.single_rna_fp) as fh:
+            exp = fh.read()
+
+        self.assertEqual(obs, exp)
+
+
+class RoundtripTests(GenBankIOTests):
+    def test_roundtrip_generator(self):
+        fh = io.StringIO()
+        _generator_to_genbank(_genbank_to_generator(self.multi_fp), fh)
+        obs = fh.getvalue()
+        fh.close()
+
+        with io.open(self.multi_fp) as fh:
+            exp = fh.read()
+
+        self.assertEqual(obs, exp)
+
+    def test_roundtrip_rna(self):
+        fh = io.StringIO()
+        _rna_to_genbank(_genbank_to_rna(self.single_rna_fp), fh)
+        obs = fh.getvalue()
+        fh.close()
+
+        with io.open(self.single_rna_fp) as fh:
+            exp = fh.read()
+
+        self.assertEqual(obs, exp)
+
+    def test_roundtrip_dna(self):
+        fh = io.StringIO()
+        _dna_to_genbank(_genbank_to_dna(self.single_rna_fp), fh)
+        obs = fh.getvalue()
+        fh.close()
+
+        with io.open(self.single_rna_fp) as fh:
+            exp = fh.read()
+
+        self.assertEqual(obs, exp)
+
+    def test_roundtrip_protein(self):
+        fh = io.StringIO()
+        _protein_to_genbank(_genbank_to_protein(self.single_lower_fp), fh)
+        obs = fh.getvalue()
+        fh.close()
+
+        with io.open(self.single_lower_fp) as fh:
+            exp = fh.read()
+
+        self.assertEqual(obs, exp)
+
+    def test_roundtrip_sequence(self):
+        fh = io.StringIO()
+        _sequence_to_genbank(_genbank_to_sequence(self.single_rna_fp), fh)
+        obs = fh.getvalue()
+        fh.close()
+
+        with io.open(self.single_rna_fp) as fh:
+            exp = fh.read()
+
+        self.assertEqual(obs, exp)
+
+
+if __name__ == '__main__':
+    main()
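
The new GenBank tests drive the private readers and writers directly; through
the IO registry the same functionality is reachable from the public API. A
small sketch under the assumption that the format is registered as 'genbank'
('my_record.gb' is a hypothetical path):

    import skbio

    # Read the first record into DNA; LOCUS, FEATURES, etc. land in metadata,
    # and per-feature location masks become boolean positional_metadata
    # columns (as in the expected pd.DataFrame objects above).
    seq = skbio.io.read('my_record.gb', format='genbank', into=skbio.DNA)
    print(seq.metadata['LOCUS']['locus_name'])
    print(seq.positional_metadata.columns.tolist())

    # Round-trip it back out.
    seq.write('my_record_copy.gb', format='genbank')
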
diff --git a/skbio/io/format/tests/test_ordination.py b/skbio/io/format/tests/test_ordination.py
index 327768b..1cf87ef 100644
--- a/skbio/io/format/tests/test_ordination.py
+++ b/skbio/io/format/tests/test_ordination.py
@@ -13,15 +13,15 @@ import io
 from unittest import TestCase, main
 
 import numpy as np
+import pandas as pd
 import numpy.testing as npt
 
+from skbio import OrdinationResults
 from skbio.io import OrdinationFormatError
 from skbio.io.format.ordination import (
     _ordination_to_ordination_results, _ordination_results_to_ordination,
     _ordination_sniffer)
-from skbio.stats.ordination import (
-    OrdinationResults, assert_ordination_results_equal)
-from skbio.util import get_data_path
+from skbio.util import get_data_path, assert_ordination_results_equal
 
 
 class OrdinationTestData(TestCase):
@@ -77,95 +77,117 @@ class OrdinationResultsReaderWriterTests(OrdinationTestData):
         # self.valid_fps
 
         # CA results
-        eigvals = np.array([0.0961330159181, 0.0409418140138])
-        species = np.array([[0.408869425742, 0.0695518116298],
-                            [-0.1153860437, -0.299767683538],
-                            [-0.309967102571, 0.187391917117]])
-        site = np.array([[-0.848956053187, 0.882764759014],
-                         [-0.220458650578, -1.34482000302],
-                         [1.66697179591, 0.470324389808]])
+        axes_ids = ['CA1', 'CA2']
+        species_ids = ['Species1', 'Species2', 'Species3']
+        site_ids = ['Site1', 'Site2', 'Site3']
+        eigvals = pd.Series([0.0961330159181, 0.0409418140138], axes_ids)
+        species = pd.DataFrame([[0.408869425742, 0.0695518116298],
+                                [-0.1153860437, -0.299767683538],
+                                [-0.309967102571, 0.187391917117]],
+                               index=species_ids, columns=axes_ids)
+        site = pd.DataFrame([[-0.848956053187, 0.882764759014],
+                             [-0.220458650578, -1.34482000302],
+                             [1.66697179591, 0.470324389808]],
+                            index=site_ids, columns=axes_ids)
         biplot = None
         site_constraints = None
         prop_explained = None
-        species_ids = ['Species1', 'Species2', 'Species3']
-        site_ids = ['Site1', 'Site2', 'Site3']
-        ca_scores = OrdinationResults(eigvals=eigvals, species=species,
-                                      site=site, biplot=biplot,
-                                      site_constraints=site_constraints,
-                                      proportion_explained=prop_explained,
-                                      species_ids=species_ids,
-                                      site_ids=site_ids)
+        ca_scores = OrdinationResults(
+            'CA', 'Correspondence Analysis', eigvals=eigvals, features=species,
+            samples=site, biplot_scores=biplot,
+            sample_constraints=site_constraints,
+            proportion_explained=prop_explained)
+
         # CCA results
-        eigvals = np.array([0.366135830393, 0.186887643052, 0.0788466514249,
-                            0.082287840501, 0.0351348475787, 0.0233265839374,
-                            0.0099048981912, 0.00122461669234,
-                            0.000417454724117])
-        species = np.loadtxt(
-            get_data_path('ordination_exp_Ordination_CCA_species'))
-        site = np.loadtxt(get_data_path('ordination_exp_Ordination_CCA_site'))
-        biplot = np.array([[-0.169746767979, 0.63069090084, 0.760769036049],
-                           [-0.994016563505, 0.0609533148724,
-                            -0.0449369418179],
-                           [0.184352565909, -0.974867543612, 0.0309865007541]])
-        site_constraints = np.loadtxt(
-            get_data_path('ordination_exp_Ordination_CCA_site_constraints'))
-        prop_explained = None
+        axes_ids = ['CCA%d' % i for i in range(1, 10)]
         species_ids = ['Species0', 'Species1', 'Species2', 'Species3',
                        'Species4', 'Species5', 'Species6', 'Species7',
                        'Species8']
         site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5',
                     'Site6', 'Site7', 'Site8', 'Site9']
-        cca_scores = OrdinationResults(eigvals=eigvals, species=species,
-                                       site=site, biplot=biplot,
-                                       site_constraints=site_constraints,
-                                       proportion_explained=prop_explained,
-                                       species_ids=species_ids,
-                                       site_ids=site_ids)
+
+        eigvals = pd.Series([0.366135830393, 0.186887643052, 0.0788466514249,
+                             0.082287840501, 0.0351348475787, 0.0233265839374,
+                             0.0099048981912, 0.00122461669234,
+                             0.000417454724117], axes_ids)
+        species = pd.DataFrame(np.loadtxt(
+            get_data_path('ordination_exp_Ordination_CCA_species')),
+            index=species_ids, columns=axes_ids)
+        site = pd.DataFrame(
+            np.loadtxt(get_data_path('ordination_exp_Ordination_CCA_site')),
+            index=site_ids, columns=axes_ids)
+        biplot = pd.DataFrame(
+            [[-0.169746767979, 0.63069090084, 0.760769036049],
+             [-0.994016563505, 0.0609533148724, -0.0449369418179],
+             [0.184352565909, -0.974867543612, 0.0309865007541]],
+            columns=axes_ids[:3])
+        site_constraints = pd.DataFrame(np.loadtxt(
+            get_data_path('ordination_exp_Ordination_CCA_site_constraints')),
+            index=site_ids, columns=axes_ids)
+        prop_explained = None
+        cca_scores = OrdinationResults('CCA',
+                                       'Canonical Correspondence Analysis',
+                                       eigvals=eigvals, features=species,
+                                       samples=site, biplot_scores=biplot,
+                                       sample_constraints=site_constraints,
+                                       proportion_explained=prop_explained)
+
         # PCoA results
-        eigvals = np.array([0.512367260461, 0.300719094427, 0.267912066004,
-                            0.208988681078, 0.19169895326, 0.16054234528,
-                            0.15017695712, 0.122457748167, 0.0])
-        species = None
-        site = np.loadtxt(get_data_path('ordination_exp_Ordination_PCoA_site'))
-        biplot = None
-        site_constraints = None
-        prop_explained = np.array([0.267573832777, 0.15704469605,
-                                   0.139911863774, 0.109140272454,
-                                   0.100111048503, 0.0838401161912,
-                                   0.0784269939011, 0.0639511763509, 0.0])
+        axes_ids = ['PC%d' % i for i in range(1, 10)]
         species_ids = None
         site_ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593',
                     'PC.355', 'PC.607', 'PC.634']
-        pcoa_scores = OrdinationResults(eigvals=eigvals, species=species,
-                                        site=site, biplot=biplot,
-                                        site_constraints=site_constraints,
-                                        proportion_explained=prop_explained,
-                                        species_ids=species_ids,
-                                        site_ids=site_ids)
+        eigvals = pd.Series([0.512367260461, 0.300719094427, 0.267912066004,
+                             0.208988681078, 0.19169895326, 0.16054234528,
+                             0.15017695712, 0.122457748167, 0.0], axes_ids)
+        species = None
+        site = pd.DataFrame(
+            np.loadtxt(get_data_path('ordination_exp_Ordination_PCoA_site')),
+            index=site_ids, columns=axes_ids)
+        biplot = None
+        site_constraints = None
+        prop_explained = pd.Series([0.267573832777, 0.15704469605,
+                                    0.139911863774, 0.109140272454,
+                                    0.100111048503, 0.0838401161912,
+                                    0.0784269939011, 0.0639511763509, 0.0],
+                                   axes_ids)
+        pcoa_scores = OrdinationResults('PCoA',
+                                        'Principal Coordinate Analysis',
+                                        eigvals=eigvals, features=species,
+                                        samples=site, biplot_scores=biplot,
+                                        sample_constraints=site_constraints,
+                                        proportion_explained=prop_explained)
+
         # RDA results
-        eigvals = np.array([25.8979540892, 14.9825779819, 8.93784077262,
-                            6.13995623072, 1.68070536498, 0.57735026919,
-                            0.275983624351])
-        species = np.loadtxt(
-            get_data_path('ordination_exp_Ordination_RDA_species'))
-        site = np.loadtxt(get_data_path('ordination_exp_Ordination_RDA_site'))
-        biplot = np.array([[0.422650019179, -0.559142585857, -0.713250678211],
-                           [0.988495963777, 0.150787422017, -0.0117848614073],
-                           [-0.556516618887, 0.817599992718, 0.147714267459],
-                           [-0.404079676685, -0.9058434809, -0.127150316558]])
-        site_constraints = np.loadtxt(
-            get_data_path('ordination_exp_Ordination_RDA_site_constraints'))
-        prop_explained = None
+        axes_ids = ['RDA%d' % i for i in range(1, 8)]
         species_ids = ['Species0', 'Species1', 'Species2', 'Species3',
                        'Species4', 'Species5']
         site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5',
                     'Site6', 'Site7', 'Site8', 'Site9']
-        rda_scores = OrdinationResults(eigvals=eigvals, species=species,
-                                       site=site, biplot=biplot,
-                                       site_constraints=site_constraints,
-                                       proportion_explained=prop_explained,
-                                       species_ids=species_ids,
-                                       site_ids=site_ids)
+        eigvals = pd.Series([25.8979540892, 14.9825779819, 8.93784077262,
+                            6.13995623072, 1.68070536498, 0.57735026919,
+                            0.275983624351], axes_ids)
+        species = pd.DataFrame(np.loadtxt(
+            get_data_path('ordination_exp_Ordination_RDA_species')),
+            index=species_ids, columns=axes_ids)
+        site = pd.DataFrame(
+            np.loadtxt(get_data_path('ordination_exp_Ordination_RDA_site')),
+            index=site_ids, columns=axes_ids)
+        biplot = pd.DataFrame(
+            [[0.422650019179, -0.559142585857, -0.713250678211],
+             [0.988495963777, 0.150787422017, -0.0117848614073],
+             [-0.556516618887, 0.817599992718, 0.147714267459],
+             [-0.404079676685, -0.9058434809, -0.127150316558]],
+            columns=axes_ids[:3])
+        site_constraints = pd.DataFrame(np.loadtxt(
+            get_data_path('ordination_exp_Ordination_RDA_site_constraints')),
+            index=site_ids, columns=axes_ids)
+        prop_explained = None
+        rda_scores = OrdinationResults(
+            'RDA', 'Redundancy Analysis', eigvals=eigvals, features=species,
+            samples=site, biplot_scores=biplot,
+            sample_constraints=site_constraints,
+            proportion_explained=prop_explained)
 
         self.ordination_results_objs = [ca_scores, cca_scores, pcoa_scores,
                                         rda_scores]
@@ -173,7 +195,9 @@ class OrdinationResultsReaderWriterTests(OrdinationTestData):
     def test_read_valid_files(self):
         for fp, obj in zip(self.valid_fps, self.ordination_results_objs):
                 obs = _ordination_to_ordination_results(fp)
-                assert_ordination_results_equal(obs, obj)
+                assert_ordination_results_equal(
+                    obs, obj, ignore_method_names=True,
+                    ignore_axis_labels=True, ignore_biplot_scores_labels=True)
 
     def test_read_invalid_files(self):
         for invalid_fp, error_msg_regexp, _ in self.invalid_fps:
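
The rewritten setUp above reflects the new OrdinationResults constructor:
eigenvalues, sample scores and feature scores are labelled pandas objects
rather than bare numpy arrays plus separate id lists. A minimal sketch with
made-up numbers:

    import numpy as np
    import pandas as pd
    from skbio import OrdinationResults

    axis_ids = ['PC1', 'PC2']
    sample_ids = ['Site1', 'Site2', 'Site3']

    res = OrdinationResults(
        'PCoA', 'Principal Coordinate Analysis',
        eigvals=pd.Series([0.7, 0.3], index=axis_ids),
        samples=pd.DataFrame(np.random.rand(3, 2),
                             index=sample_ids, columns=axis_ids),
        proportion_explained=pd.Series([0.7, 0.3], index=axis_ids))
    print(res)
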
diff --git a/skbio/io/format/tests/test_phylip.py b/skbio/io/format/tests/test_phylip.py
index 3e07f60..6106e4f 100644
--- a/skbio/io/format/tests/test_phylip.py
+++ b/skbio/io/format/tests/test_phylip.py
@@ -10,45 +10,200 @@ from __future__ import absolute_import, division, print_function
 import six
 
 import io
-from unittest import TestCase, main
+import unittest
 
 from skbio.io import PhylipFormatError
-from skbio.io.format.phylip import _alignment_to_phylip
-from skbio import Alignment, DNA, RNA
+from skbio.io.format.phylip import (
+    _tabular_msa_to_phylip, _phylip_to_tabular_msa, _phylip_sniffer)
+from skbio import TabularMSA, DNA, RNA
 from skbio.util import get_data_path
 
 
-class AlignmentWriterTests(TestCase):
+class TestSniffer(unittest.TestCase):
+    def setUp(self):
+        self.positives = [get_data_path(e) for e in [
+            'phylip_dna_3_seqs',
+            'phylip_single_seq_long',
+            'phylip_single_seq_short',
+            'phylip_two_chunks',
+            'phylip_variable_length_ids',
+            'phylip_varied_whitespace_in_seqs',
+            'phylip_whitespace_in_header_1',
+            'phylip_whitespace_in_header_2',
+            'phylip_whitespace_in_header_3',
+        ]]
+
+        # The sniffer negatives omit phylip_invalid_empty_line_between_seqs,
+        # phylip_invalid_too_few_seqs and phylip_invalid_too_many_seqs
+        # because the sniffer only reads the first sequence.
+        self.negatives = [get_data_path(e) for e in [
+            'empty',
+            'whitespace_only',
+            'phylip_invalid_empty_line_after_header',
+            'phylip_invalid_empty_line_before_header',
+            'phylip_invalid_header_too_long',
+            'phylip_invalid_header_too_short',
+            'phylip_invalid_no_header',
+            'phylip_invalid_seq_too_long',
+            'phylip_invalid_seq_too_short',
+            'phylip_invalid_zero_seq_len',
+            'phylip_invalid_zero_seqs',
+        ]]
+
+    def test_positives(self):
+        for fp in self.positives:
+            self.assertEqual(_phylip_sniffer(fp), (True, {}))
+
+    def test_negatives(self):
+        for fp in self.negatives:
+            self.assertEqual(_phylip_sniffer(fp), (False, {}))
+
+
+class TestReaders(unittest.TestCase):
+    def setUp(self):
+        self.valid_configurations = [
+            ([get_data_path('phylip_dna_3_seqs')],
+             [('..ACC-GTTGG..', 'd1'), ('TTACCGGT-GGCC', 'd2'),
+              ('.-ACC-GTTGC--', 'd3')]
+             ),
+
+            ([get_data_path('phylip_single_seq_long')],
+             [('..ACC-GTTGG..AATGC.C----', 'foo')]
+             ),
+
+            ([get_data_path('phylip_single_seq_short')],
+             [('-', '')]
+             ),
+
+            ([get_data_path('phylip_two_chunks'),
+              get_data_path('phylip_varied_whitespace_in_seqs'),
+              get_data_path('phylip_whitespace_in_header_1'),
+              get_data_path('phylip_whitespace_in_header_2'),
+              get_data_path('phylip_whitespace_in_header_3'),
+              ],
+             [('..ACC-GTTGG..AATGC.C', 'foo'), ('TTACCGGT-GGCCTA-GCAT', 'bar')]
+             ),
+
+            ([get_data_path('phylip_variable_length_ids')],
+             [('.-ACGT', ''), ('TGCA-.', 'a'), ('.ACGT-', 'bb'),
+              ('TGCA-.', '1'), ('AAAAAA', 'abcdefghij'),
+              ('GGGGGG', 'ab def42ij')]
+             ),
+
+        ]
+
+        self.positive_fps = list(map(get_data_path, [
+            'phylip_dna_3_seqs',
+            'phylip_single_seq_long',
+            'phylip_single_seq_short',
+            'phylip_two_chunks',
+            'phylip_variable_length_ids',
+            'phylip_varied_whitespace_in_seqs',
+            'phylip_whitespace_in_header_1',
+            'phylip_whitespace_in_header_2',
+            'phylip_whitespace_in_header_3',
+        ]))
+
+        self.invalid_files = [(get_data_path(e[0]), e[1], e[2]) for e in [
+            ('empty', PhylipFormatError,
+             'This file is empty.'),
+
+            ('whitespace_only', PhylipFormatError,
+             'Found non-header line .*: ""'),
+
+            ('phylip_invalid_empty_line_after_header', PhylipFormatError,
+             'Empty lines are not allowed.'),
+
+            ('phylip_invalid_empty_line_before_header', PhylipFormatError,
+             'Found non-header line .*: ""'),
+
+            ('phylip_invalid_empty_line_between_seqs', PhylipFormatError,
+             'Empty lines are not allowed.'),
+
+            ('phylip_invalid_header_too_long', PhylipFormatError,
+             'Found non-header line .*: "2 20 extra_text"'),
+
+            ('phylip_invalid_header_too_short', PhylipFormatError,
+             'Found non-header line .*: " 20"'),
+
+            ('phylip_invalid_no_header', PhylipFormatError,
+             'Found non-header line .*: "foo .*"'),
+
+            ('phylip_invalid_seq_too_long', PhylipFormatError,
+             'The length of sequence foo is not 20 as specified .*.'),
+
+            ('phylip_invalid_seq_too_short', PhylipFormatError,
+             'The length of sequence foo is not 20 as specified .*.'),
+
+            ('phylip_invalid_too_few_seqs', PhylipFormatError,
+             'The number of sequences is not .* as specified .*.'),
+
+            ('phylip_invalid_too_many_seqs', PhylipFormatError,
+             'The number of sequences is not .* as specified in the header.'),
+
+            ('phylip_invalid_zero_seq_len', PhylipFormatError,
+             'The number of sequences and the length must be positive.'),
+
+            ('phylip_invalid_zero_seqs', PhylipFormatError,
+             'The number of sequences and the length must be positive.'),
+        ]]
+
+    def test_phylip_to_tabular_msa_invalid_files(self):
+        for fp, error_type, error_msg_regex in self.invalid_files:
+            with six.assertRaisesRegex(self, error_type, error_msg_regex):
+                _phylip_to_tabular_msa(fp, constructor=DNA)
+
+    def test_phylip_to_tabular_msa_no_constructor(self):
+        with six.assertRaisesRegex(self, ValueError, '`constructor`'):
+            _phylip_to_tabular_msa(get_data_path('phylip_dna_3_seqs'))
+
+    def test_phylip_to_tabular_msa_valid_files(self):
+        for valid_files, components in self.valid_configurations:
+            for valid in valid_files:
+                observed = _phylip_to_tabular_msa(valid, constructor=DNA)
+
+                expected_seqs = []
+                expected_index = []
+                for seq, ID in components:
+                    expected_seqs.append(DNA(seq))
+                    expected_index.append(ID)
+                expected = TabularMSA(expected_seqs, index=expected_index)
+
+                self.assertEqual(observed, expected)
+
+
+class TestWriters(unittest.TestCase):
     def setUp(self):
         # ids all same length, seqs longer than 10 chars
-        dna_3_seqs = Alignment([
+        dna_3_seqs = TabularMSA([
             DNA('..ACC-GTTGG..', metadata={'id': "d1"}),
             DNA('TTACCGGT-GGCC', metadata={'id': "d2"}),
-            DNA('.-ACC-GTTGC--', metadata={'id': "d3"})])
+            DNA('.-ACC-GTTGC--', metadata={'id': "d3"})], minter='id')
 
         # id lengths from 0 to 10, with mixes of numbers, characters, and
-        # spaces. sequence characters are a mix of cases and gap characters.
-        # sequences are shorter than 10 chars
-        variable_length_ids = Alignment([
-            RNA('.-ACGU', metadata={'id': ''}),
-            RNA('UGCA-.', metadata={'id': 'a'}),
-            RNA('.ACGU-', metadata={'id': 'bb'}),
-            RNA('ugca-.', metadata={'id': '1'}, validate=False),
-            RNA('AaAaAa', metadata={'id': 'abcdefghij'}, validate=False),
-            RNA('GGGGGG', metadata={'id': 'ab def42ij'})])
+        # spaces. sequences are shorter than 10 chars
+        variable_length_ids = TabularMSA([
+            DNA('.-ACGT', metadata={'id': ''}),
+            DNA('TGCA-.', metadata={'id': 'a'}),
+            DNA('.ACGT-', metadata={'id': 'bb'}),
+            DNA('TGCA-.', metadata={'id': '1'}),
+            DNA('AAAAAA', metadata={'id': 'abcdefghij'}),
+            DNA('GGGGGG', metadata={'id': 'ab def42ij'})], minter='id')
 
         # sequences with 20 chars = exactly two chunks of size 10
-        two_chunks = Alignment([
+        two_chunks = TabularMSA([
             DNA('..ACC-GTTGG..AATGC.C', metadata={'id': 'foo'}),
-            DNA('TTACCGGT-GGCCTA-GCAT', metadata={'id': 'bar'})])
+            DNA('TTACCGGT-GGCCTA-GCAT', metadata={'id': 'bar'})], minter='id')
 
         # single sequence with more than two chunks
-        single_seq_long = Alignment([
-            DNA('..ACC-GTTGG..AATGC.C----', metadata={'id': 'foo'})])
+        single_seq_long = TabularMSA([
+            DNA('..ACC-GTTGG..AATGC.C----', metadata={'id': 'foo'})],
+            minter='id')
 
         # single sequence with only a single character (minimal writeable
         # alignment)
-        single_seq_short = Alignment([DNA('-', metadata={'id': ''})])
+        single_seq_short = TabularMSA([DNA('-', metadata={'id': ''})],
+                                      minter='id')
 
         # alignments that can be written in phylip format
         self.objs = [dna_3_seqs, variable_length_ids, two_chunks,
@@ -62,22 +217,23 @@ class AlignmentWriterTests(TestCase):
         # expected error message regexps
         self.invalid_objs = [
             # no seqs
-            (Alignment([]), 'one sequence'),
+            (TabularMSA([]), 'one sequence'),
 
             # no positions
-            (Alignment([DNA('', metadata={'id': "d1"}),
-                        DNA('', metadata={'id': "d2"})]), 'one position'),
+            (TabularMSA([DNA('', metadata={'id': "d1"}),
+                         DNA('', metadata={'id': "d2"})]), 'one position'),
 
             # ids too long
-            (Alignment([RNA('ACGU', metadata={'id': "foo"}),
-                        RNA('UGCA', metadata={'id': "alongsequenceid"})]),
+            (TabularMSA([RNA('ACGU', metadata={'id': "foo"}),
+                         RNA('UGCA', metadata={'id': "alongsequenceid"})],
+                        minter='id'),
              '10.*alongsequenceid')
         ]
 
     def test_write(self):
         for fp, obj in zip(self.fps, self.objs):
             fh = io.StringIO()
-            _alignment_to_phylip(obj, fh)
+            _tabular_msa_to_phylip(obj, fh)
             obs = fh.getvalue()
             fh.close()
 
@@ -91,7 +247,7 @@ class AlignmentWriterTests(TestCase):
             fh = io.StringIO()
             with six.assertRaisesRegex(self, PhylipFormatError,
                                        error_msg_regexp):
-                _alignment_to_phylip(invalid_obj, fh)
+                _tabular_msa_to_phylip(invalid_obj, fh)
 
             # ensure nothing was written to the file before the error was
             # thrown. TODO remove this check when #674 is resolved
@@ -101,4 +257,4 @@ class AlignmentWriterTests(TestCase):
 
 
 if __name__ == '__main__':
-    main()
+    unittest.main()
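
The PHYLIP plugin gains a sniffer and a reader alongside the writer, and both
now operate on TabularMSA; the minter='id' argument above indexes the MSA by
each sequence's 'id' metadata. A small round-trip sketch:

    import io
    from skbio import DNA, TabularMSA

    msa = TabularMSA([DNA('ACCGT-', metadata={'id': 'seq1'}),
                      DNA('ACC-TT', metadata={'id': 'seq2'})], minter='id')

    fh = io.StringIO()
    msa.write(fh, format='phylip')
    print(fh.getvalue())

    # The reader needs a constructor because PHYLIP does not record whether
    # the data are DNA, RNA or protein.
    msa2 = TabularMSA.read(io.StringIO(fh.getvalue()), format='phylip',
                           constructor=DNA)
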
diff --git a/skbio/io/format/tests/test_qseq.py b/skbio/io/format/tests/test_qseq.py
index 74cb7d0..bc4e1a4 100644
--- a/skbio/io/format/tests/test_qseq.py
+++ b/skbio/io/format/tests/test_qseq.py
@@ -12,12 +12,11 @@ from future.builtins import zip
 
 import unittest
 
-from skbio import SequenceCollection, Sequence, DNA, RNA, Protein
+from skbio import Sequence, DNA, RNA, Protein
 from skbio import read
 from skbio.util import get_data_path
 from skbio.io import QSeqFormatError
-from skbio.io.format.qseq import (
-    _qseq_to_generator, _qseq_to_sequence_collection, _qseq_sniffer)
+from skbio.io.format.qseq import _qseq_to_generator, _qseq_sniffer
 import numpy as np
 
 
@@ -288,48 +287,6 @@ class TestQSeqToGenerator(TestQSeqBase):
                     self.assertEqual(o, e)
 
 
-class TestQSeqToSequenceCollection(TestQSeqBase):
-    def setUp(self):
-        super(TestQSeqToSequenceCollection, self).setUp()
-        self.valid_files += [
-            (get_data_path('empty'), [{}, {'variant': 'sanger'}],
-             SequenceCollection([]))
-        ]
-
-    def test_invalid_files(self):
-        for invalid, kwargs, errors, etype in self.invalid_files:
-            with self.assertRaises(etype) as cm:
-                for kwarg in kwargs:
-                    _drop_kwargs(kwarg, 'seq_num')
-                    _qseq_to_sequence_collection(invalid, **kwarg)
-            for e in errors:
-                self.assertIn(e, str(cm.exception))
-
-    def test_valid_files(self):
-        for valid, kwargs, components in self.valid_files:
-            for kwarg in kwargs:
-                _drop_kwargs(kwarg, 'seq_num')
-                constructor = kwarg.get('constructor', Sequence)
-                expected = SequenceCollection([
-                    constructor(
-                        c['sequence'],
-                        metadata={'id': c['id'],
-                                  'machine_name': c['machine_name'],
-                                  'run_number': c['run_number'],
-                                  'lane_number': c['lane_number'],
-                                  'tile_number': c['tile_number'],
-                                  'x': c['x'],
-                                  'y': c['y'],
-                                  'index': c['index'],
-                                  'read_number': c['read_number']},
-                        positional_metadata={
-                            'quality': np.array(c['quality'], dtype=np.uint8)})
-                    for c in components])
-
-                observed = _qseq_to_sequence_collection(valid, **kwarg)
-                self.assertEqual(observed, expected)
-
-
 class TestQSeqToSequences(TestQSeqBase):
     def test_invalid_files(self):
         for constructor in [Sequence, DNA, RNA, Protein]:
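
With the SequenceCollection reader removed, QSeq records are consumed through
the generator reader (or one at a time via seq_num). A sketch, where
'lane1.qseq' is a hypothetical file path:

    import skbio

    # skbio.io.read returns a generator when no `into` class is given.
    reads = skbio.io.read('lane1.qseq', format='qseq',
                          variant='illumina1.3', constructor=skbio.DNA)
    for seq in reads:
        print(seq.metadata['id'], len(seq))
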
diff --git a/skbio/io/registry.py b/skbio/io/registry.py
index 0e18451..1bf7737 100644
--- a/skbio/io/registry.py
+++ b/skbio/io/registry.py
@@ -183,7 +183,7 @@ from . import (UnrecognizedFormatError, ArgumentOverrideWarning,
                FormatIdentificationWarning)
 from .util import _resolve_file, open_file, open_files, _d as _open_kwargs
 from skbio.util._misc import make_sentinel, find_sentinels
-from skbio.util._decorator import stable
+from skbio.util._decorator import stable, classonlymethod
 
 FileSentinel = make_sentinel("FileSentinel")
 
@@ -619,7 +619,7 @@ class IORegistry(object):
         """Add read method if any formats have a reader for `cls`."""
         read_formats = registry.list_read_formats(cls)
 
-        @classmethod
+        @classonlymethod
         def read(cls, file, format=None, **kwargs):
             return registry.read(file, into=cls, format=format, **kwargs)
 
@@ -853,9 +853,9 @@ class Format(object):
         ...         return True, {'version': version}
         ...     return False, {}
         ...
-        >>> myformat_sniffer([u"myformat2\\n", u"some content\\n"])
+        >>> myformat_sniffer(["myformat2\\n", "some content\\n"])
         (True, {'version': 2})
-        >>> myformat_sniffer([u"something else\\n"])
+        >>> myformat_sniffer(["something else\\n"])
         (False, {})
 
         """
@@ -946,9 +946,9 @@ class Format(object):
         ...     return MyObject(fh.readlines()[1:])
         ...
         >>> registry.monkey_patch() # If developing skbio, this isn't needed
-        >>> MyObject.read([u"myformat2\\n", u"some content here!\\n"],
+        >>> MyObject.read(["myformat2\\n", "some content here!\\n"],
         ...               format='myformat').content
-        [u'some content here!\\n']
+        ['some content here!\\n']
 
         """
         self._check_registration(cls)
@@ -1031,14 +1031,14 @@ class Format(object):
         ...
         >>> @myformat.writer(MyObject)
         ... def myformat_reader(obj, fh):
-        ...     fh.write(u"myformat2\\n")
+        ...     fh.write("myformat2\\n")
         ...     for c in obj.content:
         ...         fh.write(c)
         ...
         >>> registry.monkey_patch() # If developing skbio, this isn't needed
-        >>> obj = MyObject([u"some content here!\\n"])
+        >>> obj = MyObject(["some content here!\\n"])
         >>> obj.write([], format='myformat')
-        [u'myformat2\\n', u'some content here!\\n']
+        ['myformat2\\n', 'some content here!\\n']
 
         """
         self._check_registration(cls)
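read is now attached with classonlymethod rather than classmethod, so the
registry-provided reader is reachable from the class but not from an instance.
A sketch of the intended user-facing behaviour (that instance access raises
AttributeError is an assumption based on the decorator's name):

    from skbio import DNA

    # Fine: read is provided on the class by the IO registry.
    seq = DNA.read(['>seq1\n', 'ACGT\n'], format='fasta')

    # Expected to fail: instances no longer expose `read`.
    try:
        seq.read(['>seq2\n', 'AACC\n'], format='fasta')
    except AttributeError as err:
        print(err)
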
diff --git a/skbio/io/tests/test_registry.py b/skbio/io/tests/test_registry.py
index 376a713..da0f9b3 100644
--- a/skbio/io/tests/test_registry.py
+++ b/skbio/io/tests/test_registry.py
@@ -23,7 +23,8 @@ from skbio.io import (FormatIdentificationWarning, UnrecognizedFormatError,
 from skbio.io.registry import (IORegistry, FileSentinel, Format,
                                DuplicateRegistrationError,
                                InvalidRegistrationError)
-from skbio.util import TestingUtilError, get_data_path
+from skbio.util import get_data_path
+from skbio.util._exception import TestingUtilError
 from skbio import DNA, read, write
 
 
diff --git a/skbio/io/tests/test_util.py b/skbio/io/tests/test_util.py
index 7cdeef5..88b4480 100644
--- a/skbio/io/tests/test_util.py
+++ b/skbio/io/tests/test_util.py
@@ -7,6 +7,7 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
+import six
 
 import unittest
 import tempfile
@@ -533,9 +534,19 @@ class TestIterableReaderWriter(unittest.TestCase):
             self.assertIsInstance(result, io.TextIOBase)
             self.assertEqual(result.readlines(), l)
 
+    def test_open_invalid_iterable_missing_u(self):
+        is_py2 = six.PY2
+        six.PY2 = True
+        try:
+            with six.assertRaisesRegex(self, skbio.io.IOSourceError,
+                                       ".*Prepend.*`u`.*"):
+                skbio.io.open([b'abc'])
+        finally:
+            six.PY2 = is_py2
+
     def test_open_invalid_iterable(self):
         with self.assertRaises(skbio.io.IOSourceError):
-            skbio.io.open([b'abc'])
+            skbio.io.open([1, 2, 3])
 
     def test_open_empty_iterable(self):
         with skbio.io.open([]) as result:
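
The new test pins down the friendlier error raised on Python 2 when a
byte-string iterable is passed (a bare string literal is bytes there, so the
message suggests prepending `u`). For reference, a sketch of the supported
iterable sources on Python 3:

    import skbio.io

    # An iterable of text lines is treated like a file.
    with skbio.io.open(['>seq1\n', 'ACGT\n']) as fh:
        print(fh.read())

    # Anything that is not text (or a path/filehandle) is rejected.
    try:
        skbio.io.open([1, 2, 3])
    except skbio.io.IOSourceError as err:
        print(err)
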
diff --git a/skbio/io/util.py b/skbio/io/util.py
index 161c599..2a35f9b 100644
--- a/skbio/io/util.py
+++ b/skbio/io/util.py
@@ -221,10 +221,10 @@ def open_file(file, **kwargs):
     --------
     Here our input isn't a filehandle and so `f` will get closed.
 
-    >>> with open_file([u'a\n']) as f:
+    >>> with open_file(['a\n']) as f:
     ...     f.read()
     ...
-    u'a\n'
+    'a\n'
     >>> f.closed
     True
 
@@ -237,7 +237,7 @@ def open_file(file, **kwargs):
     >>> with open_file(file) as f:
     ...     f.read()
     ...
-    u'a\nb\nc\n'
+    'a\nb\nc\n'
     >>> f.closed
     False
     >>> file.closed
diff --git a/skbio/sequence/__init__.py b/skbio/sequence/__init__.py
index 77b0e43..08ee93a 100644
--- a/skbio/sequence/__init__.py
+++ b/skbio/sequence/__init__.py
@@ -4,20 +4,19 @@ Sequences (:mod:`skbio.sequence`)
 
 .. currentmodule:: skbio.sequence
 
-This module provides classes for storing and working with biological sequences,
-including generic sequences which have no restrictions on which characters can
-be included, and sequences based on IUPAC-defined sets of allowed characters
-(including degenerate characters), including ``DNA``, ``RNA`` and ``Protein``
-sequences. Common operations are defined as methods, for example computing the
-reverse complement of a DNA sequence, or searching for N-glycosylation motifs
-in ``Protein`` sequences. Class attributes are available to obtain valid
-character sets, complement maps for different sequence types, and for obtaining
-degenerate character definitions. Additionally this module defines the
-``GeneticCode`` class, which represents an immutable object that translates DNA
-or RNA sequences into protein sequences.
+This module provides classes for storing and working with sequences, including
+generic/nonbiological sequences which have no alphabet restrictions
+(``Sequence``) and sequences based on IUPAC-defined alphabets (``DNA``,
+``RNA``, ``Protein``). Common operations are defined as methods, for example
+computing the reverse complement of a DNA sequence, or searching for
+N-glycosylation motifs in protein sequences. Class attributes provide valid
+character sets, complement maps for different sequence types, and degenerate
+character definitions. Additionally this module defines the ``GeneticCode``
+class, which represents an immutable object that translates DNA or RNA
+sequences into protein sequences.
 
 The primary information stored for each different type of sequence object is
-the underlying sequence data itself. This is stored as an immutable Numpy
+the underlying sequence data itself. This is stored as an immutable numpy
 array. Additionally, each type of sequence may include optional metadata
 and positional metadata. Note that metadata and positional metadata are
 mutable.
@@ -36,16 +35,29 @@ Classes
 
 Examples
 --------
->>> from skbio import DNA, RNA
-
-New sequences are created with optional metadata and positional metadata
-fields. Metadata is stored as a Python dict, while positional metadata
-becomes a Pandas DataFrame.
+New sequences are created with optional metadata and positional metadata.
+Metadata is stored as a Python ``dict``, while positional metadata is stored as
+a pandas ``DataFrame``.
 
->>> d = DNA('ACCGGGTA')
+>>> from skbio import DNA, RNA
 >>> d = DNA('ACCGGGTA', metadata={'id':"my-sequence", 'description':"GFP"},
 ...          positional_metadata={'quality':[22, 25, 22, 18, 23, 25, 25, 25]})
->>> d = DNA('ACCGGTA', metadata={'id':"my-sequence"})
+>>> d
+DNA
+-----------------------------
+Metadata:
+    'description': 'GFP'
+    'id': 'my-sequence'
+Positional metadata:
+    'quality': <dtype: int64>
+Stats:
+    length: 8
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 62.50%
+-----------------------------
+0 ACCGGGTA
 
 New sequences can also be created from existing sequences, for example as their
 reverse complement or degapped (i.e., unaligned) version.
@@ -267,11 +279,11 @@ True
 
 Class-level methods contain information about the molecule types.
 
->>> DNA.degenerate_map['B']
-set(['C', 'T', 'G'])
+>>> sorted(DNA.degenerate_map['B'])
+['C', 'G', 'T']
 
->>> RNA.degenerate_map['B']
-set(['C', 'U', 'G'])
+>>> sorted(RNA.degenerate_map['B'])
+['C', 'G', 'U']
 
 """
 
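As the reworked docstring above notes, positional metadata is a pandas
DataFrame aligned to the sequence, so it slices together with the sequence. A
brief sketch:

    from skbio import DNA

    d = DNA('ACCGGGTA', metadata={'id': 'my-sequence'},
            positional_metadata={'quality': [22, 25, 22, 18, 23, 25, 25, 25]})

    # Column access and the usual pandas operations work directly.
    print(d.positional_metadata['quality'].mean())

    # Slicing the sequence slices its positional metadata as well.
    print(d[2:5].positional_metadata)
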
diff --git a/skbio/sequence/_base.py b/skbio/sequence/_base.py
deleted file mode 100644
index 9e71c2b..0000000
--- a/skbio/sequence/_base.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-
-
-class ElasticLines(object):
-    """Store blocks of content separated by dashed lines.
-
-    Each dashed line (separator) is as long as the longest content
-    (non-separator) line.
-
-    """
-
-    def __init__(self):
-        self._lines = []
-        self._separator_idxs = []
-        self._max_line_len = -1
-
-    def add_line(self, line):
-        line_len = len(line)
-        if line_len > self._max_line_len:
-            self._max_line_len = line_len
-        self._lines.append(line)
-
-    def add_lines(self, lines):
-        for line in lines:
-            self.add_line(line)
-
-    def add_separator(self):
-        self._lines.append(None)
-        self._separator_idxs.append(len(self._lines) - 1)
-
-    def to_str(self):
-        separator = '-' * self._max_line_len
-        for idx in self._separator_idxs:
-            self._lines[idx] = separator
-        return '\n'.join(self._lines)
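
ElasticLines is not removed outright; it moves to skbio._base (see the import
change in _genetic_code.py below). Following the implementation deleted above,
a quick usage sketch:

    from skbio._base import ElasticLines

    el = ElasticLines()
    el.add_line('GeneticCode (Standard)')
    el.add_separator()
    el.add_lines(['AAA -> K', 'AAC -> N'])
    el.add_separator()

    # Separators are rendered as dashed lines as wide as the longest
    # content line.
    print(el.to_str())
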
diff --git a/skbio/sequence/_dna.py b/skbio/sequence/_dna.py
index d4abd77..cf788f4 100644
--- a/skbio/sequence/_dna.py
+++ b/skbio/sequence/_dna.py
@@ -8,9 +8,9 @@
 
 from __future__ import absolute_import, division, print_function
 
+import skbio
 from skbio.util._decorator import classproperty, overrides
 from skbio.util._decorator import stable
-from ._rna import RNA
 from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
 from ._iupac_sequence import IUPACSequence
 
@@ -30,6 +30,14 @@ class DNA(IUPACSequence, NucleotideMixin):
         Arbitrary per-character metadata. For example, quality data from
         sequencing reads. Must be able to be passed directly to the Pandas
         DataFrame constructor.
+    lowercase : bool or str, optional
+        If ``True``, lowercase sequence characters will be converted to
+        uppercase characters in order to be valid IUPAC DNA characters. If
+        ``False``, no characters will be converted. If a str, it will be
+        treated as a key into the positional metadata of the object. All
+        lowercase characters will be converted to uppercase, and a ``True``
+        value will be stored in a boolean array in the positional metadata
+        under the key.
     validate : bool, optional
         If ``True``, validation will be performed to ensure that all sequence
         characters are in the IUPAC DNA character set. If ``False``, validation
@@ -39,14 +47,6 @@ class DNA(IUPACSequence, NucleotideMixin):
         work or behave as expected.** Only turn off validation if you are
         certain that the sequence characters are valid. To store sequence data
         that is not IUPAC-compliant, use ``Sequence``.
-    lowercase : bool or str, optional
-        If ``True``, lowercase sequence characters will be converted to
-        uppercase characters in order to be valid IUPAC DNA characters. If
-        ``False``, no characters will be converted. If a str, it will be
-        treated as a key into the positional metadata of the object. All
-        lowercase characters will be converted to uppercase, and a ``True``
-        value will be stored in a boolean array in the positional metadata
-        under the key.
 
     Attributes
     ----------
@@ -55,6 +55,7 @@ class DNA(IUPACSequence, NucleotideMixin):
     positional_metadata
     alphabet
     gap_chars
+    default_gap_char
     nondegenerate_chars
     degenerate_chars
     degenerate_map
@@ -198,8 +199,9 @@ class DNA(IUPACSequence, NucleotideMixin):
             positional_metadata = self.positional_metadata
 
         # turn off validation because `seq` is guaranteed to be valid
-        return RNA(seq, metadata=metadata,
-                   positional_metadata=positional_metadata, validate=False)
+        return skbio.RNA(seq, metadata=metadata,
+                         positional_metadata=positional_metadata,
+                         validate=False)
 
     @stable(as_of="0.4.0")
     def translate(self, *args, **kwargs):
@@ -222,6 +224,7 @@ class DNA(IUPACSequence, NucleotideMixin):
 
         See Also
         --------
+        RNA.reverse_transcribe
         RNA.translate
         translate_six_frames
         transcribe
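
The ``lowercase`` keyword documented above behaves the same way for ``DNA``,
``RNA``, and ``Protein``. A minimal sketch of that behavior, based on the
constructor docstrings and the new validation message in this diff (repr
output omitted; the ``'introns'`` key is just an illustrative name):

    from skbio import DNA

    # DNA('ACgt') raises ValueError by default: lowercase letters are not
    # valid IUPAC characters unless `lowercase` is passed.

    # lowercase=True silently converts the input to uppercase.
    seq = DNA('ACgt', lowercase=True)
    print(str(seq))                                     # ACGT

    # Passing a string instead records which positions were lowercase in a
    # boolean positional metadata column stored under that key.
    seq = DNA('ACgt', lowercase='introns')
    print(str(seq))                                     # ACGT
    print(seq.positional_metadata['introns'].tolist())  # [False, False, True, True]
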
diff --git a/skbio/sequence/_genetic_code.py b/skbio/sequence/_genetic_code.py
index cd9950f..5856be3 100644
--- a/skbio/sequence/_genetic_code.py
+++ b/skbio/sequence/_genetic_code.py
@@ -11,10 +11,10 @@ from __future__ import absolute_import, division, print_function
 import numpy as np
 from future.builtins import range
 
-from skbio.util._decorator import classproperty, stable
+from skbio.util._decorator import classproperty, stable, classonlymethod
 from skbio._base import SkbioObject
 from skbio.sequence import Protein, RNA
-from skbio.sequence._base import ElasticLines
+from skbio._base import ElasticLines
 
 
 class GeneticCode(SkbioObject):
@@ -141,7 +141,7 @@ class GeneticCode(SkbioObject):
             cls.__offset_table = table
         return cls.__offset_table
 
-    @classmethod
+    @classonlymethod
     @stable(as_of="0.4.0")
     def from_ncbi(cls, table_id=1):
         """Return NCBI genetic code specified by table ID.
diff --git a/skbio/sequence/_iupac_sequence.py b/skbio/sequence/_iupac_sequence.py
index e777936..3de92db 100644
--- a/skbio/sequence/_iupac_sequence.py
+++ b/skbio/sequence/_iupac_sequence.py
@@ -13,9 +13,11 @@ from abc import ABCMeta, abstractproperty
 from itertools import product
 
 import numpy as np
-from six import string_types
 
-from skbio.util._decorator import classproperty, overrides
+import re
+
+from skbio.util._decorator import (classproperty, overrides, stable,
+                                   experimental)
 from skbio.util._misc import MiniRegistry
 from ._sequence import Sequence
 
@@ -32,6 +34,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
     positional_metadata
     alphabet
     gap_chars
+    default_gap_char
     nondegenerate_chars
     degenerate_chars
     degenerate_map
@@ -55,11 +58,6 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
        A Cornish-Bowden
 
     """
-    # ASCII is built such that the difference between uppercase and lowercase
-    # is the 6th bit.
-    _ascii_invert_case_bit_offset = 32
-    _number_of_extended_ascii_codes = 256
-    _ascii_lowercase_boundary = 90
     __validation_mask = None
     __degenerate_codes = None
     __nondegenerate_codes = None
@@ -97,6 +95,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         return cls.__gap_codes
 
     @classproperty
+    @stable(as_of='0.4.0')
     def alphabet(cls):
         """Return valid IUPAC characters.
 
@@ -111,6 +110,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         return cls.degenerate_chars | cls.nondegenerate_chars | cls.gap_chars
 
     @classproperty
+    @stable(as_of='0.4.0')
     def gap_chars(cls):
         """Return characters defined as gaps.
 
@@ -123,6 +123,24 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         return set('-.')
 
     @classproperty
+    @experimental(as_of='0.4.1')
+    def default_gap_char(cls):
+        """Gap character to use when constructing a new gapped sequence.
+
+        This character is used when it is necessary to represent gap characters
+        in a new sequence. For example, a majority consensus sequence will use
+        this character to represent gaps.
+
+        Returns
+        -------
+        str
+            Default gap character.
+
+        """
+        return '-'
+
+    @classproperty
+    @stable(as_of='0.4.0')
     def degenerate_chars(cls):
         """Return degenerate IUPAC characters.
 
@@ -136,6 +154,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
 
     @abstractproperty
     @classproperty
+    @stable(as_of='0.4.0')
     def nondegenerate_chars(cls):
         """Return non-degenerate IUPAC characters.
 
@@ -149,6 +168,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
 
     @abstractproperty
     @classproperty
+    @stable(as_of='0.4.0')
     def degenerate_map(cls):
         """Return mapping of degenerate to non-degenerate characters.
 
@@ -167,31 +187,13 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
 
     @overrides(Sequence)
     def __init__(self, sequence, metadata=None, positional_metadata=None,
-                 validate=True, lowercase=False):
+                 lowercase=False, validate=True):
         super(IUPACSequence, self).__init__(
-            sequence, metadata, positional_metadata)
-
-        if lowercase is False:
-            pass
-        elif lowercase is True or isinstance(lowercase, string_types):
-            lowercase_mask = self._bytes > self._ascii_lowercase_boundary
-            self._convert_to_uppercase(lowercase_mask)
-
-            # If it isn't True, it must be a string_type
-            if not (lowercase is True):
-                self.positional_metadata[lowercase] = lowercase_mask
-        else:
-            raise TypeError("lowercase keyword argument expected a bool or "
-                            "string, but got %s" % type(lowercase))
+            sequence, metadata, positional_metadata, lowercase)
 
         if validate:
             self._validate()
 
-    def _convert_to_uppercase(self, lowercase):
-        if np.any(lowercase):
-            with self._byte_ownership():
-                self._bytes[lowercase] ^= self._ascii_invert_case_bit_offset
-
     def _validate(self):
         # This is the fastest way that we have found to identify the
         # presence or absence of certain characters (numbers).
@@ -207,58 +209,17 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
             bad = list(np.where(
                 invalid_characters > 0)[0].astype(np.uint8).view('|S1'))
             raise ValueError(
-                "Invalid character%s in sequence: %r. Valid IUPAC characters: "
+                "Invalid character%s in sequence: %r. \n"
+                "Lowercase letters are not used in IUPAC notation. You can "
+                "pass `lowercase=True` if your sequence contains lowercase "
+                "letters.\n"
+                "Valid IUPAC characters: "
                 "%r" % ('s' if len(bad) > 1 else '',
                         [str(b.tostring().decode("ascii")) for b in bad] if
                         len(bad) > 1 else bad[0],
                         list(self.alphabet)))
 
-    def lowercase(self, lowercase):
-        """Return a case-sensitive string representation of the sequence.
-
-        Parameters
-        ----------
-        lowercase: str or boolean vector
-            If lowercase is a boolean vector, it is used to set sequence
-            characters to lowercase in the output string. True values in the
-            boolean vector correspond to lowercase characters. If lowercase
-            is a str, it is treated like a key into the positional metadata,
-            pointing to a column which must be a boolean vector.
-            That boolean vector is then used as described previously.
-
-        Returns
-        -------
-        str
-            String representation of sequence with specified characters set to
-            lowercase.
-
-        Examples
-        --------
-        >>> from skbio import DNA
-        >>> s = DNA('ACGT')
-        >>> s.lowercase([True, True, False, False])
-        'acGT'
-        >>> s = DNA('ACGT',
-        ...         positional_metadata={'exons': [True, False, False, True]})
-        >>> s.lowercase('exons')
-        'aCGt'
-
-        Constructor automatically populates a column in positional metadata
-        when the ``lowercase`` keyword argument is provided with a column name:
-
-        >>> s = DNA('ACgt', lowercase='introns')
-        >>> s.lowercase('introns')
-        'ACgt'
-        >>> s = DNA('ACGT', lowercase='introns')
-        >>> s.lowercase('introns')
-        'ACGT'
-
-        """
-        index = self._munge_to_index_array(lowercase)
-        outbytes = self._bytes.copy()
-        outbytes[index] ^= self._ascii_invert_case_bit_offset
-        return str(outbytes.tostring().decode('ascii'))
-
+    @stable(as_of='0.4.0')
     def gaps(self):
         """Find positions containing gaps in the biological sequence.
 
@@ -282,6 +243,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         """
         return np.in1d(self._bytes, self._gap_codes)
 
+    @stable(as_of='0.4.0')
     def has_gaps(self):
         """Determine if the sequence contains one or more gap characters.
 
@@ -306,6 +268,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         # TODO: cache results
         return bool(self.gaps().any())
 
+    @stable(as_of='0.4.0')
     def degenerates(self):
         """Find positions containing degenerate characters in the sequence.
 
@@ -331,6 +294,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         """
         return np.in1d(self._bytes, self._degenerate_codes)
 
+    @stable(as_of='0.4.0')
     def has_degenerates(self):
         """Determine if sequence contains one or more degenerate characters.
 
@@ -361,6 +325,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         # TODO: cache results
         return bool(self.degenerates().any())
 
+    @stable(as_of='0.4.0')
     def nondegenerates(self):
         """Find positions containing non-degenerate characters in the sequence.
 
@@ -386,6 +351,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         """
         return np.in1d(self._bytes, self._nondegenerate_codes)
 
+    @stable(as_of='0.4.0')
     def has_nondegenerates(self):
         """Determine if sequence contains one or more non-degenerate characters
 
@@ -415,6 +381,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         # TODO: cache results
         return bool(self.nondegenerates().any())
 
+    @stable(as_of='0.4.0')
     def degap(self):
         """Return a new sequence with gap characters removed.
 
@@ -456,6 +423,7 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         """
         return self[np.invert(self.gaps())]
 
+    @stable(as_of='0.4.0')
     def expand_degenerates(self):
         """Yield all possible non-degenerate versions of the sequence.
 
@@ -523,6 +491,40 @@ class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
         return (self._to(sequence=''.join(nondegen_seq)) for nondegen_seq in
                 result)
 
+    @stable(as_of='0.4.1')
+    def to_regex(self):
+        """Return regular expression object that accounts for degenerate chars.
+
+        Returns
+        -------
+        regex
+            Pre-compiled regular expression object (as from ``re.compile``)
+            that matches all non-degenerate versions of this sequence, and
+            nothing else.
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> seq = DNA('TRG')
+        >>> regex = seq.to_regex()
+        >>> regex.match('TAG').string
+        'TAG'
+        >>> regex.match('TGG').string
+        'TGG'
+        >>> regex.match('TCG') is None
+        True
+
+        """
+        regex_string = []
+        for base in str(self):
+            if base in self.degenerate_chars:
+                regex_string.append('[{0}]'.format(
+                    ''.join(self.degenerate_map[base])))
+            else:
+                regex_string.append(base)
+        return re.compile(''.join(regex_string))
+
+    @stable(as_of='0.4.0')
     def find_motifs(self, motif_type, min_length=1, ignore=None):
         """Search the biological sequence for motifs.
 
diff --git a/skbio/sequence/_nucleotide_mixin.py b/skbio/sequence/_nucleotide_mixin.py
index 6534c7e..9b5c75c 100644
--- a/skbio/sequence/_nucleotide_mixin.py
+++ b/skbio/sequence/_nucleotide_mixin.py
@@ -13,7 +13,7 @@ from abc import ABCMeta, abstractproperty
 
 import numpy as np
 
-from skbio.util._decorator import classproperty
+from skbio.util._decorator import classproperty, stable
 from ._iupac_sequence import _motifs as parent_motifs
 
 
@@ -59,6 +59,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
 
     @abstractproperty
     @classproperty
+    @stable(as_of='0.4.0')
     def complement_map(cls):
         """Return mapping of nucleotide characters to their complements.
 
@@ -75,6 +76,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
         """
         return set()  # pragma: no cover
 
+    @stable(as_of='0.4.0')
     def complement(self, reverse=False):
         """Return the complement of the nucleotide sequence.
 
@@ -151,6 +153,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
             complement = complement[::-1]
         return complement
 
+    @stable(as_of='0.4.0')
     def reverse_complement(self):
         """Return the reverse complement of the nucleotide sequence.
 
@@ -196,6 +199,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
         """
         return self.complement(reverse=True)
 
+    @stable(as_of='0.4.0')
     def is_reverse_complement(self, other):
         """Determine if a sequence is the reverse complement of this sequence.
 
@@ -242,6 +246,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
             # underlying sequence data
             return self.reverse_complement()._string == other._string
 
+    @stable(as_of='0.4.0')
     def gc_content(self):
         """Calculate the relative frequency of G's and C's in the sequence.
 
@@ -286,6 +291,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
         """
         return self.gc_frequency(relative=True)
 
+    @stable(as_of='0.4.0')
     def gc_frequency(self, relative=False):
         """Calculate frequency of G's and C's in the sequence.
 
diff --git a/skbio/sequence/_protein.py b/skbio/sequence/_protein.py
index e1f7863..14b3e4e 100644
--- a/skbio/sequence/_protein.py
+++ b/skbio/sequence/_protein.py
@@ -30,6 +30,14 @@ class Protein(IUPACSequence):
         Arbitrary per-character metadata. For example, quality data from
         sequencing reads. Must be able to be passed directly to the Pandas
         DataFrame constructor.
+    lowercase : bool or str, optional
+        If ``True``, lowercase sequence characters will be converted to
+        uppercase characters in order to be valid IUPAC Protein characters. If
+        ``False``, no characters will be converted. If a str, it will be
+        treated as a key into the positional metadata of the object. All
+        lowercase characters will be converted to uppercase, and a ``True``
+        value will be stored in a boolean array in the positional metadata
+        under the key.
     validate : bool, optional
         If ``True``, validation will be performed to ensure that all sequence
         characters are in the IUPAC protein character set. If ``False``,
@@ -39,15 +47,6 @@ class Protein(IUPACSequence):
         will work or behave as expected.** Only turn off validation if you are
         certain that the sequence characters are valid. To store sequence data
         that is not IUPAC-compliant, use ``Sequence``.
-    lowercase : bool or str, optional
-        If ``True``, lowercase sequence characters will be converted to
-        uppercase characters in order to be valid IUPAC Protein characters. If
-        ``False``, no characters will be converted. If a str, it will be
-        treated as a key into the positional metadata of the object. All
-        lowercase characters will be converted to uppercase, and a ``True``
-        value will be stored in a boolean array in the positional metadata
-        under the key.
-
 
     Attributes
     ----------
@@ -56,6 +55,7 @@ class Protein(IUPACSequence):
     positional_metadata
     alphabet
     gap_chars
+    default_gap_char
     stop_chars
     nondegenerate_chars
     degenerate_chars
diff --git a/skbio/sequence/_repr.py b/skbio/sequence/_repr.py
new file mode 100644
index 0000000..2a1249d
--- /dev/null
+++ b/skbio/sequence/_repr.py
@@ -0,0 +1,108 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import math
+
+from skbio.util._misc import chunk_str
+from skbio.util._metadata_repr import _MetadataReprBuilder
+
+
+class _SequenceReprBuilder(_MetadataReprBuilder):
+    """Build a ``Sequence`` repr.
+
+    Parameters
+    ----------
+    seq : Sequence
+        Sequence to repr.
+    width : int
+        Maximum width of the repr.
+    indent : int
+        Number of spaces to use for indented lines.
+    chunk_size : int
+        Number of characters in each chunk of a sequence.
+
+    """
+    def __init__(self, seq, width, indent, chunk_size):
+        super(_SequenceReprBuilder, self).__init__(seq, width, indent)
+        self._chunk_size = chunk_size
+
+    def _process_header(self):
+        cls_name = self._obj.__class__.__name__
+        self._lines.add_line(cls_name)
+        self._lines.add_separator()
+
+    def _process_data(self):
+        num_lines, num_chars, column_width = self._find_optimal_seq_chunking()
+
+        # display entire sequence if we can, else display the first two and
+        # last two lines separated by ellipsis
+        if num_lines <= 5:
+            self._lines.add_lines(self._format_chunked_seq(
+                range(num_lines), num_chars, column_width))
+        else:
+            self._lines.add_lines(self._format_chunked_seq(
+                range(2), num_chars, column_width))
+            self._lines.add_line('...')
+            self._lines.add_lines(self._format_chunked_seq(
+                range(num_lines - 2, num_lines), num_chars, column_width))
+
+    def _find_optimal_seq_chunking(self):
+        """Find the optimal number of sequence chunks to fit on a single line.
+
+        Returns the number of lines the sequence will occupy, the number of
+        sequence characters displayed on each line, and the column width
+        necessary to display position info using the optimal number of sequence
+        chunks.
+
+        """
+        # strategy: use an iterative approach to find the optimal number of
+        # sequence chunks per line. start with a single chunk and increase
+        # until the max line width is exceeded. when this happens, the previous
+        # number of chunks is optimal
+        num_lines = 0
+        num_chars = 0
+        column_width = 0
+
+        num_chunks = 1
+        not_exceeded = True
+        while not_exceeded:
+            line_len, new_chunk_info = self._compute_chunked_seq_line_len(
+                num_chunks)
+            not_exceeded = line_len <= self._width
+            if not_exceeded:
+                num_lines, num_chars, column_width = new_chunk_info
+                num_chunks += 1
+        return num_lines, num_chars, column_width
+
+    def _compute_chunked_seq_line_len(self, num_chunks):
+        """Compute line length based on a number of chunks."""
+        num_chars = num_chunks * self._chunk_size
+
+        # ceil to account for partial line
+        num_lines = int(math.ceil(len(self._obj) / num_chars))
+
+        # position column width is fixed width, based on the number of
+        # characters necessary to display the position of the final line (all
+        # previous positions will be left justified using this width)
+        column_width = len('%d ' % ((num_lines - 1) * num_chars))
+
+        # column width + number of sequence characters + spaces between chunks
+        line_len = column_width + num_chars + (num_chunks - 1)
+        return line_len, (num_lines, num_chars, column_width)
+
+    def _format_chunked_seq(self, line_idxs, num_chars, column_width):
+        """Format specified lines of chunked sequence data."""
+        lines = []
+        for line_idx in line_idxs:
+            seq_idx = line_idx * num_chars
+            chars = str(self._obj[seq_idx:seq_idx+num_chars])
+            chunked_chars = chunk_str(chars, self._chunk_size, ' ')
+            lines.append(('%d' % seq_idx).ljust(column_width) + chunked_chars)
+        return lines
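
For illustration, the chunking arithmetic used by ``_SequenceReprBuilder``
above can be reproduced standalone; the sequence length (123), chunk size
(10), and maximum width (71) below are made-up inputs rather than values
taken from scikit-bio:

    import math

    seq_len, chunk_size, max_width = 123, 10, 71

    best = None
    num_chunks = 1
    while True:
        num_chars = num_chunks * chunk_size
        num_lines = int(math.ceil(seq_len / num_chars))
        # the position column is sized to fit the index of the final line
        column_width = len('%d ' % ((num_lines - 1) * num_chars))
        line_len = column_width + num_chars + (num_chunks - 1)
        if line_len > max_width:
            break
        best = (num_lines, num_chars, column_width)
        num_chunks += 1

    print(best)  # (3, 60, 4): 3 lines of 60 chars, 4-wide position column
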
diff --git a/skbio/sequence/_rna.py b/skbio/sequence/_rna.py
index e05d0a2..7e81da5 100644
--- a/skbio/sequence/_rna.py
+++ b/skbio/sequence/_rna.py
@@ -30,6 +30,14 @@ class RNA(IUPACSequence, NucleotideMixin):
         Arbitrary per-character metadata. For example, quality data from
         sequencing reads. Must be able to be passed directly to the Pandas
         DataFrame constructor.
+    lowercase : bool or str, optional
+        If ``True``, lowercase sequence characters will be converted to
+        uppercase characters in order to be valid IUPAC RNA characters. If
+        ``False``, no characters will be converted. If a str, it will be
+        treated as a key into the positional metadata of the object. All
+        lowercase characters will be converted to uppercase, and a ``True``
+        value will be stored in a boolean array in the positional metadata
+        under the key.
     validate : bool, optional
         If ``True``, validation will be performed to ensure that all sequence
         characters are in the IUPAC RNA character set. If ``False``, validation
@@ -39,15 +47,6 @@ class RNA(IUPACSequence, NucleotideMixin):
         work or behave as expected.** Only turn off validation if you are
         certain that the sequence characters are valid. To store sequence data
         that is not IUPAC-compliant, use ``Sequence``.
-    lowercase : bool or str, optional
-        If ``True``, lowercase sequence characters will be converted to
-        uppercase characters in order to be valid IUPAC RNA characters. If
-        ``False``, no characters will be converted. If a str, it will be
-        treated as a key into the positional metadata of the object. All
-        lowercase characters will be converted to uppercase, and a ``True``
-        value will be stored in a boolean array in the positional metadata
-        under the key.
-
 
     Attributes
     ----------
@@ -56,6 +55,7 @@ class RNA(IUPACSequence, NucleotideMixin):
     positional_metadata
     alphabet
     gap_chars
+    default_gap_char
     nondegenerate_chars
     degenerate_chars
     degenerate_map
@@ -136,6 +136,73 @@ class RNA(IUPACSequence, NucleotideMixin):
     def _motifs(self):
         return _motifs
 
+    @stable(as_of="0.4.1")
+    def reverse_transcribe(self):
+        """Reverse transcribe RNA into DNA.
+
+        The coding DNA strand of the RNA sequence is returned, i.e., uracil
+        (U) is replaced with thymine (T) in the reverse transcribed sequence.
+
+        Returns
+        -------
+        DNA
+            Reverse transcribed sequence.
+
+        See Also
+        --------
+        DNA.transcribe
+        translate
+        translate_six_frames
+
+        Notes
+        -----
+        RNA sequence's metadata and positional metadata are included in the
+        reverse transcribed DNA sequence.
+
+        Examples
+        --------
+        Reverse transcribe RNA into DNA:
+
+        >>> from skbio import RNA
+        >>> rna = RNA('UAACGUUA')
+        >>> rna
+        RNA
+        -----------------------------
+        Stats:
+            length: 8
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 25.00%
+        -----------------------------
+        0 UAACGUUA
+        >>> rna.reverse_transcribe()
+        DNA
+        -----------------------------
+        Stats:
+            length: 8
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 25.00%
+        -----------------------------
+        0 TAACGTTA
+        """
+        seq = self._string.replace(b'U', b'T')
+
+        metadata = None
+        if self.has_metadata():
+            metadata = self.metadata
+
+        positional_metadata = None
+        if self.has_positional_metadata():
+            positional_metadata = self.positional_metadata
+
+        # turn off validation because `seq` is guaranteed to be valid
+        return skbio.DNA(seq, metadata=metadata,
+                         positional_metadata=positional_metadata,
+                         validate=False)
+
     @stable(as_of="0.4.0")
     def translate(self, genetic_code=1, *args, **kwargs):
         """Translate RNA sequence into protein sequence.
diff --git a/skbio/sequence/_sequence.py b/skbio/sequence/_sequence.py
index 73ffe70..34150b6 100644
--- a/skbio/sequence/_sequence.py
+++ b/skbio/sequence/_sequence.py
@@ -7,17 +7,13 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
-from future.builtins import range
+from future.builtins import range, zip
 from future.utils import viewitems
 import six
 
-import itertools
-import math
 import re
 import collections
-import copy
 import numbers
-import textwrap
 from contextlib import contextmanager
 
 import numpy as np
@@ -25,19 +21,23 @@ from scipy.spatial.distance import hamming
 
 import pandas as pd
 
-from skbio._base import SkbioObject
-from skbio.sequence._base import ElasticLines
-from skbio.util._misc import chunk_str
-from skbio.util._decorator import stable, experimental
+from skbio._base import SkbioObject, MetadataMixin, PositionalMetadataMixin
+from skbio.sequence._repr import _SequenceReprBuilder
+from skbio.util._decorator import (stable, experimental, deprecated,
+                                   classonlymethod, overrides)
 
 
-class Sequence(collections.Sequence, SkbioObject):
-    """Store biological sequence data and optional associated metadata.
+class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
+               SkbioObject):
+    """Store generic sequence data and optional associated metadata.
 
-    ``Sequence`` objects do not enforce an alphabet and are thus the most
-    generic objects for storing biological sequence data. Subclasses ``DNA``,
-    ``RNA``, and ``Protein`` enforce the IUPAC character set [1]_ for, and
-    provide operations specific to, each respective molecule type.
+    ``Sequence`` objects do not enforce an alphabet or grammar and are thus the
+    most generic objects for storing sequence data. ``Sequence`` objects do not
+    necessarily represent biological sequences. For example, ``Sequence`` can
+    be used to represent a position in a multiple sequence alignment.
+    Subclasses ``DNA``, ``RNA``, and ``Protein`` enforce the IUPAC character
+    set [1]_ for, and provide operations specific to, each respective molecule
+    type.
 
     ``Sequence`` objects consist of the underlying sequence data, as well
     as optional metadata and positional metadata. The underlying sequence
@@ -46,22 +46,30 @@ class Sequence(collections.Sequence, SkbioObject):
     Parameters
     ----------
     sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
-        Characters representing the biological sequence itself.
+        Characters representing the sequence itself.
     metadata : dict, optional
         Arbitrary metadata which applies to the entire sequence. A shallow copy
         of the ``dict`` will be made (see Examples section below for details).
     positional_metadata : pd.DataFrame consumable, optional
         Arbitrary per-character metadata (e.g., sequence read quality
         scores). Must be able to be passed directly to ``pd.DataFrame``
-        constructor. Each column of metadata must be the same length as the
-        biological sequence. A shallow copy of the positional metadata will be
-        made if necessary (see Examples section below for details).
+        constructor. Each column of metadata must be the same length as
+        `sequence`. A shallow copy of the positional metadata will be made if
+        necessary (see Examples section below for details).
+    lowercase : bool or str, optional
+        If ``True``, lowercase sequence characters will be converted to
+        uppercase characters. If ``False``, no characters will be converted.
+        If a str, it will be treated as a key into the positional metadata of
+        the object. All lowercase characters will be converted to uppercase,
+        and a ``True`` value will be stored in a boolean array in the
+        positional metadata under the key.
 
     Attributes
     ----------
     values
     metadata
     positional_metadata
+    observed_chars
 
     See Also
     --------
@@ -105,7 +113,7 @@ class Sequence(collections.Sequence, SkbioObject):
     Sequence
     -----------------------------
     Metadata:
-        'authors': <type 'list'>
+        'authors': <class 'list'>
         'desc': 'seq desc'
         'id': 'seq-id'
     Positional metadata:
@@ -121,17 +129,17 @@ class Sequence(collections.Sequence, SkbioObject):
     Retrieve underlying sequence:
 
     >>> seq.values # doctest: +NORMALIZE_WHITESPACE
-    array(['A', 'C', 'G', 'T'],
+    array([b'A', b'C', b'G', b'T'],
           dtype='|S1')
 
     Underlying sequence immutable:
 
-    >>> seq.values = np.array(['T', 'C', 'G', 'A'], dtype='|S1')
+    >>> seq.values = np.array([b'T', b'C', b'G', b'A'], dtype='|S1')
     Traceback (most recent call last):
         ...
     AttributeError: can't set attribute
 
-    >>> seq.values[0] = 'T'
+    >>> seq.values[0] = b'T'
     Traceback (most recent call last):
         ...
     ValueError: assignment destination is read-only
@@ -193,15 +201,15 @@ class Sequence(collections.Sequence, SkbioObject):
     >>> subseq = seq[1:3]
     >>> subseq
     Sequence
-    ----------------------------
+    -----------------------------
     Metadata:
-        'authors': <type 'list'>
+        'authors': <class 'list'>
         'desc': 'seq desc'
         'id': 'new-id'
         'pubmed': 12345
     Stats:
         length: 2
-    ----------------------------
+    -----------------------------
     0 CG
     >>> pprint(subseq.metadata)
     {'authors': ['Alice', 'Bob'],
@@ -312,6 +320,11 @@ class Sequence(collections.Sequence, SkbioObject):
     3      []       10
 
     """
+    _number_of_extended_ascii_codes = 256
+    # ASCII is built such that the difference between uppercase and lowercase
+    # is the 6th bit.
+    _ascii_invert_case_bit_offset = 32
+    _ascii_lowercase_boundary = 90
     default_write_format = 'fasta'
     __hash__ = None
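
The new class attributes above encode the usual ASCII case trick: an
uppercase letter and its lowercase counterpart differ only in the bit with
value 32, so XOR-ing that bit flips the case. A minimal sketch of the same
operation on a byte array, mirroring ``_convert_to_uppercase`` later in this
diff:

    import numpy as np

    assert ord('a') - ord('A') == 32
    assert chr(ord('g') ^ 32) == 'G'

    # For IUPAC letters, any byte above ord('Z') == 90 is lowercase.
    data = np.frombuffer(b'AcGt', dtype=np.uint8).copy()
    data[data > 90] ^= 32
    print(data.tobytes().decode('ascii'))  # ACGT
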
 
@@ -329,200 +342,187 @@ class Sequence(collections.Sequence, SkbioObject):
         >>> from skbio import Sequence
         >>> s = Sequence('AACGA')
         >>> s.values # doctest: +NORMALIZE_WHITESPACE
-        array(['A', 'A', 'C', 'G', 'A'],
+        array([b'A', b'A', b'C', b'G', b'A'],
               dtype='|S1')
 
         """
         return self._bytes.view('|S1')
 
     @property
-    @stable(as_of="0.4.0")
-    def metadata(self):
-        """``dict`` containing metadata which applies to the entire sequence.
+    @experimental(as_of="0.4.1")
+    def observed_chars(self):
+        """Set of observed characters in the sequence.
 
         Notes
         -----
-        This property can be set and deleted.
+        This property is not writeable.
 
         Examples
         --------
-        >>> from pprint import pprint
         >>> from skbio import Sequence
+        >>> s = Sequence('AACGAC')
+        >>> s.observed_chars == {'G', 'A', 'C'}
+        True
 
-        Create a sequence with metadata:
-
-        >>> s = Sequence('ACGTACGTACGTACGT',
-        ...              metadata={'id': 'seq-id',
-        ...                        'description': 'seq description'})
-        >>> s
-        Sequence
-        ------------------------------------
-        Metadata:
-            'description': 'seq description'
-            'id': 'seq-id'
-        Stats:
-            length: 16
-        ------------------------------------
-        0 ACGTACGTAC GTACGT
-
-        Retrieve metadata:
-
-        >>> pprint(s.metadata) # using pprint to display dict in sorted order
-        {'description': 'seq description', 'id': 'seq-id'}
-
-        Update metadata:
-
-        >>> s.metadata['id'] = 'new-id'
-        >>> s.metadata['pubmed'] = 12345
-        >>> pprint(s.metadata)
-        {'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
-
-        Set metadata:
+        """
+        return set(str(self))
 
-        >>> s.metadata = {'abc': 123}
-        >>> s.metadata
-        {'abc': 123}
+    @property
+    def _string(self):
+        return self._bytes.tostring()
 
-        Delete metadata:
+    @classonlymethod
+    @experimental(as_of="0.4.1")
+    def concat(cls, sequences, how='strict'):
+        """Concatenate an iterable of ``Sequence`` objects.
 
-        >>> s.has_metadata()
-        True
-        >>> del s.metadata
-        >>> s.metadata
-        {}
-        >>> s.has_metadata()
-        False
+        Parameters
+        ----------
+        sequences : iterable (Sequence)
+            An iterable of ``Sequence`` objects or appropriate subclasses.
+        how : {'strict', 'inner', 'outer'}, optional
+            How to join the `positional_metadata` of the sequences. If
+            'strict': the `positional_metadata` must have the exact same
+            columns; 'inner': an inner-join of the columns (only the shared
+            set of columns is used); 'outer': an outer-join of the columns
+            (all columns are used; missing values will be padded with NaN).
 
-        """
-        if self._metadata is None:
-            # not using setter to avoid copy
-            self._metadata = {}
-        return self._metadata
-
-    @metadata.setter
-    def metadata(self, metadata):
-        if not isinstance(metadata, dict):
-            raise TypeError("metadata must be a dict")
-        # shallow copy
-        self._metadata = metadata.copy()
-
-    @metadata.deleter
-    def metadata(self):
-        self._metadata = None
+        Returns
+        -------
+        Sequence
+            The returned sequence will be an instance of the class which
+            called this class-method.
 
-    @property
-    @stable(as_of="0.4.0")
-    def positional_metadata(self):
-        """``pd.DataFrame`` containing metadata on a per-character basis.
+        Raises
+        ------
+        ValueError
+            If `how` is not one of: 'strict', 'inner', or 'outer'.
+        ValueError
+            If `how` is 'strict' and the `positional_metadata` of each sequence
+            does not have the same columns.
+        TypeError
+            If the sequences cannot be cast as the calling class.
 
         Notes
         -----
-        This property can be set and deleted.
+        The sequence-wide metadata (``Sequence.metadata``) is not retained
+        during concatenation.
+
+        Sequence objects can be cast to a different type only when the new
+        type is an ancestor or child of the original type. Casting between
+        sibling types is not allowed, e.g., ``DNA`` -> ``RNA``; however,
+        ``DNA`` -> ``Sequence`` or ``Sequence`` -> ``DNA``
+        is allowed.
 
         Examples
         --------
-        Create a DNA sequence with positional metadata:
+        Concatenate two DNA sequences into a new DNA object:
 
-        >>> from skbio import DNA
-        >>> seq = DNA(
-        ...     'ACGT',
-        ...     positional_metadata={'quality': [3, 3, 20, 11],
-        ...                          'exons': [True, True, False, True]})
-        >>> seq
+        >>> from skbio import DNA, Sequence
+        >>> s1 = DNA("ACGT")
+        >>> s2 = DNA("GGAA")
+        >>> DNA.concat([s1, s2])
         DNA
         -----------------------------
-        Positional metadata:
-            'exons': <dtype: bool>
-            'quality': <dtype: int64>
         Stats:
-            length: 4
+            length: 8
             has gaps: False
             has degenerates: False
             has non-degenerates: True
             GC-content: 50.00%
         -----------------------------
-        0 ACGT
+        0 ACGTGGAA
 
-        Retrieve positional metadata:
+        Concatenate DNA sequences into a Sequence object (type coercion):
 
-        >>> seq.positional_metadata
-           exons  quality
-        0   True        3
-        1   True        3
-        2  False       20
-        3   True       11
-
-        Update positional metadata:
-
-        >>> seq.positional_metadata['gaps'] = seq.gaps()
-        >>> seq.positional_metadata
-           exons  quality   gaps
-        0   True        3  False
-        1   True        3  False
-        2  False       20  False
-        3   True       11  False
-
-        Set positional metadata:
-
-        >>> seq.positional_metadata = {'degenerates': seq.degenerates()}
-        >>> seq.positional_metadata
-          degenerates
-        0       False
-        1       False
-        2       False
-        3       False
+        >>> Sequence.concat([s1, s2])
+        Sequence
+        -------------
+        Stats:
+            length: 8
+        -------------
+        0 ACGTGGAA
 
-        Delete positional metadata:
+        Positional metadata is conserved:
 
-        >>> seq.has_positional_metadata()
-        True
-        >>> del seq.positional_metadata
-        >>> seq.positional_metadata
-        Empty DataFrame
-        Columns: []
-        Index: [0, 1, 2, 3]
-        >>> seq.has_positional_metadata()
-        False
+        >>> s1 = DNA('AcgT', lowercase='one')
+        >>> s2 = DNA('GGaA', lowercase='one',
+        ...          positional_metadata={'two': [1, 2, 3, 4]})
+        >>> result = DNA.concat([s1, s2], how='outer')
+        >>> result
+        DNA
+        -----------------------------
+        Positional metadata:
+            'one': <dtype: bool>
+            'two': <dtype: float64>
+        Stats:
+            length: 8
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 50.00%
+        -----------------------------
+        0 ACGTGGAA
+        >>> result.positional_metadata
+             one  two
+        0  False  NaN
+        1   True  NaN
+        2   True  NaN
+        3  False  NaN
+        4  False    1
+        5  False    2
+        6   True    3
+        7  False    4
 
         """
-        if self._positional_metadata is None:
-            # not using setter to avoid copy
-            self._positional_metadata = pd.DataFrame(
-                index=np.arange(len(self)))
-        return self._positional_metadata
-
-    @positional_metadata.setter
-    def positional_metadata(self, positional_metadata):
-        try:
-            # copy=True to copy underlying data buffer
-            positional_metadata = pd.DataFrame(positional_metadata, copy=True)
-        except pd.core.common.PandasError as e:
-            raise TypeError('Positional metadata invalid. Must be consumable '
-                            'by pd.DataFrame. Original pandas error message: '
-                            '"%s"' % e)
-
-        num_rows = len(positional_metadata.index)
-        if num_rows != len(self):
-            raise ValueError(
-                "Number of positional metadata values (%d) must match the "
-                "number of characters in the sequence (%d)." %
-                (num_rows, len(self)))
-
-        positional_metadata.reset_index(drop=True, inplace=True)
-        self._positional_metadata = positional_metadata
-
-    @positional_metadata.deleter
-    def positional_metadata(self):
-        self._positional_metadata = None
-
-    @property
-    def _string(self):
-        return self._bytes.tostring()
+        if how not in {'strict', 'inner', 'outer'}:
+            raise ValueError("`how` must be 'strict', 'inner', or 'outer'.")
+
+        seqs = list(sequences)
+        if len(seqs) == 0:
+            return cls("")
+
+        for seq in seqs:
+            seq._assert_can_cast_to(cls)
+
+        if how == 'strict':
+            how = 'inner'
+            cols = []
+            for s in seqs:
+                if s.has_positional_metadata():
+                    cols.append(frozenset(s.positional_metadata))
+                else:
+                    cols.append(frozenset())
+            if len(set(cols)) > 1:
+                raise ValueError("The positional metadata of the sequences do"
+                                 " not have matching columns. Consider setting"
+                                 " how='inner' or how='outer'")
+        seq_data = []
+        pm_data = []
+        for seq in seqs:
+            seq_data.append(seq._bytes)
+            pm_data.append(seq.positional_metadata)
+            if not seq.has_positional_metadata():
+                del seq.positional_metadata
+
+        pm = pd.concat(pm_data, join=how, ignore_index=True)
+        bytes_ = np.concatenate(seq_data)
+
+        return cls(bytes_, positional_metadata=pm)
+
+    @classmethod
+    def _assert_can_cast_to(cls, target):
+        if not (issubclass(cls, target) or issubclass(target, cls)):
+            raise TypeError("Cannot cast %r as %r." %
+                            (cls.__name__, target.__name__))
+
+    @overrides(PositionalMetadataMixin)
+    def _positional_metadata_axis_len_(self):
+        return len(self)
 
     @stable(as_of="0.4.0")
-    def __init__(self, sequence, metadata=None,
-                 positional_metadata=None):
-
+    def __init__(self, sequence, metadata=None, positional_metadata=None,
+                 lowercase=False):
         if isinstance(sequence, np.ndarray):
             if sequence.dtype == np.uint8:
                 self._set_bytes_contiguous(sequence)
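
The ``concat`` class method added earlier in this hunk joins positional
metadata according to ``how``. A small sketch of the three modes, based on
the implementation shown above (the sequences and column name are made up):

    from skbio import DNA

    s1 = DNA('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
    s2 = DNA('GGAA')

    # how='strict' (the default) requires identical positional metadata
    # columns, so this pair raises ValueError suggesting 'inner' or 'outer'.
    try:
        DNA.concat([s1, s2])
    except ValueError as err:
        print(err)

    # how='outer' keeps all columns and pads missing values with NaN;
    # how='inner' would keep only the shared columns (none here).
    merged = DNA.concat([s1, s2], how='outer')
    print(merged.positional_metadata['quality'].tolist())
    # [1.0, 2.0, 3.0, 4.0, nan, nan, nan, nan]
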
@@ -539,6 +539,9 @@ class Sequence(collections.Sequence, SkbioObject):
                     "np.uint8 or '|S1'. Invalid dtype: %s" %
                     sequence.dtype)
         elif isinstance(sequence, Sequence):
+            # Sequence casting is acceptable between direct
+            # descendants/ancestors
+            sequence._assert_can_cast_to(type(self))
             # we're not simply accessing sequence.metadata in order to avoid
             # creating "empty" metadata representations on both sequence
             # objects if they don't have metadata. same strategy is used below
@@ -574,15 +577,22 @@ class Sequence(collections.Sequence, SkbioObject):
 
             self._set_bytes(sequence)
 
-        if metadata is None:
-            self._metadata = None
-        else:
-            self.metadata = metadata
+        MetadataMixin._init_(self, metadata=metadata)
+        PositionalMetadataMixin._init_(
+            self, positional_metadata=positional_metadata)
+
+        if lowercase is False:
+            pass
+        elif lowercase is True or isinstance(lowercase, six.string_types):
+            lowercase_mask = self._bytes > self._ascii_lowercase_boundary
+            self._convert_to_uppercase(lowercase_mask)
 
-        if positional_metadata is None:
-            self._positional_metadata = None
+            # If it isn't True, it must be a string_type
+            if not (lowercase is True):
+                self.positional_metadata[lowercase] = lowercase_mask
         else:
-            self.positional_metadata = positional_metadata
+            raise TypeError("lowercase keyword argument expected a bool or "
+                            "string, but got %s" % type(lowercase))
 
     def _set_bytes_contiguous(self, sequence):
         """Munge the sequence data into a numpy array of dtype uint8."""
@@ -602,9 +612,14 @@ class Sequence(collections.Sequence, SkbioObject):
         sequence.flags.writeable = False
         self._bytes = sequence
 
+    def _convert_to_uppercase(self, lowercase):
+        if np.any(lowercase):
+            with self._byte_ownership():
+                self._bytes[lowercase] ^= self._ascii_invert_case_bit_offset
+
     @stable(as_of="0.4.0")
     def __contains__(self, subsequence):
-        """Determine if a subsequence is contained in the biological sequence.
+        """Determine if a subsequence is contained in this sequence.
 
         Parameters
         ----------
@@ -614,14 +629,13 @@ class Sequence(collections.Sequence, SkbioObject):
         Returns
         -------
         bool
-            Indicates whether `subsequence` is contained in the biological
-            sequence.
+            Indicates whether `subsequence` is contained in this sequence.
 
         Raises
         ------
         TypeError
             If `subsequence` is a ``Sequence`` object with a different type
-            than the biological sequence.
+            than this sequence.
 
         Examples
         --------
@@ -637,11 +651,10 @@ class Sequence(collections.Sequence, SkbioObject):
 
     @stable(as_of="0.4.0")
     def __eq__(self, other):
-        """Determine if the biological sequence is equal to another.
+        """Determine if this sequence is equal to another.
 
-        Biological sequences are equal if they are *exactly* the same type and
-        their sequence characters, metadata, and positional metadata are the
-        same.
+        Sequences are equal if they are *exactly* the same type and their
+        sequence characters, metadata, and positional metadata are the same.
 
         Parameters
         ----------
@@ -651,11 +664,11 @@ class Sequence(collections.Sequence, SkbioObject):
         Returns
         -------
         bool
-            Indicates whether the biological sequence is equal to `other`.
+            Indicates whether this sequence is equal to `other`.
 
         Examples
         --------
-        Define two biological sequences that have the same underlying sequence
+        Define two ``Sequence`` objects that have the same underlying sequence
         of characters:
 
         >>> from skbio import Sequence
@@ -672,15 +685,15 @@ class Sequence(collections.Sequence, SkbioObject):
         >>> t == s
         True
 
-        Define another biological sequence with a different sequence of
-        characters than the previous two biological sequences:
+        Define another sequence object with a different sequence of characters
+        than the previous two sequence objects:
 
         >>> u = Sequence('ACGA')
         >>> u == t
         False
 
-        Define a biological sequence with the same sequence of characters as
-        ``u`` but with different metadata and positional metadata:
+        Define a sequence with the same sequence of characters as ``u`` but
+        with different metadata and positional metadata:
 
         >>> v = Sequence('ACGA', metadata={'id': 'abc'},
         ...              positional_metadata={'quality':[1, 5, 3, 3]})
@@ -696,43 +709,23 @@ class Sequence(collections.Sequence, SkbioObject):
         if self.__class__ != other.__class__:
             return False
 
-        # we're not simply comparing self.metadata to other.metadata in order
-        # to avoid creating "empty" metadata representations on the sequence
-        # objects if they don't have metadata. same strategy is used below for
-        # positional metadata
-        if self.has_metadata() and other.has_metadata():
-            if self.metadata != other.metadata:
-                return False
-        elif not (self.has_metadata() or other.has_metadata()):
-            # both don't have metadata
-            pass
-        else:
-            # one has metadata while the other does not
+        if not MetadataMixin._eq_(self, other):
             return False
 
         if self._string != other._string:
             return False
 
-        if self.has_positional_metadata() and other.has_positional_metadata():
-            if not self.positional_metadata.equals(other.positional_metadata):
-                return False
-        elif not (self.has_positional_metadata() or
-                  other.has_positional_metadata()):
-            # both don't have positional metadata
-            pass
-        else:
-            # one has positional metadata while the other does not
+        if not PositionalMetadataMixin._eq_(self, other):
             return False
 
         return True
 
     @stable(as_of="0.4.0")
     def __ne__(self, other):
-        """Determine if the biological sequence is not equal to another.
+        """Determine if this sequence is not equal to another.
 
-        Biological sequences are not equal if they are not *exactly* the same
-        type, or their sequence characters, metadata, or positional metadata
-        differ.
+        Sequences are not equal if they are not *exactly* the same type, or
+        their sequence characters, metadata, or positional metadata differ.
 
         Parameters
         ----------
@@ -742,7 +735,7 @@ class Sequence(collections.Sequence, SkbioObject):
         Returns
         -------
         bool
-            Indicates whether the biological sequence is not equal to `other`.
+            Indicates whether this sequence is not equal to `other`.
 
         Examples
         --------
@@ -763,32 +756,31 @@ class Sequence(collections.Sequence, SkbioObject):
 
     @stable(as_of="0.4.0")
     def __getitem__(self, indexable):
-        """Slice the biological sequence.
+        """Slice this sequence.
 
         Parameters
         ----------
         indexable : int, slice, iterable (int and slice), 1D array_like (bool)
-            The position(s) to return from the biological sequence. If
-            `indexable` is an iterable of integers, these are assumed to be
-            indices in the sequence to keep. If `indexable` is a 1D
-            ``array_like`` of booleans, these are assumed to be the positions
-            in the sequence to keep.
+            The position(s) to return from this sequence. If `indexable` is an
+            iterable of integers, these are assumed to be indices in the
+            sequence to keep. If `indexable` is a 1D ``array_like`` of
+            booleans, these are assumed to be the positions in the sequence to
+            keep.
 
         Returns
         -------
         Sequence
-            New biological sequence containing the position(s) specified by
-            `indexable` in the current biological sequence. If quality scores
-            are present, they will be sliced in the same manner and included in
-            the returned biological sequence. ID and description are also
-            included.
+            New sequence containing the position(s) specified by `indexable`
+            in this sequence. Positional metadata will be sliced in the same
+            manner and included in the returned sequence, as will
+            `metadata`.
 
         Examples
         --------
         >>> from skbio import Sequence
         >>> s = Sequence('GGUCGUGAAGGA')
 
-        Obtain a single character from the biological sequence:
+        Obtain a single character from the sequence:
 
         >>> s[1]
         Sequence
@@ -891,12 +883,12 @@ class Sequence(collections.Sequence, SkbioObject):
 
     @stable(as_of="0.4.0")
     def __len__(self):
-        """Return the number of characters in the biological sequence.
+        """Return the number of characters in this sequence.
 
         Returns
         -------
         int
-            The length of the biological sequence.
+            The length of this sequence.
 
         Examples
         --------
@@ -909,7 +901,7 @@ class Sequence(collections.Sequence, SkbioObject):
         return self._bytes.size
 
     @stable(as_of="0.4.0")
-    def __nonzero__(self):
+    def __bool__(self):
         """Returns truth value (truthiness) of sequence.
 
         Returns
@@ -928,9 +920,11 @@ class Sequence(collections.Sequence, SkbioObject):
         """
         return len(self) > 0
 
+    __nonzero__ = __bool__
+
     @stable(as_of="0.4.0")
     def __iter__(self):
-        """Iterate over positions in the biological sequence.
+        """Iterate over positions in this sequence.
 
         Yields
         ------
@@ -955,7 +949,7 @@ class Sequence(collections.Sequence, SkbioObject):
 
     @stable(as_of="0.4.0")
     def __reversed__(self):
-        """Iterate over positions in the biological sequence in reverse order.
+        """Iterate over positions in this sequence in reverse order.
 
         Yields
         ------
@@ -979,7 +973,7 @@ class Sequence(collections.Sequence, SkbioObject):
 
     @stable(as_of="0.4.0")
     def __str__(self):
-        """Return biological sequence characters as a string.
+        """Return sequence characters as a string.
 
         Returns
         -------
@@ -1003,7 +997,7 @@ class Sequence(collections.Sequence, SkbioObject):
 
     @stable(as_of="0.4.0")
     def __repr__(self):
-        r"""Return a string representation of the biological sequence object.
+        r"""Return a string representation of this sequence object.
 
         Representation includes:
 
@@ -1023,7 +1017,7 @@ class Sequence(collections.Sequence, SkbioObject):
         Returns
         -------
         str
-            String representation of the biological sequence object.
+            String representation of this sequence object.
 
         Notes
         -----
@@ -1076,7 +1070,7 @@ class Sequence(collections.Sequence, SkbioObject):
         Sequence
         ----------------------------------------------------------------------
         Metadata:
-            'authors': <type 'list'>
+            'authors': <class 'list'>
             'description': "description of the sequence, wrapping across lines
                             if it's too long"
             'id': 'seq-id'
@@ -1119,7 +1113,7 @@ class Sequence(collections.Sequence, SkbioObject):
 
     @stable(as_of="0.4.0")
     def __copy__(self):
-        """Return a shallow copy of the biological sequence.
+        """Return a shallow copy of this sequence.
 
         See Also
         --------
@@ -1130,11 +1124,11 @@ class Sequence(collections.Sequence, SkbioObject):
         This method is equivalent to ``seq.copy(deep=False)``.
 
         """
-        return self.copy(deep=False)
+        return self._copy(False, {})
 
     @stable(as_of="0.4.0")
     def __deepcopy__(self, memo):
-        """Return a deep copy of the biological sequence.
+        """Return a deep copy of this sequence.
 
         See Also
         --------
@@ -1147,54 +1141,12 @@ class Sequence(collections.Sequence, SkbioObject):
         """
         return self._copy(True, memo)
 
-    @stable(as_of="0.4.0")
-    def has_metadata(self):
-        """Determine if the sequence contains metadata.
-
-        Returns
-        -------
-        bool
-            Indicates whether the sequence has metadata
-
-        Examples
-        --------
-        >>> from skbio import DNA
-        >>> s = DNA('ACACGACGTT')
-        >>> s.has_metadata()
-        False
-        >>> t = DNA('ACACGACGTT', metadata={'id': 'seq-id'})
-        >>> t.has_metadata()
-        True
-
-        """
-        return self._metadata is not None and bool(self.metadata)
-
-    @stable(as_of="0.4.0")
-    def has_positional_metadata(self):
-        """Determine if the sequence contains positional metadata.
-
-        Returns
-        -------
-        bool
-            Indicates whether the sequence has positional metadata
-
-        Examples
-        --------
-        >>> from skbio import DNA
-        >>> s = DNA('ACACGACGTT')
-        >>> s.has_positional_metadata()
-        False
-        >>> t = DNA('ACACGACGTT', positional_metadata={'quality': range(10)})
-        >>> t.has_positional_metadata()
-        True
-
-        """
-        return (self._positional_metadata is not None and
-                len(self.positional_metadata.columns) > 0)
-
-    @stable(as_of="0.4.0")
+    @deprecated(as_of="0.4.1", until="0.5.1",
+                reason="Use `copy.copy(seq)` instead of "
+                       "`seq.copy(deep=False)`, and `copy.deepcopy(seq)` "
+                       "instead of `seq.copy(deep=True)`.")
     def copy(self, deep=False):
-        """Return a copy of the biological sequence.
+        """Return a copy of this sequence.
 
         Parameters
         ----------
@@ -1204,7 +1156,7 @@ class Sequence(collections.Sequence, SkbioObject):
         Returns
         -------
         Sequence
-            Copy of the biological sequence.
+            Copy of this sequence.
 
         Notes
         -----
@@ -1324,33 +1276,73 @@ class Sequence(collections.Sequence, SkbioObject):
         # we don't make a distinction between deep vs. shallow copy of bytes
         # because dtype=np.uint8. we only need to make the distinction when
         # dealing with object dtype
-        bytes = np.copy(self._bytes)
+        bytes_ = np.copy(self._bytes)
 
-        seq_copy = self._constructor(sequence=bytes, metadata=None,
+        seq_copy = self._constructor(sequence=bytes_, metadata=None,
                                      positional_metadata=None)
 
-        if self.has_metadata():
-            metadata = self.metadata
-            if deep:
-                metadata = copy.deepcopy(metadata, memo)
-            else:
-                metadata = metadata.copy()
-            seq_copy._metadata = metadata
-
-        if self.has_positional_metadata():
-            positional_metadata = self.positional_metadata
-            if deep:
-                positional_metadata = copy.deepcopy(positional_metadata, memo)
-            else:
-                # deep=True makes a shallow copy of the underlying data buffer
-                positional_metadata = positional_metadata.copy(deep=True)
-            seq_copy._positional_metadata = positional_metadata
+        if deep:
+            seq_copy._metadata = MetadataMixin._deepcopy_(self, memo)
+            seq_copy._positional_metadata = \
+                PositionalMetadataMixin._deepcopy_(self, memo)
+        else:
+            seq_copy._metadata = MetadataMixin._copy_(self)
+            seq_copy._positional_metadata = \
+                PositionalMetadataMixin._copy_(self)
 
         return seq_copy
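Since `copy()` is now deprecated in favour of the stdlib copy protocol, with both `__copy__` and `__deepcopy__` delegating to `_copy` above, the intended call pattern looks roughly like the following sketch (the DNA input and metadata are illustrative; equality assumes skbio's usual type/sequence/metadata comparison):

    import copy
    from skbio import DNA

    seq = DNA('ACGT', metadata={'id': 'seq-1'})
    shallow = copy.copy(seq)        # replaces the deprecated seq.copy(deep=False)
    deep = copy.deepcopy(seq)       # replaces the deprecated seq.copy(deep=True)
    print(shallow == seq, deep == seq)   # expected: True True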
 
+    @stable(as_of='0.4.0')
+    def lowercase(self, lowercase):
+        """Return a case-sensitive string representation of the sequence.
+
+        Parameters
+        ----------
+        lowercase : str or boolean vector
+            If lowercase is a boolean vector, it is used to set sequence
+            characters to lowercase in the output string. True values in the
+            boolean vector correspond to lowercase characters. If lowercase
+            is a str, it is treated as a key into the positional metadata,
+            pointing to a column which must be a boolean vector.
+            That boolean vector is then used as described above.
+
+        Returns
+        -------
+        str
+            String representation of sequence with specified characters set to
+            lowercase.
+
+        Examples
+        --------
+        >>> from skbio import Sequence
+        >>> s = Sequence('ACGT')
+        >>> s.lowercase([True, True, False, False])
+        'acGT'
+        >>> s = Sequence('ACGT',
+        ...              positional_metadata={
+        ...                 'exons': [True, False, False, True]})
+        >>> s.lowercase('exons')
+        'aCGt'
+
+        The constructor automatically populates a column in the positional
+        metadata when the ``lowercase`` keyword argument is provided with a
+        column name:
+
+        >>> s = Sequence('ACgt', lowercase='introns')
+        >>> s.lowercase('introns')
+        'ACgt'
+        >>> s = Sequence('ACGT', lowercase='introns')
+        >>> s.lowercase('introns')
+        'ACGT'
+
+        """
+        index = self._munge_to_index_array(lowercase)
+        outbytes = self._bytes.copy()
+        outbytes[index] ^= self._ascii_invert_case_bit_offset
+        return str(outbytes.tostring().decode('ascii'))
+
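The XOR against `_ascii_invert_case_bit_offset` above works because ASCII upper- and lowercase letters differ only in a single bit (0x20). A minimal standalone sketch of that trick, assuming the same 0x20 constant (the names below are illustrative, not skbio internals):

    import numpy as np

    ASCII_CASE_BIT = 0x20  # assumed value of the case-flip bit

    seq_bytes = np.frombuffer(b'ACGT', dtype=np.uint8).copy()
    mask = np.array([True, True, False, False])   # positions to lowercase
    seq_bytes[mask] ^= ASCII_CASE_BIT              # flip the case bit in place
    print(seq_bytes.tobytes().decode('ascii'))     # -> 'acGT'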
     @stable(as_of="0.4.0")
     def count(self, subsequence, start=None, end=None):
-        """Count occurrences of a subsequence in the biological sequence.
+        """Count occurrences of a subsequence in this sequence.
 
         Parameters
         ----------
@@ -1364,7 +1356,7 @@ class Sequence(collections.Sequence, SkbioObject):
         Returns
         -------
         int
-            Number of occurrences of `subsequence` in the biological sequence.
+            Number of occurrences of `subsequence` in this sequence.
 
         Raises
         ------
@@ -1372,7 +1364,7 @@ class Sequence(collections.Sequence, SkbioObject):
             If `subsequence` is of length 0.
         TypeError
             If `subsequence` is a ``Sequence`` object with a different type
-            than the biological sequence.
+            than this sequence.
 
         Examples
         --------
@@ -1401,7 +1393,7 @@ class Sequence(collections.Sequence, SkbioObject):
         Parameters
         ----------
         subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
-            Subsequence to search for in the biological sequence.
+            Subsequence to search for in this sequence.
         start : int, optional
             The position at which to start searching (inclusive).
         end : int, optional
@@ -1410,16 +1402,15 @@ class Sequence(collections.Sequence, SkbioObject):
         Returns
         -------
         int
-            Position where `subsequence` first occurs in the biological
-            sequence.
+            Position where `subsequence` first occurs in this sequence.
 
         Raises
         ------
         ValueError
-            If `subsequence` is not present in the biological sequence.
+            If `subsequence` is not present in this sequence.
         TypeError
             If `subsequence` is a ``Sequence`` object with a different type
-            than the biological sequence.
+            than this sequence.
 
         Examples
         --------
@@ -1445,15 +1436,15 @@ class Sequence(collections.Sequence, SkbioObject):
         other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
             Sequence to compute the distance to.
         metric : function, optional
-            Function used to compute the distance between the biological
-            sequence and `other`. If ``None`` (the default),
+            Function used to compute the distance between this sequence and
+            `other`. If ``None`` (the default),
             ``scipy.spatial.distance.hamming`` will be used. This function
             should take two ``skbio.Sequence`` objects and return a ``float``.
 
         Returns
         -------
         float
-            Distance between the biological sequence and `other`.
+            Distance between this sequence and `other`.
 
         Raises
         ------
@@ -1469,8 +1460,8 @@ class Sequence(collections.Sequence, SkbioObject):
             removed from this method when the ``skbio.sequence.stats`` module
             is created (track progress on issue #913).
         TypeError
-            If `other` is a ``Sequence`` object with a different type than the
-            biological sequence.
+            If `other` is a ``Sequence`` object with a different type than this
+            sequence.
 
         See Also
         --------
@@ -1527,8 +1518,8 @@ class Sequence(collections.Sequence, SkbioObject):
         ValueError
             If the sequences are not the same length.
         TypeError
-            If `other` is a ``Sequence`` object with a different type than the
-            biological sequence.
+            If `other` is a ``Sequence`` object with a different type than this
+            sequence.
 
         See Also
         --------
@@ -1569,8 +1560,8 @@ class Sequence(collections.Sequence, SkbioObject):
         ValueError
             If the sequences are not the same length.
         TypeError
-            If `other` is a ``Sequence`` object with a different type than the
-            biological sequence.
+            If `other` is a ``Sequence`` object with a different type than this
+            sequence.
 
         See Also
         --------
@@ -1611,8 +1602,8 @@ class Sequence(collections.Sequence, SkbioObject):
         ValueError
             If the sequences are not the same length.
         TypeError
-            If `other` is a ``Sequence`` object with a different type than the
-            biological sequence.
+            If `other` is a ``Sequence`` object with a different type than this
+            sequence.
 
         See Also
         --------
@@ -1661,8 +1652,8 @@ class Sequence(collections.Sequence, SkbioObject):
         ValueError
             If the sequences are not the same length.
         TypeError
-            If `other` is a ``Sequence`` object with a different type than the
-            biological sequence.
+            If `other` is a ``Sequence`` object with a different type than this
+            sequence.
 
         See Also
         --------
@@ -1687,9 +1678,141 @@ class Sequence(collections.Sequence, SkbioObject):
         else:
             return int(self.mismatches(other).sum())
 
+    @experimental(as_of="0.4.1")
+    def frequencies(self, chars=None, relative=False):
+        """Compute frequencies of characters in the sequence.
+
+        Parameters
+        ----------
+        chars : str or set of str, optional
+            Characters to compute the frequencies of. May be a ``str``
+            containing a single character or a ``set`` of single-character
+            strings. If ``None``, frequencies will be computed for all
+            characters present in the sequence.
+        relative : bool, optional
+            If ``True``, return the relative frequency of each character
+            instead of its count. If `chars` is provided, relative frequencies
+            will be computed with respect to the number of characters in the
+            sequence, **not** the total count of characters observed in
+            `chars`. Thus, the relative frequencies will not necessarily sum to
+            1.0 if `chars` is provided.
+
+        Returns
+        -------
+        dict
+            Frequencies of characters in the sequence.
+
+        Raises
+        ------
+        TypeError
+            If `chars` is not a ``str`` or ``set`` of ``str``.
+        ValueError
+            If `chars` is not a single-character ``str`` or a ``set`` of
+            single-character strings.
+        ValueError
+            If `chars` contains characters outside the allowable range of
+            characters in a ``Sequence`` object.
+
+        See Also
+        --------
+        kmer_frequencies
+        iter_kmers
+
+        Notes
+        -----
+        If the sequence is empty (i.e., length zero), ``relative=True``,
+        **and** `chars` is provided, the relative frequency of each specified
+        character will be ``np.nan``.
+
+        If `chars` is not provided, this method is equivalent to, but faster
+        than, both ``seq.kmer_frequencies(k=1)`` and passing
+        ``chars=seq.observed_chars``.
+
+        Examples
+        --------
+        Compute character frequencies of a sequence:
+
+        >>> from pprint import pprint
+        >>> from skbio import Sequence
+        >>> seq = Sequence('AGAAGACC')
+        >>> freqs = seq.frequencies()
+        >>> pprint(freqs) # using pprint to display dict in sorted order
+        {'A': 4, 'C': 2, 'G': 2}
+
+        Compute relative character frequencies:
+
+        >>> freqs = seq.frequencies(relative=True)
+        >>> pprint(freqs)
+        {'A': 0.5, 'C': 0.25, 'G': 0.25}
+
+        Compute relative frequencies of characters A, C, and T:
+
+        >>> freqs = seq.frequencies(chars={'A', 'C', 'T'}, relative=True)
+        >>> pprint(freqs)
+        {'A': 0.5, 'C': 0.25, 'T': 0.0}
+
+        Note that since character T is not in the sequence, we receive a
+        relative frequency of 0.0. The relative frequencies of A and C are
+        computed with respect to the number of characters in the sequence (8),
+        **not** the number of A and C characters (4 + 2 = 6).
+
+        """
+        freqs = np.bincount(self._bytes,
+                            minlength=self._number_of_extended_ascii_codes)
+
+        if chars is not None:
+            chars, indices = self._chars_to_indices(chars)
+        else:
+            indices, = np.nonzero(freqs)
+            # Downcast from int64 to uint8 then convert to str. This is safe
+            # because we are guaranteed to have indices in the range 0 to 255
+            # inclusive.
+            chars = indices.astype(np.uint8).tostring().decode('ascii')
+
+        obs_counts = freqs[indices]
+        if relative:
+            obs_counts = obs_counts / len(self)
+
+        # Use tolist() for minor performance gain.
+        return dict(zip(chars, obs_counts.tolist()))
+
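The single `np.bincount` pass above is what makes `frequencies` faster than `kmer_frequencies(k=1)`; a hedged standalone sketch of the same counting idea (the function name and the 256-bin assumption are illustrative):

    import numpy as np

    def char_counts(s):
        # count each extended-ASCII code in one pass
        arr = np.frombuffer(s.encode('ascii'), dtype=np.uint8)
        freqs = np.bincount(arr, minlength=256)
        indices, = np.nonzero(freqs)               # codes actually observed
        chars = indices.astype(np.uint8).tobytes().decode('ascii')
        return dict(zip(chars, freqs[indices].tolist()))

    print(char_counts('AGAAGACC'))   # {'A': 4, 'C': 2, 'G': 2}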
+    def _chars_to_indices(self, chars):
+        """Helper for Sequence.frequencies."""
+        if isinstance(chars, six.string_types) or \
+                isinstance(chars, six.binary_type):
+            chars = set([chars])
+        elif not isinstance(chars, set):
+            raise TypeError(
+                "`chars` must be of type `set`, not %r" % type(chars).__name__)
+
+        # Impose an (arbitrary) ordering to `chars` so that we can return
+        # `indices` in that same order.
+        chars = list(chars)
+        indices = []
+        for char in chars:
+            if not (isinstance(char, six.string_types) or
+                    isinstance(char, six.binary_type)):
+                raise TypeError(
+                    "Each element of `chars` must be string-like, not %r" %
+                    type(char).__name__)
+            if len(char) != 1:
+                raise ValueError(
+                    "Each element of `chars` must contain a single "
+                    "character (found %d characters)" % len(char))
+
+            index = ord(char)
+            if index >= self._number_of_extended_ascii_codes:
+                raise ValueError(
+                    "Character %r in `chars` is outside the range of "
+                    "allowable characters in a `Sequence` object." % char)
+            indices.append(index)
+        return chars, indices
+
     @stable(as_of="0.4.0")
     def iter_kmers(self, k, overlap=True):
-        """Generate kmers of length `k` from the biological sequence.
+        """Generate kmers of length `k` from this sequence.
 
         Parameters
         ----------
@@ -1701,7 +1824,7 @@ class Sequence(collections.Sequence, SkbioObject):
         Yields
         ------
         Sequence
-            kmer of length `k` contained in the biological sequence.
+            kmer of length `k` contained in this sequence.
 
         Raises
         ------
@@ -1750,7 +1873,7 @@ class Sequence(collections.Sequence, SkbioObject):
 
     @stable(as_of="0.4.0")
     def kmer_frequencies(self, k, overlap=True, relative=False):
-        """Return counts of words of length `k` from the biological sequence.
+        """Return counts of words of length `k` from this sequence.
 
         Parameters
         ----------
@@ -1764,11 +1887,8 @@ class Sequence(collections.Sequence, SkbioObject):
 
         Returns
         -------
-        collections.Counter or collections.defaultdict
-            Frequencies of words of length `k` contained in the biological
-            sequence. This will be a ``collections.Counter`` if `relative` is
-            ``False`` and a ``collections.defaultdict`` if `relative` is
-            ``True``.
+        dict
+            Frequencies of words of length `k` contained in this sequence.
 
         Raises
         ------
@@ -1777,16 +1897,19 @@ class Sequence(collections.Sequence, SkbioObject):
 
         Examples
         --------
+        >>> from pprint import pprint
         >>> from skbio import Sequence
         >>> s = Sequence('ACACATTTATTA')
-        >>> s.kmer_frequencies(3, overlap=False)
-        Counter({'TTA': 2, 'ACA': 1, 'CAT': 1})
-        >>> s.kmer_frequencies(3, relative=True, overlap=False)
-        defaultdict(<type 'float'>, {'ACA': 0.25, 'TTA': 0.5, 'CAT': 0.25})
+        >>> freqs = s.kmer_frequencies(3, overlap=False)
+        >>> pprint(freqs) # using pprint to display dict in sorted order
+        {'ACA': 1, 'CAT': 1, 'TTA': 2}
+        >>> freqs = s.kmer_frequencies(3, relative=True, overlap=False)
+        >>> pprint(freqs)
+        {'ACA': 0.25, 'CAT': 0.25, 'TTA': 0.5}
 
         """
         kmers = self.iter_kmers(k, overlap=overlap)
-        freqs = collections.Counter((str(seq) for seq in kmers))
+        freqs = dict(collections.Counter((str(seq) for seq in kmers)))
 
         if relative:
             if overlap:
@@ -1794,7 +1917,7 @@ class Sequence(collections.Sequence, SkbioObject):
             else:
                 num_kmers = len(self) // k
 
-            relative_freqs = collections.defaultdict(float)
+            relative_freqs = {}
             for kmer, count in viewitems(freqs):
                 relative_freqs[kmer] = count / num_kmers
             freqs = relative_freqs
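The Counter-then-plain-dict pattern used by `kmer_frequencies` above is easy to mirror outside skbio; a minimal sketch operating on plain strings instead of `Sequence` objects (the helper name is illustrative):

    import collections

    def kmer_freqs(s, k, overlap=True, relative=False):
        step = 1 if overlap else k
        kmers = (s[i:i + k] for i in range(0, len(s) - k + 1, step))
        freqs = dict(collections.Counter(kmers))
        if relative:
            num_kmers = (len(s) - k + 1) if overlap else (len(s) // k)
            freqs = {kmer: count / num_kmers for kmer, count in freqs.items()}
        return freqs

    print(kmer_freqs('ACACATTTATTA', 3, overlap=False))
    # {'ACA': 1, 'CAT': 1, 'TTA': 2}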
@@ -1920,18 +2043,17 @@ class Sequence(collections.Sequence, SkbioObject):
                 yield r
 
     def _to(self, sequence=None, metadata=None, positional_metadata=None):
-        """Return a copy of the current biological sequence.
+        """Return a copy of this sequence.
 
-        Returns a copy of the current biological sequence, optionally with
-        updated attributes specified as keyword arguments.
+        Returns a copy of this sequence, optionally with updated attributes
+        specified as keyword arguments.
 
         Arguments are the same as those passed to the ``Sequence`` constructor.
         The returned copy will have its attributes updated based on the
         arguments. If an attribute is missing, the copy will keep the same
-        attribute as the current biological sequence. Valid attribute names
-        are `'sequence'`, `'metadata'`, and `'positional_metadata'`. Default
-        behavior is to return a copy of the current biological sequence
-        without changing any attributes.
+        attribute as this sequence. Valid attribute names are `'sequence'`,
+        `'metadata'`, and `'positional_metadata'`. Default behavior is to
+        return a copy of this sequence without changing any attributes.
 
         Parameters
         ----------
@@ -1942,9 +2064,8 @@ class Sequence(collections.Sequence, SkbioObject):
         Returns
         -------
         Sequence
-            Copy of the current biological sequence, optionally with updated
-            attributes based on arguments. Will be the same type as the current
-            biological sequence (`self`).
+            Copy of this sequence, optionally with updated attributes based on
+            arguments. Will be the same type as this sequence (`self`).
 
         Notes
         -----
@@ -1954,16 +2075,16 @@ class Sequence(collections.Sequence, SkbioObject):
         `Sequence.copy`, which will actually copy `sequence`.
 
         This method is the preferred way of creating new instances from an
-        existing biological sequence, instead of calling
-        ``self.__class__(...)``, as the latter can be error-prone (e.g.,
-        it's easy to forget to propagate attributes to the new instance).
+        existing sequence, instead of calling ``self.__class__(...)``, as the
+        latter can be error-prone (e.g., it's easy to forget to propagate
+        attributes to the new instance).
 
         """
         if sequence is None:
             sequence = self._bytes
-        if metadata is None:
+        if metadata is None and self.has_metadata():
             metadata = self._metadata
-        if positional_metadata is None:
+        if positional_metadata is None and self.has_positional_metadata():
             positional_metadata = self._positional_metadata
         return self._constructor(sequence=sequence, metadata=metadata,
                                  positional_metadata=positional_metadata)
@@ -2085,208 +2206,3 @@ def _slices_from_iter(array, indexables):
                              "containing %r." % i)
 
         yield array[i]
-
-
-class _SequenceReprBuilder(object):
-    """Build a ``Sequence`` repr.
-
-    Parameters
-    ----------
-    seq : Sequence
-        Sequence to repr.
-    width : int
-        Maximum width of the repr.
-    indent : int
-        Number of spaces to use for indented lines.
-    chunk_size: int
-        Number of characters in each chunk of a sequence.
-
-    """
-    def __init__(self, seq, width, indent, chunk_size):
-        self._seq = seq
-        self._width = width
-        self._indent = ' ' * indent
-        self._chunk_size = chunk_size
-
-    def build(self):
-        lines = ElasticLines()
-
-        cls_name = self._seq.__class__.__name__
-        lines.add_line(cls_name)
-        lines.add_separator()
-
-        if self._seq.has_metadata():
-            lines.add_line('Metadata:')
-            # Python 3 doesn't allow sorting of mixed types so we can't just
-            # use sorted() on the metadata keys. Sort first by type then sort
-            # by value within each type.
-            for key in self._sorted_keys_grouped_by_type(self._seq.metadata):
-                value = self._seq.metadata[key]
-                lines.add_lines(self._format_metadata_key_value(key, value))
-
-        if self._seq.has_positional_metadata():
-            lines.add_line('Positional metadata:')
-            for key in self._seq.positional_metadata.columns.values.tolist():
-                dtype = self._seq.positional_metadata[key].dtype
-                lines.add_lines(
-                    self._format_positional_metadata_column(key, dtype))
-
-        lines.add_line('Stats:')
-        for label, value in self._seq._repr_stats():
-            lines.add_line('%s%s: %s' % (self._indent, label, value))
-        lines.add_separator()
-
-        num_lines, num_chars, column_width = self._find_optimal_seq_chunking()
-
-        # display entire sequence if we can, else display the first two and
-        # last two lines separated by ellipsis
-        if num_lines <= 5:
-            lines.add_lines(self._format_chunked_seq(
-                range(num_lines), num_chars, column_width))
-        else:
-            lines.add_lines(self._format_chunked_seq(
-                range(2), num_chars, column_width))
-            lines.add_line('...')
-            lines.add_lines(self._format_chunked_seq(
-                range(num_lines - 2, num_lines), num_chars, column_width))
-
-        return lines.to_str()
-
-    def _sorted_keys_grouped_by_type(self, dict_):
-        """Group keys within a dict by their type and sort within type."""
-        type_sorted = sorted(dict_, key=self._type_sort_key)
-        type_and_value_sorted = []
-        for _, group in itertools.groupby(type_sorted, self._type_sort_key):
-            type_and_value_sorted.extend(sorted(group))
-        return type_and_value_sorted
-
-    def _type_sort_key(self, key):
-        return repr(type(key))
-
-    def _format_metadata_key_value(self, key, value):
-        """Format metadata key:value, wrapping across lines if necessary."""
-        key_fmt = self._format_key(key)
-
-        supported_type = True
-        if isinstance(value, (six.text_type, six.binary_type)):
-            # for stringy values, there may be u'' or b'' depending on the type
-            # of `value` and version of Python. find the starting quote
-            # character so that wrapped text will line up with that instead of
-            # the string literal prefix character. for example:
-            #
-            #     'foo': u'abc def ghi
-            #              jkl mno'
-            value_repr = repr(value)
-            extra_indent = 1
-            if not (value_repr.startswith("'") or value_repr.startswith('"')):
-                extra_indent = 2
-        # handles any number, this includes bool
-        elif value is None or isinstance(value, numbers.Number):
-            value_repr = repr(value)
-            extra_indent = 0
-        else:
-            supported_type = False
-
-        if not supported_type or len(value_repr) > 140:
-            value_repr = str(type(value))
-            # extra indent of 1 so that wrapped text lines up past the bracket:
-            #
-            #     'foo': <type
-            #             'dict'>
-            extra_indent = 1
-
-        return self._wrap_text_with_indent(value_repr, key_fmt, extra_indent)
-
-    def _format_key(self, key):
-        """Format metadata key.
-
-        Includes initial indent and trailing colon and space:
-
-            <indent>'foo':<space>
-
-        """
-        key_fmt = self._indent + repr(key)
-        supported_types = (six.text_type, six.binary_type, numbers.Number,
-                           type(None))
-        if len(key_fmt) > (self._width / 2) or not isinstance(key,
-                                                              supported_types):
-            key_fmt = self._indent + str(type(key))
-        return '%s: ' % key_fmt
-
-    def _wrap_text_with_indent(self, text, initial_text, extra_indent):
-        """Wrap text across lines with an initial indentation.
-
-        For example:
-
-            'foo': 'abc def
-                    ghi jkl
-                    mno pqr'
-
-        <indent>'foo':<space> is `initial_text`. `extra_indent` is 1. Wrapped
-        lines are indented such that they line up with the start of the
-        previous line of wrapped text.
-
-        """
-        return textwrap.wrap(
-            text, width=self._width, expand_tabs=False,
-            initial_indent=initial_text,
-            subsequent_indent=' ' * (len(initial_text) + extra_indent))
-
-    def _format_positional_metadata_column(self, key, dtype):
-        key_fmt = self._format_key(key)
-        dtype_fmt = '<dtype: %s>' % str(dtype)
-        return self._wrap_text_with_indent(dtype_fmt, key_fmt, 1)
-
-    def _find_optimal_seq_chunking(self):
-        """Find the optimal number of sequence chunks to fit on a single line.
-
-        Returns the number of lines the sequence will occupy, the number of
-        sequence characters displayed on each line, and the column width
-        necessary to display position info using the optimal number of sequence
-        chunks.
-
-        """
-        # strategy: use an iterative approach to find the optimal number of
-        # sequence chunks per line. start with a single chunk and increase
-        # until the max line width is exceeded. when this happens, the previous
-        # number of chunks is optimal
-        num_lines = 0
-        num_chars = 0
-        column_width = 0
-
-        num_chunks = 1
-        not_exceeded = True
-        while not_exceeded:
-            line_len, new_chunk_info = self._compute_chunked_seq_line_len(
-                num_chunks)
-            not_exceeded = line_len <= self._width
-            if not_exceeded:
-                num_lines, num_chars, column_width = new_chunk_info
-                num_chunks += 1
-        return num_lines, num_chars, column_width
-
-    def _compute_chunked_seq_line_len(self, num_chunks):
-        """Compute line length based on a number of chunks."""
-        num_chars = num_chunks * self._chunk_size
-
-        # ceil to account for partial line
-        num_lines = int(math.ceil(len(self._seq) / num_chars))
-
-        # position column width is fixed width, based on the number of
-        # characters necessary to display the position of the final line (all
-        # previous positions will be left justified using this width)
-        column_width = len('%d ' % ((num_lines - 1) * num_chars))
-
-        # column width + number of sequence characters + spaces between chunks
-        line_len = column_width + num_chars + (num_chunks - 1)
-        return line_len, (num_lines, num_chars, column_width)
-
-    def _format_chunked_seq(self, line_idxs, num_chars, column_width):
-        """Format specified lines of chunked sequence data."""
-        lines = []
-        for line_idx in line_idxs:
-            seq_idx = line_idx * num_chars
-            chars = str(self._seq[seq_idx:seq_idx+num_chars])
-            chunked_chars = chunk_str(chars, self._chunk_size, ' ')
-            lines.append(('%d' % seq_idx).ljust(column_width) + chunked_chars)
-        return lines
diff --git a/skbio/sequence/tests/test_base.py b/skbio/sequence/tests/test_base.py
deleted file mode 100644
index f071903..0000000
--- a/skbio/sequence/tests/test_base.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-
-import unittest
-
-from skbio.sequence._base import ElasticLines
-
-
-class TestElasticLines(unittest.TestCase):
-    def setUp(self):
-        self.el = ElasticLines()
-
-    def test_empty(self):
-        self.assertEqual(self.el.to_str(), '')
-
-    def test_add_line(self):
-        self.el.add_line('foo')
-        self.assertEqual(self.el.to_str(), 'foo')
-
-    def test_add_lines(self):
-        self.el = ElasticLines()
-        self.el.add_lines(['alice', 'bob', 'carol'])
-        self.assertEqual(self.el.to_str(), 'alice\nbob\ncarol')
-
-    def test_add_separator(self):
-        self.el.add_separator()
-        self.assertEqual(self.el.to_str(), '')
-
-        self.el.add_line('foo')
-        self.assertEqual(self.el.to_str(), '---\nfoo')
-
-        self.el.add_separator()
-        self.el.add_lines(['bar', 'bazzzz'])
-        self.el.add_separator()
-
-        self.assertEqual(self.el.to_str(),
-                         '------\nfoo\n------\nbar\nbazzzz\n------')
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/skbio/sequence/tests/test_iupac_sequence.py b/skbio/sequence/tests/test_iupac_sequence.py
index 64fd0a2..d878a6d 100644
--- a/skbio/sequence/tests/test_iupac_sequence.py
+++ b/skbio/sequence/tests/test_iupac_sequence.py
@@ -39,9 +39,6 @@ class ExampleMotifsTester(ExampleIUPACSequence):
 
 
 class TestIUPACSequence(TestCase):
-    def setUp(self):
-        self.lowercase_seq = ExampleIUPACSequence('AAAAaaaa', lowercase='key')
-
     def test_instantiation_with_no_implementation(self):
         class IUPACSequenceSubclassNoImplementation(IUPACSequence):
             pass
@@ -171,27 +168,6 @@ class TestIUPACSequence(TestCase):
                                        invalid_type):
                 ExampleIUPACSequence('ACGTacgt', lowercase=invalid_key)
 
-    def test_lowercase_mungeable_key(self):
-        # NOTE: This test relies on Sequence._munge_to_index_array working
-        # properly. If the internal implementation of the lowercase method
-        # changes to no longer use _munge_to_index_array, this test may need
-        # to be updated to cover cases currently covered by
-        # _munge_to_index_array
-        self.assertEqual('AAAAaaaa', self.lowercase_seq.lowercase('key'))
-
-    def test_lowercase_array_key(self):
-        # NOTE: This test relies on Sequence._munge_to_index_array working
-        # properly. If the internal implementation of the lowercase method
-        # changes to no longer use _munge_to_index_array, this test may need
-        # to be updated to cover cases currently covered by
-        # _munge_to_index_array
-        self.assertEqual('aaAAaaaa',
-                         self.lowercase_seq.lowercase(
-                             np.array([True, True, False, False, True, True,
-                                       True, True])))
-        self.assertEqual('AaAAaAAA',
-                         self.lowercase_seq.lowercase([1, 4]))
-
     def test_degenerate_chars(self):
         expected = set("XYZ")
         self.assertIs(type(ExampleIUPACSequence.degenerate_chars), set)
@@ -231,6 +207,14 @@ class TestIUPACSequence(TestCase):
         with self.assertRaises(AttributeError):
             ExampleIUPACSequence('').gap_chars = set("_ =")
 
+    def test_default_gap_char(self):
+        self.assertIs(type(ExampleIUPACSequence.default_gap_char), str)
+        self.assertEqual(ExampleIUPACSequence.default_gap_char, '-')
+        self.assertEqual(ExampleIUPACSequence('').default_gap_char, '-')
+
+        with self.assertRaises(AttributeError):
+            ExampleIUPACSequence('').default_gap_char = '.'
+
     def test_alphabet(self):
         expected = set("ABC.-XYZ")
         self.assertIs(type(ExampleIUPACSequence.alphabet), set)
@@ -433,6 +417,19 @@ class TestIUPACSequence(TestCase):
                      key=str)
         self.assertEqual(obs, exp)
 
+    def test_to_regex_no_degens(self):
+        seq = ExampleIUPACSequence('ABC')
+        regex = seq.to_regex()
+        self.assertEqual(regex.pattern, str(seq))
+
+    def test_to_regex_with_degens(self):
+        seq = ExampleIUPACSequence('AYZ')
+        regex = seq.to_regex()
+        self.assertFalse(any(regex.match(s) is None
+                             for s in 'ABA ABC ACA ACC'.split()))
+        self.assertTrue(all(regex.match(s) is None
+                            for s in 'CBA BBA ABB AAA'.split()))
+
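The new `to_regex` tests above check that degenerate characters expand into regex character classes; a rough standalone sketch of that expansion (the degenerate map below is hypothetical and only mirrors the Example* test alphabet):

    import re

    DEGENERATE = {'Y': 'BC', 'Z': 'AC'}   # hypothetical degenerate-to-definite map

    def to_regex(seq_str):
        parts = ['[%s]' % DEGENERATE[c] if c in DEGENERATE else re.escape(c)
                 for c in seq_str]
        return re.compile(''.join(parts))

    regex = to_regex('AYZ')
    print(bool(regex.match('ABC')), bool(regex.match('AAA')))   # True False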
     def test_find_motifs_no_motif(self):
         seq = ExampleMotifsTester("ABCABCABC")
         with self.assertRaises(ValueError) as cm:
diff --git a/skbio/sequence/tests/test_rna.py b/skbio/sequence/tests/test_rna.py
new file mode 100644
index 0000000..b9e8240
--- /dev/null
+++ b/skbio/sequence/tests/test_rna.py
@@ -0,0 +1,45 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import unittest
+
+from skbio import DNA, RNA
+
+
+# tests specific to RNA go here. tests for functionality shared by DNA and RNA
+# go in test_nucleotide_sequences.py
+class TestRNA(unittest.TestCase):
+    def test_reverse_transcribe(self):
+        # without changes
+        self.assertEqual(RNA('').reverse_transcribe(), DNA(''))
+        self.assertEqual(RNA('A').reverse_transcribe(), DNA('A'))
+        self.assertEqual(RNA('.ACGW-').reverse_transcribe(), DNA('.ACGW-'))
+
+        # with changes
+        self.assertEqual(DNA('T'), RNA('U').reverse_transcribe())
+        self.assertEqual(DNA('TT'), RNA('UU').reverse_transcribe())
+        self.assertEqual(DNA('ATCTG'), RNA('AUCUG').reverse_transcribe())
+        self.assertEqual(DNA('TTTG'), RNA('UUUG').reverse_transcribe())
+
+    def test_reverse_transcribe_preserves_all_metadata(self):
+        seq = RNA('AGUU', metadata={'foo': 'bar'},
+                  positional_metadata={'foo': range(4)})
+        exp = DNA('AGTT', metadata={'foo': 'bar'},
+                  positional_metadata={'foo': range(4)})
+        self.assertEqual(seq.reverse_transcribe(), exp)
+
+    def test_reverse_transcribe_does_not_modify_input(self):
+        seq = RNA('AUAU')
+        self.assertEqual(seq.reverse_transcribe(), DNA('ATAT'))
+        self.assertEqual(seq, RNA('AUAU'))
+
+
+if __name__ == '__main__':
+    unittest.main()
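These tests pin reverse transcription down to a straight U-to-T (and u-to-t) substitution that leaves everything else, including gaps and degenerates such as W, untouched; outside skbio the character-level mapping is simply (a sketch, not skbio's implementation):

    rna = 'AUCUG'
    dna = rna.translate(str.maketrans('Uu', 'Tt'))
    print(dna)   # ATCTG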
diff --git a/skbio/sequence/tests/test_sequence.py b/skbio/sequence/tests/test_sequence.py
index 3c5578e..f23a991 100644
--- a/skbio/sequence/tests/test_sequence.py
+++ b/skbio/sequence/tests/test_sequence.py
@@ -11,9 +11,10 @@ import six
 from six.moves import zip_longest
 
 import copy
+import functools
 import re
 from types import GeneratorType
-from collections import Counter, defaultdict, Hashable
+from collections import Hashable
 from unittest import TestCase, main
 
 import numpy as np
@@ -24,6 +25,8 @@ from skbio import Sequence
 from skbio.util import assert_data_frame_almost_equal
 from skbio.sequence._sequence import (_single_index_to_slice, _is_single_index,
                                       _as_slice_if_single_index)
+from skbio.util._testing import (ReallyEqualMixin, MetadataMixinTests,
+                                 PositionalMetadataMixinTests)
 
 
 class SequenceSubclass(Sequence):
@@ -31,8 +34,28 @@ class SequenceSubclass(Sequence):
     pass
 
 
-class TestSequence(TestCase):
+class SequenceSubclassTwo(Sequence):
+    """Used for testing purposes."""
+    pass
+
+
+class TestSequenceMetadata(TestCase, ReallyEqualMixin, MetadataMixinTests):
+    def setUp(self):
+        self._metadata_constructor_ = functools.partial(Sequence, '')
+
+
+class TestSequencePositionalMetadata(TestCase, ReallyEqualMixin,
+                                     PositionalMetadataMixinTests):
+    def setUp(self):
+        def factory(axis_len, positional_metadata=None):
+            return Sequence('Z' * axis_len,
+                            positional_metadata=positional_metadata)
+        self._positional_metadata_constructor_ = factory
+
+
+class TestSequence(TestCase, ReallyEqualMixin):
     def setUp(self):
+        self.lowercase_seq = Sequence('AAAAaaaa', lowercase='key')
         self.sequence_kinds = frozenset([
             str, Sequence, lambda s: np.fromstring(s, dtype='|S1'),
             lambda s: np.fromstring(s, dtype=np.uint8)])
@@ -50,6 +73,118 @@ class TestSequence(TestCase):
             np.array([]),
             np.array([], dtype=int)]
 
+    def test_concat_bad_how(self):
+        seq1 = seq2 = Sequence("123")
+        with self.assertRaises(ValueError):
+            Sequence.concat([seq1, seq2], how='foo')
+
+    def test_concat_on_subclass(self):
+        seq1 = SequenceSubclass("123")
+        seq2 = Sequence("123")
+        result = SequenceSubclass.concat([seq1, seq2])
+        self.assertIs(type(result), SequenceSubclass)
+        self.assertEqual(result, SequenceSubclass("123123"))
+
+    def test_concat_on_empty_iterator(self):
+        result = SequenceSubclass.concat((_ for _ in []))
+        self.assertIs(type(result), SequenceSubclass)
+        self.assertEqual(result, SequenceSubclass(""))
+
+    def test_concat_on_bad_subclass(self):
+        seq1 = Sequence("123")
+        seq2 = SequenceSubclassTwo("123")
+        with self.assertRaises(TypeError):
+            SequenceSubclass.concat([seq1, seq2])
+
+    def test_concat_default_how(self):
+        seq1 = Sequence("1234", positional_metadata={'a': [1]*4})
+        seq2 = Sequence("5678", positional_metadata={'a': [2]*4})
+        seqbad = Sequence("9", positional_metadata={'b': [9]})
+        result1 = Sequence.concat([seq1, seq2])
+        result2 = Sequence.concat([seq1, seq2], how='strict')
+        self.assertEqual(result1, result2)
+        with six.assertRaisesRegex(self, ValueError,
+                                   '.*positional.*metadata.*inner.*outer.*'):
+            Sequence.concat([seq1, seq2, seqbad])
+
+    def test_concat_strict_simple(self):
+        expected = Sequence(
+            "12345678", positional_metadata={'a': [1, 1, 1, 1, 2, 2, 2, 2]})
+        seq1 = Sequence("1234", positional_metadata={'a': [1]*4})
+        seq2 = Sequence("5678", positional_metadata={'a': [2]*4})
+        result = Sequence.concat([seq1, seq2], how='strict')
+        self.assertEqual(result, expected)
+        self.assertFalse(result.has_metadata())
+
+    def test_concat_strict_many(self):
+        odd_key = frozenset()
+        expected = Sequence("13579",
+                            positional_metadata={'a': list('skbio'),
+                                                 odd_key: [1, 2, 3, 4, 5]})
+        result = Sequence.concat([
+                Sequence("1", positional_metadata={'a': ['s'], odd_key: [1]}),
+                Sequence("3", positional_metadata={'a': ['k'], odd_key: [2]}),
+                Sequence("5", positional_metadata={'a': ['b'], odd_key: [3]}),
+                Sequence("7", positional_metadata={'a': ['i'], odd_key: [4]}),
+                Sequence("9", positional_metadata={'a': ['o'], odd_key: [5]})
+            ], how='strict')
+        self.assertEqual(result, expected)
+        self.assertFalse(result.has_metadata())
+
+    def test_concat_strict_fail(self):
+        seq1 = Sequence("1", positional_metadata={'a': [1]})
+        seq2 = Sequence("2", positional_metadata={'b': [2]})
+        with six.assertRaisesRegex(self, ValueError,
+                                   '.*positional.*metadata.*inner.*outer.*'):
+            Sequence.concat([seq1, seq2], how='strict')
+
+    def test_concat_outer_simple(self):
+        seq1 = Sequence("1234")
+        seq2 = Sequence("5678")
+        result = Sequence.concat([seq1, seq2], how='outer')
+        self.assertEqual(result, Sequence("12345678"))
+        self.assertFalse(result.has_metadata())
+
+    def test_concat_outer_missing(self):
+        a = {}
+        b = {}
+        seq1 = Sequence("12", positional_metadata={'a': ['1', '2']})
+        seq2 = Sequence("34", positional_metadata={'b': [3, 4], 'c': [a, b]})
+        seq3 = Sequence("56")
+        seq4 = Sequence("78", positional_metadata={'a': [7, 8]})
+        seq5 = Sequence("90", positional_metadata={'b': [9, 0]})
+
+        result = Sequence.concat([seq1, seq2, seq3, seq4, seq5], how='outer')
+        expected = Sequence("1234567890", positional_metadata={
+                                'a': ['1', '2', np.nan, np.nan, np.nan, np.nan,
+                                      7, 8, np.nan, np.nan],
+                                'b': [np.nan, np.nan, 3, 4, np.nan, np.nan,
+                                      np.nan, np.nan, 9, 0],
+                                'c': [np.nan, np.nan, a, b, np.nan, np.nan,
+                                      np.nan, np.nan, np.nan, np.nan]
+                            })
+        self.assertEqual(result, expected)
+        self.assertFalse(result.has_metadata())
+
+    def test_concat_inner_simple(self):
+        seq1 = Sequence("1234")
+        seq2 = Sequence("5678", positional_metadata={'discarded': [1] * 4})
+        result = Sequence.concat([seq1, seq2], how='inner')
+        self.assertEqual(result, Sequence("12345678"))
+        self.assertFalse(result.has_metadata())
+
+    def test_concat_inner_missing(self):
+        seq1 = Sequence("12", positional_metadata={'a': ['1', '2'],
+                                                   'c': [{}, {}]})
+        seq2 = Sequence("34", positional_metadata={'a': [3, 4], 'b': [3, 4]})
+        seq3 = Sequence("56", positional_metadata={'a': [5, 6], 'b': [5, 6]})
+
+        result = Sequence.concat([seq1, seq2, seq3], how='inner')
+        expected = Sequence("123456", positional_metadata={'a': ['1', '2', 3,
+                                                                 4, 5, 6]})
+        self.assertEqual(result, expected)
+        self.assertFalse(result.has_metadata())
+
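The `how` modes exercised above ('strict', 'inner', 'outer') govern how positional-metadata columns are joined across the inputs, much like pandas join semantics; a hedged usage illustration of the expected behaviour (not the internal implementation):

    from skbio import Sequence

    seq1 = Sequence('12', positional_metadata={'a': [1, 2]})
    seq2 = Sequence('34', positional_metadata={'a': [3, 4], 'b': [3, 4]})

    inner = Sequence.concat([seq1, seq2], how='inner')  # keeps only the shared column 'a'
    outer = Sequence.concat([seq1, seq2], how='outer')  # keeps 'a' and 'b', padding missing values
    print(sorted(inner.positional_metadata.columns))    # ['a']
    print(sorted(outer.positional_metadata.columns))    # ['a', 'b']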
     def test_init_default_parameters(self):
         seq = Sequence('.ABC123xyz-')
 
@@ -77,16 +212,10 @@ class TestSequence(TestCase):
             seq.positional_metadata,
             pd.DataFrame({'quality': range(11)}, index=np.arange(11)))
 
-    def test_init_handles_missing_metadata_efficiently(self):
-        seq = Sequence('ACGT')
-
-        # metadata attributes should be None and not initialized to a "missing"
-        # representation
-        self.assertIsNone(seq._metadata)
-        self.assertIsNone(seq._positional_metadata)
-
+    def test_init_from_sequence_handles_missing_metadata_efficiently(self):
         # initializing from an existing Sequence object should handle metadata
         # attributes efficiently on both objects
+        seq = Sequence('ACGT')
         new_seq = Sequence(seq)
         self.assertIsNone(seq._metadata)
         self.assertIsNone(seq._positional_metadata)
@@ -198,6 +327,16 @@ class TestSequence(TestCase):
             Sequence('ACGT', metadata={'id': 'foo', 'description': 'bar baz'},
                      positional_metadata={'quality': range(4)}))
 
+    def test_init_from_non_descendant_sequence_object(self):
+        seq = SequenceSubclass('ACGT')
+        with self.assertRaises(TypeError) as cm:
+            SequenceSubclassTwo(seq)
+
+        error = str(cm.exception)
+        self.assertIn("SequenceSubclass", error)
+        self.assertIn("SequenceSubclassTwo", error)
+        self.assertIn("cast", error)
+
     def test_init_from_contiguous_sequence_bytes_view(self):
         bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
         view = bytes[:3]
@@ -254,132 +393,6 @@ class TestSequence(TestCase):
         with self.assertRaises(ValueError):
             bytes[1] = 42
 
-    def test_init_empty_metadata(self):
-        for empty in None, {}:
-            seq = Sequence('', metadata=empty)
-
-            self.assertFalse(seq.has_metadata())
-            self.assertEqual(seq.metadata, {})
-
-    def test_init_empty_metadata_key(self):
-        seq = Sequence('', metadata={'': ''})
-
-        self.assertTrue(seq.has_metadata())
-        self.assertEqual(seq.metadata, {'': ''})
-
-    def test_init_empty_metadata_item(self):
-        seq = Sequence('', metadata={'foo': ''})
-
-        self.assertTrue(seq.has_metadata())
-        self.assertEqual(seq.metadata, {'foo': ''})
-
-    def test_init_single_character_metadata_item(self):
-        seq = Sequence('', metadata={'foo': 'z'})
-
-        self.assertTrue(seq.has_metadata())
-        self.assertEqual(seq.metadata, {'foo': 'z'})
-
-    def test_init_multiple_character_metadata_item(self):
-        seq = Sequence('', metadata={'foo': '\nabc\tdef  G123'})
-
-        self.assertTrue(seq.has_metadata())
-        self.assertEqual(seq.metadata, {'foo': '\nabc\tdef  G123'})
-
-    def test_init_metadata_multiple_keys(self):
-        seq = Sequence('', metadata={'foo': 'abc', 42: {'nested': 'metadata'}})
-
-        self.assertTrue(seq.has_metadata())
-        self.assertEqual(seq.metadata,
-                         {'foo': 'abc', 42: {'nested': 'metadata'}})
-
-    def test_init_empty_positional_metadata(self):
-        # empty seq with missing/empty positional metadata
-        for empty in None, {}, pd.DataFrame():
-            seq = Sequence('', positional_metadata=empty)
-
-            self.assertFalse(seq.has_metadata())
-            self.assertEqual(seq.metadata, {})
-
-            self.assertFalse(seq.has_positional_metadata())
-            assert_data_frame_almost_equal(seq.positional_metadata,
-                                           pd.DataFrame(index=np.arange(0)))
-
-        # non-empty seq with missing positional metadata
-        seq = Sequence('xyz', positional_metadata=None)
-
-        self.assertFalse(seq.has_metadata())
-        self.assertEqual(seq.metadata, {})
-
-        self.assertFalse(seq.has_positional_metadata())
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame(index=np.arange(3)))
-
-    def test_init_empty_positional_metadata_item(self):
-        for item in ([], (), np.array([])):
-            seq = Sequence('', positional_metadata={'foo': item})
-
-            self.assertFalse(seq.has_metadata())
-            self.assertEqual(seq.metadata, {})
-
-            self.assertTrue(seq.has_positional_metadata())
-            assert_data_frame_almost_equal(
-                seq.positional_metadata,
-                pd.DataFrame({'foo': item}, index=np.arange(0)))
-
-    def test_init_single_positional_metadata_item(self):
-        for item in ([2], (2, ), np.array([2])):
-            seq = Sequence('G', positional_metadata={'foo': item})
-
-            self.assertFalse(seq.has_metadata())
-            self.assertEqual(seq.metadata, {})
-
-            self.assertTrue(seq.has_positional_metadata())
-            assert_data_frame_almost_equal(
-                seq.positional_metadata,
-                pd.DataFrame({'foo': item}, index=np.arange(1)))
-
-    def test_init_multiple_positional_metadata_item(self):
-        for item in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
-                     (0, 42, 42, 1, 0, 8, 100, 0, 0),
-                     np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
-            seq = Sequence('G' * 9, positional_metadata={'foo': item})
-
-            self.assertFalse(seq.has_metadata())
-            self.assertEqual(seq.metadata, {})
-
-            self.assertTrue(seq.has_positional_metadata())
-            assert_data_frame_almost_equal(
-                seq.positional_metadata,
-                pd.DataFrame({'foo': item}, index=np.arange(9)))
-
-    def test_init_positional_metadata_multiple_columns(self):
-        seq = Sequence('^' * 5,
-                       positional_metadata={'foo': np.arange(5),
-                                            'bar': np.arange(5)[::-1]})
-
-        self.assertFalse(seq.has_metadata())
-        self.assertEqual(seq.metadata, {})
-
-        self.assertTrue(seq.has_positional_metadata())
-        assert_data_frame_almost_equal(
-            seq.positional_metadata,
-            pd.DataFrame({'foo': np.arange(5),
-                          'bar': np.arange(5)[::-1]}, index=np.arange(5)))
-
-    def test_init_positional_metadata_with_custom_index(self):
-        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
-                          index=['a', 'b', 'c', 'd', 'e'])
-        seq = Sequence('^' * 5, positional_metadata=df)
-
-        self.assertFalse(seq.has_metadata())
-        self.assertEqual(seq.metadata, {})
-
-        self.assertTrue(seq.has_positional_metadata())
-        assert_data_frame_almost_equal(
-            seq.positional_metadata,
-            pd.DataFrame({'foo': np.arange(5),
-                          'bar': np.arange(5)[::-1]}, index=np.arange(5)))
-
     def test_init_invalid_sequence(self):
         # invalid dtype (numpy.ndarray input)
         with self.assertRaises(TypeError):
@@ -418,43 +431,6 @@ class TestSequence(TestCase):
         with self.assertRaises(UnicodeEncodeError):
             Sequence(u'abc\u1F30')
 
-    def test_init_invalid_metadata(self):
-        for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
-            with six.assertRaisesRegex(self, TypeError,
-                                       'metadata must be a dict'):
-                Sequence('abc', metadata=md)
-
-    def test_init_invalid_positional_metadata(self):
-        # not consumable by Pandas
-        with six.assertRaisesRegex(self, TypeError,
-                                   'Positional metadata invalid. Must be '
-                                   'consumable by pd.DataFrame. '
-                                   'Original pandas error message: '):
-            Sequence('ACGT', positional_metadata=2)
-        # 0 elements
-        with six.assertRaisesRegex(self, ValueError, '\(0\).*\(4\)'):
-            Sequence('ACGT', positional_metadata=[])
-        # not enough elements
-        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
-            Sequence('ACGT', positional_metadata=[2, 3, 4])
-        # too many elements
-        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
-            Sequence('ACGT', positional_metadata=[2, 3, 4, 5, 6])
-        # Series not enough rows
-        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
-            Sequence('ACGT', positional_metadata=pd.Series(range(3)))
-        # Series too many rows
-        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
-            Sequence('ACGT', positional_metadata=pd.Series(range(5)))
-        # DataFrame not enough rows
-        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
-            Sequence('ACGT',
-                     positional_metadata=pd.DataFrame({'quality': range(3)}))
-        # DataFrame too many rows
-        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
-            Sequence('ACGT',
-                     positional_metadata=pd.DataFrame({'quality': range(5)}))
-
     def test_values_property(self):
         # Property tests are only concerned with testing the interface
         # provided by the property: that it can be accessed, can't be
@@ -478,292 +454,21 @@ class TestSequence(TestCase):
         with self.assertRaises(AttributeError):
             seq.values = np.array("GGGG", dtype='c')
 
-    def test_metadata_property_getter(self):
-        md = {'foo': 'bar'}
-        seq = Sequence('', metadata=md)
-        self.assertIsInstance(seq.metadata, dict)
-        self.assertEqual(seq.metadata, md)
-        self.assertIsNot(seq.metadata, md)
-
-        # update existing key
-        seq.metadata['foo'] = 'baz'
-        self.assertEqual(seq.metadata, {'foo': 'baz'})
-
-        # add new key
-        seq.metadata['foo2'] = 'bar2'
-        self.assertEqual(seq.metadata, {'foo': 'baz', 'foo2': 'bar2'})
-
-    def test_metadata_property_getter_missing(self):
-        seq = Sequence('ACGT')
-
-        self.assertIsNone(seq._metadata)
-        self.assertEqual(seq.metadata, {})
-        self.assertIsNotNone(seq._metadata)
-
-    def test_metadata_property_setter(self):
-        md = {'foo': 'bar'}
-        seq = Sequence('', metadata=md)
-        self.assertEqual(seq.metadata, md)
-        self.assertIsNot(seq.metadata, md)
-
-        new_md = {'bar': 'baz', 42: 42}
-        seq.metadata = new_md
-        self.assertEqual(seq.metadata, new_md)
-        self.assertIsNot(seq.metadata, new_md)
-
-        seq.metadata = {}
-        self.assertEqual(seq.metadata, {})
-        self.assertFalse(seq.has_metadata())
-
-    def test_metadata_property_setter_invalid_type(self):
-        seq = Sequence('abc', metadata={123: 456})
-
-        for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
-                   pd.DataFrame()):
-            with six.assertRaisesRegex(self, TypeError,
-                                       'metadata must be a dict'):
-                seq.metadata = md
-
-            # object should still be usable and its original metadata shouldn't
-            # have changed
-            self.assertEqual(seq.metadata, {123: 456})
-
-    def test_metadata_property_deleter(self):
-        md = {'foo': 'bar'}
-        seq = Sequence('CAT', metadata=md)
-        self.assertTrue(seq.has_metadata())
-        self.assertEqual(seq.metadata, md)
-        self.assertIsNot(seq.metadata, md)
-
-        del seq.metadata
-        self.assertIsNone(seq._metadata)
-        self.assertFalse(seq.has_metadata())
-        self.assertEqual(seq.metadata, {})
-
-        # test deleting again
-        del seq.metadata
-        self.assertIsNone(seq._metadata)
-        self.assertFalse(seq.has_metadata())
-        self.assertEqual(seq.metadata, {})
-
-        # test deleting missing metadata immediately after instantiation
-        seq = Sequence('ACGT')
-        self.assertIsNone(seq._metadata)
-        del seq.metadata
-        self.assertIsNone(seq._metadata)
-
-    def test_metadata_property_shallow_copy(self):
-        md = {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]}
-        seq = Sequence('CAT', metadata=md)
-
-        self.assertTrue(seq.has_metadata())
-        self.assertEqual(seq.metadata, md)
-        self.assertIsNot(seq.metadata, md)
-
-        # updates to keys
-        seq.metadata['key1'] = 'new val'
-        self.assertEqual(seq.metadata,
-                         {'key1': 'new val', 'key2': 'val2', 'key3': [1, 2]})
-        # original metadata untouched
-        self.assertEqual(md, {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]})
-
-        # updates to mutable value (by reference)
-        seq.metadata['key3'].append(3)
+    def test_observed_chars_property(self):
+        self.assertEqual(Sequence('').observed_chars, set())
+        self.assertEqual(Sequence('x').observed_chars, {'x'})
+        self.assertEqual(Sequence('xYz').observed_chars, {'x', 'Y', 'z'})
+        self.assertEqual(Sequence('zzz').observed_chars, {'z'})
+        self.assertEqual(Sequence('xYzxxZz').observed_chars,
+                         {'x', 'Y', 'z', 'Z'})
+        self.assertEqual(Sequence('\t   ').observed_chars, {' ', '\t'})
         self.assertEqual(
-            seq.metadata,
-            {'key1': 'new val', 'key2': 'val2', 'key3': [1, 2, 3]})
-        # original metadata changed because we didn't deep copy
-        self.assertEqual(
-            md,
-            {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2, 3]})
-
-    def test_positional_metadata_property_getter(self):
-        md = pd.DataFrame({'foo': [22, 22, 0]})
-        seq = Sequence('ACA', positional_metadata=md)
-
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame({'foo': [22, 22, 0]}))
-        self.assertIsNot(seq.positional_metadata, md)
-
-        # update existing column
-        seq.positional_metadata['foo'] = [42, 42, 43]
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame({'foo': [42, 42, 43]}))
-
-        # add new column
-        seq.positional_metadata['foo2'] = [True, False, True]
-        assert_data_frame_almost_equal(
-            seq.positional_metadata,
-            pd.DataFrame({'foo': [42, 42, 43],
-                          'foo2': [True, False, True]}))
+            Sequence('aabbcc', metadata={'foo': 'bar'},
+                     positional_metadata={'foo': range(6)}).observed_chars,
+            {'a', 'b', 'c'})
 
-    def test_positional_metadata_property_getter_missing(self):
-        seq = Sequence('ACGT')
-
-        self.assertIsNone(seq._positional_metadata)
-        assert_data_frame_almost_equal(
-            seq.positional_metadata,
-            pd.DataFrame(index=np.arange(4)))
-        self.assertIsNotNone(seq._positional_metadata)
-
-    def test_positional_metadata_property_setter(self):
-        md = pd.DataFrame({'foo': [22, 22, 0]})
-        seq = Sequence('ACA', positional_metadata=md)
-
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame({'foo': [22, 22, 0]}))
-        self.assertIsNot(seq.positional_metadata, md)
-
-        new_md = pd.DataFrame({'bar': np.arange(3)}, index=['a', 'b', 'c'])
-        seq.positional_metadata = new_md
-
-        assert_data_frame_almost_equal(
-            seq.positional_metadata,
-            pd.DataFrame({'bar': np.arange(3)}, index=np.arange(3)))
-        self.assertIsNot(seq.positional_metadata, new_md)
-
-        seq.positional_metadata = pd.DataFrame(index=np.arange(3))
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame(index=np.arange(3)))
-        self.assertFalse(seq.has_positional_metadata())
-
-    def test_positional_metadata_property_setter_invalid_type(self):
-        # More extensive tests for invalid input are on Sequence.__init__ tests
-
-        seq = Sequence('abc', positional_metadata={'foo': [1, 2, 42]})
-
-        # not consumable by Pandas
-        with six.assertRaisesRegex(self, TypeError,
-                                   'Positional metadata invalid. Must be '
-                                   'consumable by pd.DataFrame. '
-                                   'Original pandas error message: '):
-            seq.positional_metadata = 2
-
-        # object should still be usable and its original metadata shouldn't
-        # have changed
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame({'foo': [1, 2, 42]}))
-
-        # wrong length
-        with six.assertRaisesRegex(self, ValueError, '\(2\).*\(3\)'):
-            seq.positional_metadata = {'foo': [1, 2]}
-
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame({'foo': [1, 2, 42]}))
-
-        # None isn't valid when using setter (differs from constructor)
-        with six.assertRaisesRegex(self, ValueError, '\(0\).*\(3\)'):
-            seq.positional_metadata = None
-
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame({'foo': [1, 2, 42]}))
-
-    def test_positional_metadata_property_deleter(self):
-        md = pd.DataFrame({'foo': [22, 22, 0]})
-        seq = Sequence('ACA', positional_metadata=md)
-
-        self.assertTrue(seq.has_positional_metadata())
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame({'foo': [22, 22, 0]}))
-        self.assertIsNot(seq.positional_metadata, md)
-
-        del seq.positional_metadata
-        self.assertIsNone(seq._positional_metadata)
-        self.assertFalse(seq.has_positional_metadata())
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame(index=np.arange(3)))
-
-        # test deleting again
-        del seq.positional_metadata
-        self.assertIsNone(seq._positional_metadata)
-        self.assertFalse(seq.has_positional_metadata())
-        assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame(index=np.arange(3)))
-
-        # test deleting missing positional metadata immediately after
-        # instantiation
-        seq = Sequence('ACGT')
-        self.assertIsNone(seq._positional_metadata)
-        del seq.positional_metadata
-        self.assertIsNone(seq._positional_metadata)
-
-    def test_positional_metadata_property_shallow_copy(self):
-        # define metadata as a DataFrame because this has the potential to have
-        # its underlying data shared
-        md = pd.DataFrame({'foo': [22, 22, 0]}, index=['a', 'b', 'c'])
-        seq = Sequence('ACA', positional_metadata=md)
-
-        self.assertTrue(seq.has_positional_metadata())
-        assert_data_frame_almost_equal(
-            seq.positional_metadata,
-            pd.DataFrame({'foo': [22, 22, 0]}, index=np.arange(3)))
-        self.assertIsNot(seq.positional_metadata, md)
-
-        # original metadata untouched
-        orig_md = pd.DataFrame({'foo': [22, 22, 0]}, index=['a', 'b', 'c'])
-        assert_data_frame_almost_equal(md, orig_md)
-
-        # change values of column (using same dtype)
-        seq.positional_metadata['foo'] = [42, 42, 42]
-        assert_data_frame_almost_equal(
-            seq.positional_metadata,
-            pd.DataFrame({'foo': [42, 42, 42]}, index=np.arange(3)))
-
-        # original metadata untouched
-        assert_data_frame_almost_equal(md, orig_md)
-
-        # change single value of underlying data
-        seq.positional_metadata.values[0][0] = 10
-        assert_data_frame_almost_equal(
-            seq.positional_metadata,
-            pd.DataFrame({'foo': [10, 42, 42]}, index=np.arange(3)))
-
-        # original metadata untouched
-        assert_data_frame_almost_equal(md, orig_md)
-
-        # create column of object dtype -- these aren't deep copied
-        md = pd.DataFrame({'obj': [[], [], []]}, index=['a', 'b', 'c'])
-        seq = Sequence('ACA', positional_metadata=md)
-
-        assert_data_frame_almost_equal(
-            seq.positional_metadata,
-            pd.DataFrame({'obj': [[], [], []]}, index=np.arange(3)))
-
-        # mutate list
-        seq.positional_metadata['obj'][0].append(42)
-        assert_data_frame_almost_equal(
-            seq.positional_metadata,
-            pd.DataFrame({'obj': [[42], [], []]}, index=np.arange(3)))
-
-        # original metadata changed because we didn't do a full deep copy
-        assert_data_frame_almost_equal(
-            md,
-            pd.DataFrame({'obj': [[42], [], []]}, index=['a', 'b', 'c']))
-
-    def test_positional_metadata_property_set_column_series(self):
-        seq_text = 'ACGTACGT'
-        l = len(seq_text)
-        seq = Sequence(seq_text, positional_metadata={'foo': range(l)})
-        seq.positional_metadata['bar'] = pd.Series(range(l-3))
-        # pandas.Series will be padded with NaN if too short
-        npt.assert_equal(seq.positional_metadata['bar'],
-                         np.array(list(range(l-3)) + [np.NaN]*3))
-        seq.positional_metadata['baz'] = pd.Series(range(l+3))
-        # pandas.Series will be truncated if too long
-        npt.assert_equal(seq.positional_metadata['baz'],
-                         np.array(range(l)))
-
-    def test_positional_metadata_property_set_column_array(self):
-        seq_text = 'ACGTACGT'
-        l = len(seq_text)
-        seq = Sequence(seq_text, positional_metadata={'foo': range(l)})
-        # array-like objects will fail if wrong size
-        for array_like in (np.array(range(l-1)), range(l-1),
-                           np.array(range(l+1)), range(l+1)):
-            with six.assertRaisesRegex(self, ValueError,
-                                       "Length of values does not match "
-                                       "length of index"):
-                seq.positional_metadata['bar'] = array_like
+        with self.assertRaises(AttributeError):
+            Sequence('ACGT').observed_chars = {'a', 'b', 'c'}
 
     def test_eq_and_ne(self):
         seq_a = Sequence("A")
@@ -830,17 +535,6 @@ class TestSequence(TestCase):
         seq2 = SequenceSubclass('ACGT')
         self.assertFalse(seq1 == seq2)
 
-    def test_eq_metadata_mismatch(self):
-        # both provided
-        seq1 = Sequence('ACGT', metadata={'id': 'foo'})
-        seq2 = Sequence('ACGT', metadata={'id': 'bar'})
-        self.assertFalse(seq1 == seq2)
-
-        # one provided
-        seq1 = Sequence('ACGT', metadata={'id': 'foo'})
-        seq2 = Sequence('ACGT')
-        self.assertFalse(seq1 == seq2)
-
     def test_eq_positional_metadata_mismatch(self):
         # both provided
         seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
@@ -857,18 +551,6 @@ class TestSequence(TestCase):
         seq2 = Sequence('TGCA')
         self.assertFalse(seq1 == seq2)
 
-    def test_eq_handles_missing_metadata_efficiently(self):
-        seq1 = Sequence('ACGT')
-        seq2 = Sequence('ACGT')
-        self.assertTrue(seq1 == seq2)
-
-        # metadata attributes should be None and not initialized to a "missing"
-        # representation
-        self.assertIsNone(seq1._metadata)
-        self.assertIsNone(seq1._positional_metadata)
-        self.assertIsNone(seq2._metadata)
-        self.assertIsNone(seq2._positional_metadata)
-
     def test_getitem_gives_new_sequence(self):
         seq = Sequence("Sequence string !1@2#3?.,")
         self.assertFalse(seq is seq[:])
@@ -1197,6 +879,11 @@ class TestSequence(TestCase):
         self.assertIsNone(subseq._metadata)
         self.assertIsNone(subseq._positional_metadata)
 
+    def test_getitem_empty_positional_metadata(self):
+        seq = Sequence('ACGT')
+        seq.positional_metadata  # This will create empty positional_metadata
+        self.assertEqual(Sequence('A'), seq[0])
+
     def test_len(self):
         self.assertEqual(len(Sequence("")), 0)
         self.assertEqual(len(Sequence("a")), 1)
@@ -1290,7 +977,7 @@ class TestSequence(TestCase):
         # special cases is performed in SequenceReprDoctests below. here we
         # only test that pieces of the repr are present. these tests also
         # exercise coverage for py2/3 since the doctests in
-        # SequenceReprDoctests only currently run in py2.
+        # SequenceReprDoctests only currently run in py3.
 
         # minimal
         obs = repr(Sequence(''))
@@ -1401,6 +1088,20 @@ class TestSequence(TestCase):
         with self.assertRaises(TypeError):
             seq._to(metadata={'id': 'bar'}, unrecognized_kwarg='baz')
 
+    def test_to_no_positional_metadata(self):
+        seq = Sequence('ACGT')
+        seq.positional_metadata  # This will create empty positional metadata
+        result = seq._to(sequence='TGA')
+        self.assertIsNone(result._positional_metadata)
+        self.assertEqual(result, Sequence('TGA'))
+
+    def test_to_no_metadata(self):
+        seq = Sequence('ACGT')
+        seq.metadata  # This will create empty metadata
+        result = seq._to(sequence='TGA')
+        self.assertIsNone(result._metadata)
+        self.assertEqual(result, Sequence('TGA'))
+
     def test_count(self):
         def construct_char_array(s):
             return np.fromstring(s, dtype='|S1')
@@ -1433,6 +1134,27 @@ class TestSequence(TestCase):
         self.assertIn("Sequence", str(cm.exception))
         self.assertIn("SequenceSubclass", str(cm.exception))
 
+    def test_lowercase_mungeable_key(self):
+        # NOTE: This test relies on Sequence._munge_to_index_array working
+        # properly. If the internal implementation of the lowercase method
+        # changes to no longer use _munge_to_index_array, this test may need
+        # to be updated to cover cases currently covered by
+        # _munge_to_index_array
+        self.assertEqual('AAAAaaaa', self.lowercase_seq.lowercase('key'))
+
+    def test_lowercase_array_key(self):
+        # NOTE: This test relies on Sequence._munge_to_index_array working
+        # properly. If the internal implementation of the lowercase method
+        # changes to no longer use _munge_to_index_array, this test may need
+        # to be updated to cover cases currently covered by
+        # _munge_to_index_array
+        self.assertEqual('aaAAaaaa',
+                         self.lowercase_seq.lowercase(
+                             np.array([True, True, False, False, True, True,
+                                       True, True])))
+        self.assertEqual('AaAAaAAA',
+                         self.lowercase_seq.lowercase([1, 4]))
+
     def test_distance(self):
         tested = 0
         for constructor in self.sequence_kinds:
@@ -1618,6 +1340,166 @@ class TestSequence(TestCase):
         self.assertEqual(
             SequenceSubclass("ABCDEFG").index(SequenceSubclass("A")), 0)
 
+    def test_frequencies_empty_sequence(self):
+        seq = Sequence('')
+
+        self.assertEqual(seq.frequencies(), {})
+        self.assertEqual(seq.frequencies(relative=True), {})
+
+        self.assertEqual(seq.frequencies(chars=set()), {})
+        self.assertEqual(seq.frequencies(chars=set(), relative=True), {})
+
+        self.assertEqual(seq.frequencies(chars={'a', 'b'}), {'a': 0, 'b': 0})
+
+        # use npt.assert_equal to explicitly handle nan comparisons
+        npt.assert_equal(seq.frequencies(chars={'a', 'b'}, relative=True),
+                         {'a': np.nan, 'b': np.nan})
+
+    def test_frequencies_observed_chars(self):
+        seq = Sequence('x')
+        self.assertEqual(seq.frequencies(), {'x': 1})
+        self.assertEqual(seq.frequencies(relative=True), {'x': 1.0})
+
+        seq = Sequence('xYz')
+        self.assertEqual(seq.frequencies(), {'x': 1, 'Y': 1, 'z': 1})
+        self.assertEqual(seq.frequencies(relative=True),
+                         {'x': 1/3, 'Y': 1/3, 'z': 1/3})
+
+        seq = Sequence('zzz')
+        self.assertEqual(seq.frequencies(), {'z': 3})
+        self.assertEqual(seq.frequencies(relative=True), {'z': 1.0})
+
+        seq = Sequence('xYzxxZz')
+        self.assertEqual(seq.frequencies(), {'x': 3, 'Y': 1, 'Z': 1, 'z': 2})
+        self.assertEqual(seq.frequencies(relative=True),
+                         {'x': 3/7, 'Y': 1/7, 'Z': 1/7, 'z': 2/7})
+
+        seq = Sequence('\t   ')
+        self.assertEqual(seq.frequencies(), {'\t': 1, ' ': 3})
+        self.assertEqual(seq.frequencies(relative=True), {'\t': 1/4, ' ': 3/4})
+
+        seq = Sequence('aabbcc', metadata={'foo': 'bar'},
+                       positional_metadata={'foo': range(6)})
+        self.assertEqual(seq.frequencies(), {'a': 2, 'b': 2, 'c': 2})
+        self.assertEqual(seq.frequencies(relative=True),
+                         {'a': 2/6, 'b': 2/6, 'c': 2/6})
+
+    def test_frequencies_specified_chars(self):
+        seq = Sequence('abcbca')
+
+        self.assertEqual(seq.frequencies(chars=set()), {})
+        self.assertEqual(seq.frequencies(chars=set(), relative=True), {})
+
+        self.assertEqual(seq.frequencies(chars='a'), {'a': 2})
+        self.assertEqual(seq.frequencies(chars='a', relative=True), {'a': 2/6})
+
+        self.assertEqual(seq.frequencies(chars={'a'}), {'a': 2})
+        self.assertEqual(seq.frequencies(chars={'a'}, relative=True),
+                         {'a': 2/6})
+
+        self.assertEqual(seq.frequencies(chars={'a', 'b'}), {'a': 2, 'b': 2})
+        self.assertEqual(seq.frequencies(chars={'a', 'b'}, relative=True),
+                         {'a': 2/6, 'b': 2/6})
+
+        self.assertEqual(seq.frequencies(chars={'a', 'b', 'd'}),
+                         {'a': 2, 'b': 2, 'd': 0})
+        self.assertEqual(seq.frequencies(chars={'a', 'b', 'd'}, relative=True),
+                         {'a': 2/6, 'b': 2/6, 'd': 0.0})
+
+        self.assertEqual(seq.frequencies(chars={'x', 'y', 'z'}),
+                         {'x': 0, 'y': 0, 'z': 0})
+        self.assertEqual(seq.frequencies(chars={'x', 'y', 'z'}, relative=True),
+                         {'x': 0.0, 'y': 0.0, 'z': 0.0})
+
+    def test_frequencies_chars_varied_type(self):
+        seq = Sequence('zabczzzabcz')
+
+        # single character case (shortcut)
+        chars = b'z'
+        self.assertEqual(seq.frequencies(chars=chars), {b'z': 5})
+        self.assertEqual(seq.frequencies(chars=chars, relative=True),
+                         {b'z': 5/11})
+
+        chars = u'z'
+        self.assertEqual(seq.frequencies(chars=chars), {u'z': 5})
+        self.assertEqual(seq.frequencies(chars=chars, relative=True),
+                         {u'z': 5/11})
+
+        chars = np.fromstring('z', dtype='|S1')[0]
+        self.assertEqual(seq.frequencies(chars=chars), {b'z': 5})
+        self.assertEqual(seq.frequencies(chars=chars, relative=True),
+                         {b'z': 5/11})
+
+        # set of characters, some present, some not
+        chars = {b'x', b'z'}
+        self.assertEqual(seq.frequencies(chars=chars), {b'x': 0, b'z': 5})
+        self.assertEqual(seq.frequencies(chars=chars, relative=True),
+                         {b'x': 0.0, b'z': 5/11})
+
+        chars = {u'x', u'z'}
+        self.assertEqual(seq.frequencies(chars=chars), {u'x': 0, u'z': 5})
+        self.assertEqual(seq.frequencies(chars=chars, relative=True),
+                         {u'x': 0.0, u'z': 5/11})
+
+        chars = {
+            np.fromstring('x', dtype='|S1')[0],
+            np.fromstring('z', dtype='|S1')[0]
+        }
+        self.assertEqual(seq.frequencies(chars=chars), {b'x': 0, b'z': 5})
+        self.assertEqual(seq.frequencies(chars=chars, relative=True),
+                         {b'x': 0.0, b'z': 5/11})
+
+    def test_frequencies_equivalent_to_kmer_frequencies_k_of_1(self):
+        seq = Sequence('abcabc')
+
+        exp = {'a': 2, 'b': 2, 'c': 2}
+        self.assertEqual(seq.frequencies(chars=None), exp)
+        self.assertEqual(seq.kmer_frequencies(k=1), exp)
+
+        exp = {'a': 2/6, 'b': 2/6, 'c': 2/6}
+        self.assertEqual(seq.frequencies(chars=None, relative=True), exp)
+        self.assertEqual(seq.kmer_frequencies(k=1, relative=True), exp)
+
+    def test_frequencies_passing_observed_chars_equivalent_to_default(self):
+        seq = Sequence('abcabc')
+
+        exp = {'a': 2, 'b': 2, 'c': 2}
+        self.assertEqual(seq.frequencies(chars=None), exp)
+        self.assertEqual(seq.frequencies(chars=seq.observed_chars), exp)
+
+        exp = {'a': 2/6, 'b': 2/6, 'c': 2/6}
+        self.assertEqual(seq.frequencies(chars=None, relative=True), exp)
+        self.assertEqual(
+            seq.frequencies(chars=seq.observed_chars, relative=True),
+            exp)
+
+    def test_frequencies_invalid_chars(self):
+        seq = Sequence('abcabc')
+
+        with six.assertRaisesRegex(self, ValueError, '0 characters'):
+            seq.frequencies(chars='')
+
+        with six.assertRaisesRegex(self, ValueError, '0 characters'):
+            seq.frequencies(chars={''})
+
+        with six.assertRaisesRegex(self, ValueError, '2 characters'):
+            seq.frequencies(chars='ab')
+
+        with six.assertRaisesRegex(self, ValueError, '2 characters'):
+            seq.frequencies(chars={'b', 'ab'})
+
+        with six.assertRaisesRegex(self, TypeError, 'string.*NoneType'):
+            seq.frequencies(chars={'a', None})
+
+        with six.assertRaisesRegex(self, ValueError, 'outside the range'):
+            seq.frequencies(chars=u'\u1F30')
+
+        with six.assertRaisesRegex(self, ValueError, 'outside the range'):
+            seq.frequencies(chars={'c', u'\u1F30'})
+
+        with six.assertRaisesRegex(self, TypeError, 'set.*int'):
+            seq.frequencies(chars=42)
+
     def _compare_kmers_results(self, observed, expected):
         for obs, exp in zip_longest(observed, expected, fillvalue=None):
             self.assertEqual(obs, exp)
@@ -1847,57 +1729,64 @@ class TestSequence(TestCase):
         ]
         self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
 
+    def test_kmer_frequencies_empty_sequence(self):
+        seq = Sequence('')
+
+        self.assertEqual(seq.kmer_frequencies(1), {})
+        self.assertEqual(seq.kmer_frequencies(1, overlap=False), {})
+        self.assertEqual(seq.kmer_frequencies(1, relative=True), {})
+        self.assertEqual(seq.kmer_frequencies(1, relative=True, overlap=False),
+                         {})
+
     def test_kmer_frequencies(self):
         seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
+
         # overlap = True
-        expected = Counter('GATTACA')
+        expected = {'G': 1, 'A': 3, 'T': 2, 'C': 1}
         self.assertEqual(seq.kmer_frequencies(1, overlap=True), expected)
-        expected = Counter(['GAT', 'ATT', 'TTA', 'TAC', 'ACA'])
+
+        expected = {'GAT': 1, 'ATT': 1, 'TTA': 1, 'TAC': 1, 'ACA': 1}
         self.assertEqual(seq.kmer_frequencies(3, overlap=True), expected)
-        expected = Counter([])
+
+        expected = {}
         self.assertEqual(seq.kmer_frequencies(8, overlap=True), expected)
 
         # overlap = False
-        expected = Counter(['GAT', 'TAC'])
+        expected = {'GAT': 1, 'TAC': 1}
         self.assertEqual(seq.kmer_frequencies(3, overlap=False), expected)
-        expected = Counter(['GATTACA'])
+
+        expected = {'GATTACA': 1}
         self.assertEqual(seq.kmer_frequencies(7, overlap=False), expected)
-        expected = Counter([])
+
+        expected = {}
         self.assertEqual(seq.kmer_frequencies(8, overlap=False), expected)
 
     def test_kmer_frequencies_relative(self):
         seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
+
         # overlap = True
-        expected = defaultdict(float)
-        expected['A'] = 3/7.
-        expected['C'] = 1/7.
-        expected['G'] = 1/7.
-        expected['T'] = 2/7.
+        expected = {'A': 3/7, 'C': 1/7, 'G': 1/7, 'T': 2/7}
         self.assertEqual(seq.kmer_frequencies(1, overlap=True, relative=True),
                          expected)
-        expected = defaultdict(float)
-        expected['GAT'] = 1/5.
-        expected['ATT'] = 1/5.
-        expected['TTA'] = 1/5.
-        expected['TAC'] = 1/5.
-        expected['ACA'] = 1/5.
+
+        expected = {'GAT': 1/5, 'ATT': 1/5, 'TTA': 1/5, 'TAC': 1/5, 'ACA': 1/5}
         self.assertEqual(seq.kmer_frequencies(3, overlap=True, relative=True),
                          expected)
-        expected = defaultdict(float)
+
+        expected = {}
         self.assertEqual(seq.kmer_frequencies(8, overlap=True, relative=True),
                          expected)
 
         # overlap = False
-        expected = defaultdict(float)
-        expected['GAT'] = 1/2.
-        expected['TAC'] = 1/2.
+        expected = {'GAT': 1/2, 'TAC': 1/2}
         self.assertEqual(seq.kmer_frequencies(3, overlap=False, relative=True),
                          expected)
-        expected = defaultdict(float)
-        expected['GATTACA'] = 1.0
+
+        expected = {'GATTACA': 1.0}
         self.assertEqual(seq.kmer_frequencies(7, overlap=False, relative=True),
                          expected)
-        expected = defaultdict(float)
+
+        expected = {}
         self.assertEqual(seq.kmer_frequencies(8, overlap=False, relative=True),
                          expected)
 
@@ -1918,8 +1807,7 @@ class TestSequence(TestCase):
         # 1.0. This occurs because 1/10 cannot be represented exactly as a
         # floating point number.
         seq = Sequence('AAAAAAAAAA')
-        self.assertEqual(seq.kmer_frequencies(1, relative=True),
-                         defaultdict(float, {'A': 1.0}))
+        self.assertEqual(seq.kmer_frequencies(1, relative=True), {'A': 1.0})
 
     def test_find_with_regex(self):
         seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
@@ -2045,39 +1933,6 @@ class TestSequence(TestCase):
             obs = s.iter_contiguous(c(contiguous()), invert=True)
             self.assertEqual(list(obs), exp)
 
-    def test_has_metadata(self):
-        # truly missing
-        seq = Sequence('ACGT')
-        self.assertFalse(seq.has_metadata())
-        # metadata attribute should be None and not initialized to a "missing"
-        # representation
-        self.assertIsNone(seq._metadata)
-
-        # looks empty
-        seq = Sequence('ACGT', metadata={})
-        self.assertFalse(seq.has_metadata())
-
-        # metadata is present
-        seq = Sequence('ACGT', metadata={'foo': 42})
-        self.assertTrue(seq.has_metadata())
-
-    def test_has_positional_metadata(self):
-        # truly missing
-        seq = Sequence('ACGT')
-        self.assertFalse(seq.has_positional_metadata())
-        # positional metadata attribute should be None and not initialized to a
-        # "missing" representation
-        self.assertIsNone(seq._positional_metadata)
-
-        # looks empty
-        seq = Sequence('ACGT',
-                       positional_metadata=pd.DataFrame(index=np.arange(4)))
-        self.assertFalse(seq.has_positional_metadata())
-
-        # positional metadata is present
-        seq = Sequence('ACGT', positional_metadata={'foo': [1, 2, 3, 4]})
-        self.assertTrue(seq.has_positional_metadata())
-
     def test_copy_without_metadata(self):
         # shallow vs deep copy with sequence only should be equivalent. thus,
         # copy.copy, copy.deepcopy, and Sequence.copy(deep=True|False) should
@@ -2175,6 +2030,13 @@ class TestSequence(TestCase):
                 pd.DataFrame({'bar': [[], [], [], []],
                               'baz': [42, 42, 42, 42]}))
 
+    def test_copy_preserves_read_only_flag_on_bytes(self):
+        seq = Sequence('ACGT')
+        seq_copy = copy.copy(seq)
+
+        with self.assertRaises(ValueError):
+            seq_copy._bytes[0] = 'B'
+
     def test_deepcopy_memo_is_respected(self):
         # basic test to ensure deepcopy's memo is passed through to recursive
         # deepcopy calls
@@ -2366,10 +2228,11 @@ class TestSequence(TestCase):
 #
 # these doctests exercise the correct formatting of Sequence's repr in a
 # variety of situations. they are more extensive than the unit tests above
-# (TestSequence.test_repr) but are only currently run in py2. thus, they cannot
+# (TestSequence.test_repr) but are only currently run in py3. thus, they cannot
 # be relied upon for coverage (the unit tests take care of this)
 class SequenceReprDoctests(object):
     r"""
+    >>> import pandas as pd
     >>> from skbio import Sequence
 
     Empty (minimal) sequence:
@@ -2557,12 +2420,12 @@ class SequenceReprDoctests(object):
     ...     42.5: 'abc ' * 200,
     ...     # unsupported type (tuple) key, unsupported type (list) value
     ...     ('foo', 'bar'): [1, 2, 3],
-    ...     # unicode key, single long word that wraps
-    ...     u'long word': 'abc' * 30,
+    ...     # bytes key, single long word that wraps
+    ...     b'long word': 'abc' * 30,
     ...     # truncated key (too long), None value
     ...     'too long of a key name to display in repr': None,
-    ...     # wrapped unicode value (has u'' prefix)
-    ...     'unicode wrapped value': u'abcd' * 25,
+    ...     # wrapped bytes value (has b'' prefix)
+    ...     'bytes wrapped value': b'abcd' * 25,
     ...     # float value
     ...     0.1: 99.9999,
     ...     # bool value
@@ -2570,19 +2433,19 @@ class SequenceReprDoctests(object):
     ...     # None key, complex value
     ...     None: complex(-1.0, 0.0),
     ...     # nested quotes
-    ...     10: '"\''}
+    ...     10: '"\''
     ... }
-    >>> positional_metadata = {
+    >>> positional_metadata = pd.DataFrame.from_items([
     ...     # str key, int list value
-    ...     'foo': [1, 2, 3, 4],
+    ...     ('foo', [1, 2, 3, 4]),
     ...     # float key, float list value
-    ...     42.5: [2.5, 3.0, 4.2, -0.00001],
+    ...     (42.5, [2.5, 3.0, 4.2, -0.00001]),
     ...     # int key, object list value
-    ...     42: [[], 4, 5, {}],
+    ...     (42, [[], 4, 5, {}]),
     ...     # truncated key (too long), bool list value
-    ...     'abc' * 90: [True, False, False, True],
+    ...     ('abc' * 90, [True, False, False, True]),
     ...     # None key
-    ...     None: range(4)}
+    ...     (None, range(4))])
     >>> Sequence('ACGT', metadata=metadata,
     ...          positional_metadata=positional_metadata)
     Sequence
@@ -2592,28 +2455,28 @@ class SequenceReprDoctests(object):
         True: 'abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
                abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
                abc abc abc abc '
+        b'long word': 'abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabca
+                       bcabcabcabcabcabcabcabcabcabcabcabcabc'
         0.1: 99.9999
-        42.5: <type 'str'>
+        42.5: <class 'str'>
         10: '"\''
         42: 'some words to test text wrapping and such... yada yada yada
              yada yada yada yada yada.'
         43: False
         'abc': 'some description'
-        'bar': <type 'dict'>
+        'bar': <class 'dict'>
+        'bytes wrapped value': b'abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
+                                 cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
+                                 abcdabcdabcdabcd'
         'foo': 42
-        <type 'str'>: None
-        'unicode wrapped value': u'abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
-                                   abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
-                                   abcdabcdabcdabcdabcd'
-        <type 'tuple'>: <type 'list'>
-        u'long word': 'abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabca
-                       bcabcabcabcabcabcabcabcabcabcabcabcabc'
+        <class 'str'>: None
+        <class 'tuple'>: <class 'list'>
     Positional metadata:
-        None: <dtype: int64>
-        42: <dtype: object>
-        42.5: <dtype: float64>
-        <type 'str'>: <dtype: bool>
         'foo': <dtype: int64>
+        42.5: <dtype: float64>
+        42: <dtype: object>
+        <class 'str'>: <dtype: bool>
+        None: <dtype: int64>
     Stats:
         length: 4
     -----------------------------------------------------------------------
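
[Note on the hunks above: the kmer_frequencies/frequencies tests encode a return-type change -- results are now plain dicts rather than collections.Counter/defaultdict objects, and relative frequencies are ordinary Python floats. The following is a minimal sketch of the behaviour those tests expect, with values taken directly from the GATTACA and abcbca cases shown in the diff; it assumes Python 3 with scikit-bio 0.4.1 importable and is illustrative only, not part of the commit.]

    from skbio import Sequence

    seq = Sequence('GATTACA')

    # Counts come back as a plain dict, not a collections.Counter.
    assert seq.kmer_frequencies(1, overlap=True) == {'G': 1, 'A': 3, 'T': 2, 'C': 1}

    # Non-overlapping k-mers drop the trailing partial window.
    assert seq.kmer_frequencies(3, overlap=False) == {'GAT': 1, 'TAC': 1}
    assert seq.kmer_frequencies(3, overlap=False, relative=True) == {'GAT': 1/2, 'TAC': 1/2}

    # frequencies() reports zero counts for requested-but-absent characters.
    assert Sequence('abcbca').frequencies(chars={'a', 'b', 'd'}) == {'a': 2, 'b': 2, 'd': 0}

    # Relative frequencies are IEEE-754 doubles, so (as the truncated comment
    # about 1/10 in the last kmer_frequencies hunk notes) they need not sum to
    # exactly 1.0: 1/10 has no exact binary representation.
    assert sum([1/10] * 10) != 1.0
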
diff --git a/skbio/stats/__init__.py b/skbio/stats/__init__.py
index 83fb67c..1b5809f 100644
--- a/skbio/stats/__init__.py
+++ b/skbio/stats/__init__.py
@@ -16,7 +16,6 @@ Subpackages
    distance
    evolve
    ordination
-   spatial
    gradient
    power
    composition
diff --git a/skbio/stats/__subsample.c b/skbio/stats/__subsample.c
index 261692c..73505ab 100644
--- a/skbio/stats/__subsample.c
+++ b/skbio/stats/__subsample.c
@@ -1,4 +1,4 @@
-/* Generated by Cython 0.22 */
+/* Generated by Cython 0.23.4 */
 
 /* BEGIN: Cython Metadata
 {
@@ -9,25 +9,13 @@
 END: Cython Metadata */
 
 #define PY_SSIZE_T_CLEAN
-#ifndef CYTHON_USE_PYLONG_INTERNALS
-#ifdef PYLONG_BITS_IN_DIGIT
-#define CYTHON_USE_PYLONG_INTERNALS 0
-#else
-#include "pyconfig.h"
-#ifdef PYLONG_BITS_IN_DIGIT
-#define CYTHON_USE_PYLONG_INTERNALS 1
-#else
-#define CYTHON_USE_PYLONG_INTERNALS 0
-#endif
-#endif
-#endif
 #include "Python.h"
 #ifndef Py_PYTHON_H
     #error Python headers needed to compile C extensions, please install development version of Python.
 #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
     #error Cython requires Python 2.6+ or Python 3.2+.
 #else
-#define CYTHON_ABI "0_22"
+#define CYTHON_ABI "0_23_4"
 #include <stddef.h>
 #ifndef offsetof
 #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
@@ -62,6 +50,9 @@ END: Cython Metadata */
 #define CYTHON_COMPILING_IN_PYPY 0
 #define CYTHON_COMPILING_IN_CPYTHON 1
 #endif
+#if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000
+#define CYTHON_USE_PYLONG_INTERNALS 1
+#endif
 #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
 #define Py_OptimizeFlag 0
 #endif
@@ -69,26 +60,30 @@ END: Cython Metadata */
 #define CYTHON_FORMAT_SSIZE_T "z"
 #if PY_MAJOR_VERSION < 3
   #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
-  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
           PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
   #define __Pyx_DefaultClassType PyClass_Type
 #else
   #define __Pyx_BUILTIN_MODULE_NAME "builtins"
-  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
           PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
   #define __Pyx_DefaultClassType PyType_Type
 #endif
-#if PY_MAJOR_VERSION >= 3
+#ifndef Py_TPFLAGS_CHECKTYPES
   #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
   #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
   #define Py_TPFLAGS_HAVE_NEWBUFFER 0
 #endif
-#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
   #define Py_TPFLAGS_HAVE_FINALIZE 0
 #endif
 #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
   #define CYTHON_PEP393_ENABLED 1
-  #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ? \
+  #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ?\
                                               0 : _PyUnicode_Ready((PyObject *)(op)))
   #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_LENGTH(u)
   #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
@@ -107,12 +102,13 @@ END: Cython Metadata */
 #if CYTHON_COMPILING_IN_PYPY
   #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)
   #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)
-  #define __Pyx_PyFrozenSet_Size(s)         PyObject_Size(s)
 #else
   #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)
-  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
+  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
       PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
-  #define __Pyx_PyFrozenSet_Size(s)         PySet_Size(s)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
+  #define PyUnicode_Contains(u, s)  PySequence_Contains(u, s)
 #endif
 #define __Pyx_PyString_FormatSafe(a, b)   ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
 #define __Pyx_PyUnicode_FormatSafe(a, b)  ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
@@ -177,16 +173,18 @@ END: Cython Metadata */
 #else
   #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
 #endif
-#ifndef CYTHON_INLINE
-  #if defined(__GNUC__)
-    #define CYTHON_INLINE __inline__
-  #elif defined(_MSC_VER)
-    #define CYTHON_INLINE __inline
-  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
-    #define CYTHON_INLINE inline
-  #else
-    #define CYTHON_INLINE
-  #endif
+#if PY_VERSION_HEX >= 0x030500B1
+#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+#elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+typedef struct {
+    unaryfunc am_await;
+    unaryfunc am_aiter;
+    unaryfunc am_anext;
+} __Pyx_PyAsyncMethodsStruct;
+#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+#else
+#define __Pyx_PyType_AsAsync(obj) NULL
 #endif
 #ifndef CYTHON_RESTRICT
   #if defined(__GNUC__)
@@ -199,35 +197,33 @@ END: Cython Metadata */
     #define CYTHON_RESTRICT
   #endif
 #endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+
+#ifndef CYTHON_INLINE
+  #if defined(__GNUC__)
+    #define CYTHON_INLINE __inline__
+  #elif defined(_MSC_VER)
+    #define CYTHON_INLINE __inline
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_INLINE inline
+  #else
+    #define CYTHON_INLINE
+  #endif
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+  #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
 #ifdef NAN
 #define __PYX_NAN() ((float) NAN)
 #else
 static CYTHON_INLINE float __PYX_NAN() {
-  /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
-   a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
-   a quiet NaN. */
   float value;
   memset(&value, 0xFF, sizeof(value));
   return value;
 }
 #endif
-#define __Pyx_void_to_None(void_result) (void_result, Py_INCREF(Py_None), Py_None)
-#ifdef __cplusplus
-template<typename T>
-void __Pyx_call_destructor(T* x) {
-    x->~T();
-}
-template<typename T>
-class __Pyx_FakeReference {
-  public:
-    __Pyx_FakeReference() : ptr(NULL) { }
-    __Pyx_FakeReference(T& ref) : ptr(&ref) { }
-    T *operator->() { return ptr; }
-    operator T&() { return *ptr; }
-  private:
-    T *ptr;
-};
-#endif
 
 
 #if PY_MAJOR_VERSION >= 3
@@ -246,10 +242,6 @@ class __Pyx_FakeReference {
   #endif
 #endif
 
-#if defined(WIN32) || defined(MS_WINDOWS)
-#define _USE_MATH_DEFINES
-#endif
-#include <math.h>
 #define __PYX_HAVE__skbio__stats____subsample
 #define __PYX_HAVE_API__skbio__stats____subsample
 #include "string.h"
@@ -278,6 +270,13 @@ class __Pyx_FakeReference {
 #   define CYTHON_UNUSED
 # endif
 #endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+#  define CYTHON_NCP_UNUSED
+# else
+#  define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
 typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
                 const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
 
@@ -286,16 +285,34 @@ typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
 #define __PYX_DEFAULT_STRING_ENCODING ""
 #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
 #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
-#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (    \
-    (sizeof(type) < sizeof(Py_ssize_t))  ||             \
-    (sizeof(type) > sizeof(Py_ssize_t) &&               \
-          likely(v < (type)PY_SSIZE_T_MAX ||            \
-                 v == (type)PY_SSIZE_T_MAX)  &&         \
-          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||       \
-                                v == (type)PY_SSIZE_T_MIN)))  ||  \
-    (sizeof(type) == sizeof(Py_ssize_t) &&              \
-          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||        \
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (\
+    (sizeof(type) < sizeof(Py_ssize_t))  ||\
+    (sizeof(type) > sizeof(Py_ssize_t) &&\
+          likely(v < (type)PY_SSIZE_T_MAX ||\
+                 v == (type)PY_SSIZE_T_MAX)  &&\
+          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+                                v == (type)PY_SSIZE_T_MIN)))  ||\
+    (sizeof(type) == sizeof(Py_ssize_t) &&\
+          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
                                v == (type)PY_SSIZE_T_MAX)))  )
+#if defined (__cplusplus) && __cplusplus >= 201103L
+    #include <cstdlib>
+    #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+    #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+    #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER) && defined (_M_X64)
+    #define __Pyx_sst_abs(value) _abs64(value)
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+    #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+    #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
 static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
 static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
 #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
@@ -330,8 +347,9 @@ static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
 #define __Pyx_PyUnicode_FromUnicode(u)       PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
 #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
 #define __Pyx_PyUnicode_AsUnicode            PyUnicode_AsUnicode
-#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
-#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
+#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
+#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
 static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
 static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
 static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
@@ -500,7 +518,7 @@ typedef struct {
 } __Pyx_BufFmt_Context;
 
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":725
  * # in Cython to enable them only on the right systems.
  * 
  * ctypedef npy_int8       int8_t             # <<<<<<<<<<<<<<
@@ -509,7 +527,7 @@ typedef struct {
  */
 typedef npy_int8 __pyx_t_5numpy_int8_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":726
  * 
  * ctypedef npy_int8       int8_t
  * ctypedef npy_int16      int16_t             # <<<<<<<<<<<<<<
@@ -518,7 +536,7 @@ typedef npy_int8 __pyx_t_5numpy_int8_t;
  */
 typedef npy_int16 __pyx_t_5numpy_int16_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":727
  * ctypedef npy_int8       int8_t
  * ctypedef npy_int16      int16_t
  * ctypedef npy_int32      int32_t             # <<<<<<<<<<<<<<
@@ -527,7 +545,7 @@ typedef npy_int16 __pyx_t_5numpy_int16_t;
  */
 typedef npy_int32 __pyx_t_5numpy_int32_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":729
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":728
  * ctypedef npy_int16      int16_t
  * ctypedef npy_int32      int32_t
  * ctypedef npy_int64      int64_t             # <<<<<<<<<<<<<<
@@ -536,7 +554,7 @@ typedef npy_int32 __pyx_t_5numpy_int32_t;
  */
 typedef npy_int64 __pyx_t_5numpy_int64_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":732
  * #ctypedef npy_int128     int128_t
  * 
  * ctypedef npy_uint8      uint8_t             # <<<<<<<<<<<<<<
@@ -545,7 +563,7 @@ typedef npy_int64 __pyx_t_5numpy_int64_t;
  */
 typedef npy_uint8 __pyx_t_5numpy_uint8_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":733
  * 
  * ctypedef npy_uint8      uint8_t
  * ctypedef npy_uint16     uint16_t             # <<<<<<<<<<<<<<
@@ -554,7 +572,7 @@ typedef npy_uint8 __pyx_t_5numpy_uint8_t;
  */
 typedef npy_uint16 __pyx_t_5numpy_uint16_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":734
  * ctypedef npy_uint8      uint8_t
  * ctypedef npy_uint16     uint16_t
  * ctypedef npy_uint32     uint32_t             # <<<<<<<<<<<<<<
@@ -563,7 +581,7 @@ typedef npy_uint16 __pyx_t_5numpy_uint16_t;
  */
 typedef npy_uint32 __pyx_t_5numpy_uint32_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":736
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":735
  * ctypedef npy_uint16     uint16_t
  * ctypedef npy_uint32     uint32_t
  * ctypedef npy_uint64     uint64_t             # <<<<<<<<<<<<<<
@@ -572,7 +590,7 @@ typedef npy_uint32 __pyx_t_5numpy_uint32_t;
  */
 typedef npy_uint64 __pyx_t_5numpy_uint64_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":739
  * #ctypedef npy_uint128    uint128_t
  * 
  * ctypedef npy_float32    float32_t             # <<<<<<<<<<<<<<
@@ -581,7 +599,7 @@ typedef npy_uint64 __pyx_t_5numpy_uint64_t;
  */
 typedef npy_float32 __pyx_t_5numpy_float32_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":741
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":740
  * 
  * ctypedef npy_float32    float32_t
  * ctypedef npy_float64    float64_t             # <<<<<<<<<<<<<<
@@ -590,7 +608,7 @@ typedef npy_float32 __pyx_t_5numpy_float32_t;
  */
 typedef npy_float64 __pyx_t_5numpy_float64_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":749
  * # The int types are mapped a bit surprising --
  * # numpy.int corresponds to 'l' and numpy.long to 'q'
  * ctypedef npy_long       int_t             # <<<<<<<<<<<<<<
@@ -599,7 +617,7 @@ typedef npy_float64 __pyx_t_5numpy_float64_t;
  */
 typedef npy_long __pyx_t_5numpy_int_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":750
  * # numpy.int corresponds to 'l' and numpy.long to 'q'
  * ctypedef npy_long       int_t
  * ctypedef npy_longlong   long_t             # <<<<<<<<<<<<<<
@@ -608,7 +626,7 @@ typedef npy_long __pyx_t_5numpy_int_t;
  */
 typedef npy_longlong __pyx_t_5numpy_long_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":751
  * ctypedef npy_long       int_t
  * ctypedef npy_longlong   long_t
  * ctypedef npy_longlong   longlong_t             # <<<<<<<<<<<<<<
@@ -617,7 +635,7 @@ typedef npy_longlong __pyx_t_5numpy_long_t;
  */
 typedef npy_longlong __pyx_t_5numpy_longlong_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":753
  * ctypedef npy_longlong   longlong_t
  * 
  * ctypedef npy_ulong      uint_t             # <<<<<<<<<<<<<<
@@ -626,7 +644,7 @@ typedef npy_longlong __pyx_t_5numpy_longlong_t;
  */
 typedef npy_ulong __pyx_t_5numpy_uint_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":754
  * 
  * ctypedef npy_ulong      uint_t
  * ctypedef npy_ulonglong  ulong_t             # <<<<<<<<<<<<<<
@@ -635,7 +653,7 @@ typedef npy_ulong __pyx_t_5numpy_uint_t;
  */
 typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":755
  * ctypedef npy_ulong      uint_t
  * ctypedef npy_ulonglong  ulong_t
  * ctypedef npy_ulonglong  ulonglong_t             # <<<<<<<<<<<<<<
@@ -644,7 +662,7 @@ typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
  */
 typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":757
  * ctypedef npy_ulonglong  ulonglong_t
  * 
  * ctypedef npy_intp       intp_t             # <<<<<<<<<<<<<<
@@ -653,7 +671,7 @@ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
  */
 typedef npy_intp __pyx_t_5numpy_intp_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":758
  * 
  * ctypedef npy_intp       intp_t
  * ctypedef npy_uintp      uintp_t             # <<<<<<<<<<<<<<
@@ -662,7 +680,7 @@ typedef npy_intp __pyx_t_5numpy_intp_t;
  */
 typedef npy_uintp __pyx_t_5numpy_uintp_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":760
  * ctypedef npy_uintp      uintp_t
  * 
  * ctypedef npy_double     float_t             # <<<<<<<<<<<<<<
@@ -671,7 +689,7 @@ typedef npy_uintp __pyx_t_5numpy_uintp_t;
  */
 typedef npy_double __pyx_t_5numpy_float_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":761
  * 
  * ctypedef npy_double     float_t
  * ctypedef npy_double     double_t             # <<<<<<<<<<<<<<
@@ -680,7 +698,7 @@ typedef npy_double __pyx_t_5numpy_float_t;
  */
 typedef npy_double __pyx_t_5numpy_double_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":762
  * ctypedef npy_double     float_t
  * ctypedef npy_double     double_t
  * ctypedef npy_longdouble longdouble_t             # <<<<<<<<<<<<<<
@@ -711,7 +729,7 @@ typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
 
 /*--- Type declarations ---*/
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":764
  * ctypedef npy_longdouble longdouble_t
  * 
  * ctypedef npy_cfloat      cfloat_t             # <<<<<<<<<<<<<<
@@ -720,7 +738,7 @@ typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
  */
 typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":765
  * 
  * ctypedef npy_cfloat      cfloat_t
  * ctypedef npy_cdouble     cdouble_t             # <<<<<<<<<<<<<<
@@ -729,7 +747,7 @@ typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
  */
 typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":767
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":766
  * ctypedef npy_cfloat      cfloat_t
  * ctypedef npy_cdouble     cdouble_t
  * ctypedef npy_clongdouble clongdouble_t             # <<<<<<<<<<<<<<
@@ -738,7 +756,7 @@ typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
  */
 typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":768
  * ctypedef npy_clongdouble clongdouble_t
  * 
  * ctypedef npy_cdouble     complex_t             # <<<<<<<<<<<<<<
@@ -764,19 +782,19 @@ typedef npy_cdouble __pyx_t_5numpy_complex_t;
   static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
   #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
 #ifdef WITH_THREAD
-  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
-          if (acquire_gil) { \
-              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
-              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
-              PyGILState_Release(__pyx_gilstate_save); \
-          } else { \
-              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
+  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+          if (acquire_gil) {\
+              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+              PyGILState_Release(__pyx_gilstate_save);\
+          } else {\
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
           }
 #else
-  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
           __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
 #endif
-  #define __Pyx_RefNannyFinishContext() \
+  #define __Pyx_RefNannyFinishContext()\
           __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
   #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
   #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
@@ -799,13 +817,13 @@ typedef npy_cdouble __pyx_t_5numpy_complex_t;
   #define __Pyx_XGOTREF(r)
   #define __Pyx_XGIVEREF(r)
 #endif
-#define __Pyx_XDECREF_SET(r, v) do {                            \
-        PyObject *tmp = (PyObject *) r;                         \
-        r = v; __Pyx_XDECREF(tmp);                              \
+#define __Pyx_XDECREF_SET(r, v) do {\
+        PyObject *tmp = (PyObject *) r;\
+        r = v; __Pyx_XDECREF(tmp);\
     } while (0)
-#define __Pyx_DECREF_SET(r, v) do {                             \
-        PyObject *tmp = (PyObject *) r;                         \
-        r = v; __Pyx_DECREF(tmp);                               \
+#define __Pyx_DECREF_SET(r, v) do {\
+        PyObject *tmp = (PyObject *) r;\
+        r = v; __Pyx_DECREF(tmp);\
     } while (0)
 #define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
 #define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
@@ -832,8 +850,8 @@ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
 
 static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
 
-static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
-    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
+    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
     const char* function_name);
 
 static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
@@ -874,7 +892,7 @@ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyOb
 
 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
 
-#if PY_MAJOR_VERSION >= 3
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
 static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
     PyObject *value;
     value = PyDict_GetItemWithError(d, key);
@@ -900,6 +918,8 @@ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
 
 static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
 
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
+
 typedef struct {
     int code_line;
     PyCodeObject* code_object;
@@ -942,8 +962,6 @@ typedef struct {
 static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
 static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
 
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
-
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value);
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value);
@@ -1052,6 +1070,8 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
 
 static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
 
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
+
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
 
 static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
@@ -1075,19 +1095,21 @@ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
 
 /* Module declarations from 'cpython.buffer' */
 
-/* Module declarations from 'cpython.ref' */
-
 /* Module declarations from 'libc.string' */
 
 /* Module declarations from 'libc.stdio' */
 
-/* Module declarations from 'cpython.object' */
-
 /* Module declarations from '__builtin__' */
 
 /* Module declarations from 'cpython.type' */
 static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
 
+/* Module declarations from 'cpython' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from 'cpython.ref' */
+
 /* Module declarations from 'libc.stdlib' */
 
 /* Module declarations from 'numpy' */
@@ -1109,9 +1131,6 @@ int __pyx_module_is_main_skbio__stats____subsample = 0;
 static PyObject *__pyx_builtin_range;
 static PyObject *__pyx_builtin_ValueError;
 static PyObject *__pyx_builtin_RuntimeError;
-static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_replacement(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_counts, PyObject *__pyx_v_n, PyObject *__pyx_v_counts_sum); /* proto */
-static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
-static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
 static char __pyx_k_B[] = "B";
 static char __pyx_k_H[] = "H";
 static char __pyx_k_I[] = "I";
@@ -1154,7 +1173,7 @@ static char __pyx_k_RuntimeError[] = "RuntimeError";
 static char __pyx_k_unpacked_idx[] = "unpacked_idx";
 static char __pyx_k_skbio_stats___subsample[] = "skbio.stats.__subsample";
 static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
-static char __pyx_k_home_evan_biocore_scikit_bio_sk[] = "/home/evan/biocore/scikit-bio/skbio/stats/__subsample.pyx";
+static char __pyx_k_Users_jairideout_dev_scikit_bio[] = "/Users/jairideout/dev/scikit-bio/skbio/stats/__subsample.pyx";
 static char __pyx_k_subsample_counts_without_replac[] = "_subsample_counts_without_replacement";
 static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
 static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
@@ -1165,13 +1184,13 @@ static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
 static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
 static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
 static PyObject *__pyx_n_s_RuntimeError;
+static PyObject *__pyx_kp_s_Users_jairideout_dev_scikit_bio;
 static PyObject *__pyx_n_s_ValueError;
 static PyObject *__pyx_n_s_cnt;
 static PyObject *__pyx_n_s_counts;
 static PyObject *__pyx_n_s_counts_sum;
 static PyObject *__pyx_n_s_dtype;
 static PyObject *__pyx_n_s_empty;
-static PyObject *__pyx_kp_s_home_evan_biocore_scikit_bio_sk;
 static PyObject *__pyx_n_s_i;
 static PyObject *__pyx_n_s_idx;
 static PyObject *__pyx_n_s_import;
@@ -1194,6 +1213,9 @@ static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
 static PyObject *__pyx_n_s_unpacked;
 static PyObject *__pyx_n_s_unpacked_idx;
 static PyObject *__pyx_n_s_zeros_like;
+static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_replacement(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_counts, PyObject *__pyx_v_n, PyObject *__pyx_v_counts_sum); /* proto */
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
 static PyObject *__pyx_tuple_;
 static PyObject *__pyx_tuple__2;
 static PyObject *__pyx_tuple__3;
@@ -1322,7 +1344,7 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   Py_ssize_t __pyx_t_14;
   Py_ssize_t __pyx_t_15;
   npy_intp __pyx_t_16;
-  npy_intp __pyx_t_17;
+  Py_ssize_t __pyx_t_17;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
@@ -1364,11 +1386,11 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_v_counts_sum);
-  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_counts_sum);
   __Pyx_GIVEREF(__pyx_v_counts_sum);
+  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_counts_sum);
   __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, ((PyObject *)((PyObject*)(&PyInt_Type)))) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, ((PyObject *)(&PyInt_Type))) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
@@ -1507,10 +1529,10 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   } else {
     __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
-    PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = NULL;
+    __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = NULL;
     __Pyx_INCREF(((PyObject *)__pyx_v_unpacked));
-    PyTuple_SET_ITEM(__pyx_t_2, 0+1, ((PyObject *)__pyx_v_unpacked));
     __Pyx_GIVEREF(((PyObject *)__pyx_v_unpacked));
+    PyTuple_SET_ITEM(__pyx_t_2, 0+1, ((PyObject *)__pyx_v_unpacked));
     __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
@@ -1569,10 +1591,10 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   } else {
     __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
-    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL;
+    __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); __pyx_t_4 = NULL;
     __Pyx_INCREF(((PyObject *)__pyx_v_counts));
-    PyTuple_SET_ITEM(__pyx_t_1, 0+1, ((PyObject *)__pyx_v_counts));
     __Pyx_GIVEREF(((PyObject *)__pyx_v_counts));
+    PyTuple_SET_ITEM(__pyx_t_1, 0+1, ((PyObject *)__pyx_v_counts));
     __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
@@ -1689,7 +1711,7 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":197
  *         # experimental exception made for __getbuffer__ and __releasebuffer__
  *         # -- the details of this may change.
  *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
@@ -1739,7 +1761,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_GIVEREF(__pyx_v_info->obj);
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":203
  *             # of flags
  * 
  *             if info == NULL: return             # <<<<<<<<<<<<<<
@@ -1752,7 +1774,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L0;
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":206
  * 
  *             cdef int copy_shape, i, ndim
  *             cdef int endian_detector = 1             # <<<<<<<<<<<<<<
@@ -1761,7 +1783,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_endian_detector = 1;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":207
  *             cdef int copy_shape, i, ndim
  *             cdef int endian_detector = 1
  *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
@@ -1770,7 +1792,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":209
  *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
  * 
  *             ndim = PyArray_NDIM(self)             # <<<<<<<<<<<<<<
@@ -1779,7 +1801,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":211
  *             ndim = PyArray_NDIM(self)
  * 
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
@@ -1789,7 +1811,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":212
  * 
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
  *                 copy_shape = 1             # <<<<<<<<<<<<<<
@@ -1797,22 +1819,30 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  *                 copy_shape = 0
  */
     __pyx_v_copy_shape = 1;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":211
+ *             ndim = PyArray_NDIM(self)
+ * 
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
+ *                 copy_shape = 1
+ *             else:
+ */
     goto __pyx_L4;
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":214
  *                 copy_shape = 1
  *             else:
  *                 copy_shape = 0             # <<<<<<<<<<<<<<
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  */
+  /*else*/ {
     __pyx_v_copy_shape = 0;
   }
   __pyx_L4:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":216
  *                 copy_shape = 0
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
@@ -1826,7 +1856,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L6_bool_binop_done;
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":217
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):             # <<<<<<<<<<<<<<
@@ -1836,9 +1866,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
   __pyx_t_1 = __pyx_t_2;
   __pyx_L6_bool_binop_done:;
+
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":216
+ *                 copy_shape = 0
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ */
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":218
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
@@ -1850,9 +1888,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_Raise(__pyx_t_3, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":216
+ *                 copy_shape = 0
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ */
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":220
  *                 raise ValueError(u"ndarray is not C contiguous")
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
@@ -1866,7 +1912,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L9_bool_binop_done;
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":221
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):             # <<<<<<<<<<<<<<
@@ -1876,9 +1922,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
   __pyx_t_1 = __pyx_t_2;
   __pyx_L9_bool_binop_done:;
+
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":220
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ */
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":222
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
@@ -1890,9 +1944,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_Raise(__pyx_t_3, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":220
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ */
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":224
  *                 raise ValueError(u"ndarray is not Fortran contiguous")
  * 
  *             info.buf = PyArray_DATA(self)             # <<<<<<<<<<<<<<
@@ -1901,7 +1963,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":225
  * 
  *             info.buf = PyArray_DATA(self)
  *             info.ndim = ndim             # <<<<<<<<<<<<<<
@@ -1910,7 +1972,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->ndim = __pyx_v_ndim;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":226
  *             info.buf = PyArray_DATA(self)
  *             info.ndim = ndim
  *             if copy_shape:             # <<<<<<<<<<<<<<
@@ -1920,7 +1982,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = (__pyx_v_copy_shape != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":229
  *                 # Allocate new buffer for strides and shape info.
  *                 # This is allocated as one block, strides first.
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)             # <<<<<<<<<<<<<<
@@ -1929,7 +1991,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":230
  *                 # This is allocated as one block, strides first.
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
  *                 info.shape = info.strides + ndim             # <<<<<<<<<<<<<<
@@ -1938,7 +2000,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":231
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
  *                 info.shape = info.strides + ndim
  *                 for i in range(ndim):             # <<<<<<<<<<<<<<
@@ -1949,7 +2011,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
       __pyx_v_i = __pyx_t_5;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":232
  *                 info.shape = info.strides + ndim
  *                 for i in range(ndim):
  *                     info.strides[i] = PyArray_STRIDES(self)[i]             # <<<<<<<<<<<<<<
@@ -1958,7 +2020,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
       (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":233
  *                 for i in range(ndim):
  *                     info.strides[i] = PyArray_STRIDES(self)[i]
  *                     info.shape[i] = PyArray_DIMS(self)[i]             # <<<<<<<<<<<<<<
@@ -1967,20 +2029,28 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
       (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
     }
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":226
+ *             info.buf = PyArray_DATA(self)
+ *             info.ndim = ndim
+ *             if copy_shape:             # <<<<<<<<<<<<<<
+ *                 # Allocate new buffer for strides and shape info.
+ *                 # This is allocated as one block, strides first.
+ */
     goto __pyx_L11;
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":235
  *                     info.shape[i] = PyArray_DIMS(self)[i]
  *             else:
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)             # <<<<<<<<<<<<<<
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL
  */
+  /*else*/ {
     __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":236
  *             else:
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)             # <<<<<<<<<<<<<<
@@ -1991,7 +2061,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   __pyx_L11:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":237
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL             # <<<<<<<<<<<<<<
@@ -2000,7 +2070,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->suboffsets = NULL;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":238
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL
  *             info.itemsize = PyArray_ITEMSIZE(self)             # <<<<<<<<<<<<<<
@@ -2009,7 +2079,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":239
  *             info.suboffsets = NULL
  *             info.itemsize = PyArray_ITEMSIZE(self)
  *             info.readonly = not PyArray_ISWRITEABLE(self)             # <<<<<<<<<<<<<<
@@ -2018,28 +2088,28 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":242
  * 
  *             cdef int t
  *             cdef char* f = NULL             # <<<<<<<<<<<<<<
  *             cdef dtype descr = self.descr
- *             cdef list stack
+ *             cdef int offset
  */
   __pyx_v_f = NULL;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":243
  *             cdef int t
  *             cdef char* f = NULL
  *             cdef dtype descr = self.descr             # <<<<<<<<<<<<<<
- *             cdef list stack
  *             cdef int offset
+ * 
  */
   __pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
   __Pyx_INCREF(__pyx_t_3);
   __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
   __pyx_t_3 = 0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":247
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":246
  *             cdef int offset
  * 
  *             cdef bint hasfields = PyDataType_HASFIELDS(descr)             # <<<<<<<<<<<<<<
@@ -2048,7 +2118,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":249
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":248
  *             cdef bint hasfields = PyDataType_HASFIELDS(descr)
  * 
  *             if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
@@ -2066,7 +2136,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_L15_bool_binop_done:;
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":250
  *             if not hasfields and not copy_shape:
  *                 # do not call releasebuffer
  *                 info.obj = None             # <<<<<<<<<<<<<<
@@ -2078,17 +2148,25 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_GOTREF(__pyx_v_info->obj);
     __Pyx_DECREF(__pyx_v_info->obj);
     __pyx_v_info->obj = Py_None;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":248
+ *             cdef bint hasfields = PyDataType_HASFIELDS(descr)
+ * 
+ *             if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
+ *                 # do not call releasebuffer
+ *                 info.obj = None
+ */
     goto __pyx_L14;
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":253
  *             else:
  *                 # need to call releasebuffer
  *                 info.obj = self             # <<<<<<<<<<<<<<
  * 
  *             if not hasfields:
  */
+  /*else*/ {
     __Pyx_INCREF(((PyObject *)__pyx_v_self));
     __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
     __Pyx_GOTREF(__pyx_v_info->obj);
@@ -2097,7 +2175,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   __pyx_L14:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":255
  *                 info.obj = self
  * 
  *             if not hasfields:             # <<<<<<<<<<<<<<
@@ -2107,7 +2185,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":256
  * 
  *             if not hasfields:
  *                 t = descr.type_num             # <<<<<<<<<<<<<<
@@ -2117,7 +2195,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __pyx_t_4 = __pyx_v_descr->type_num;
     __pyx_v_t = __pyx_t_4;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":257
  *             if not hasfields:
  *                 t = descr.type_num
  *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
@@ -2137,7 +2215,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     }
     __pyx_L20_next_or:;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":258
  *                 t = descr.type_num
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
@@ -2153,43 +2231,51 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
     __pyx_t_1 = __pyx_t_2;
     __pyx_L19_bool_binop_done:;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ *             if not hasfields:
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")
+ */
     if (__pyx_t_1) {
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":259
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    }
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
- *                 elif t == NPY_CDOUBLE:     f = "Zd"
- *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
- *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
- *                 else:
- *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ *             if not hasfields:
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")
  */
-    switch (__pyx_v_t) {
+    }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":260
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"             # <<<<<<<<<<<<<<
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"
  */
+    switch (__pyx_v_t) {
       case NPY_BYTE:
       __pyx_v_f = __pyx_k_b;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":261
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"             # <<<<<<<<<<<<<<
@@ -2200,7 +2286,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_B;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":262
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"             # <<<<<<<<<<<<<<
@@ -2211,7 +2297,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_h;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":263
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"
  *                 elif t == NPY_USHORT:      f = "H"             # <<<<<<<<<<<<<<
@@ -2222,7 +2308,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_H;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":264
  *                 elif t == NPY_SHORT:       f = "h"
  *                 elif t == NPY_USHORT:      f = "H"
  *                 elif t == NPY_INT:         f = "i"             # <<<<<<<<<<<<<<
@@ -2233,7 +2319,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_i;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":265
  *                 elif t == NPY_USHORT:      f = "H"
  *                 elif t == NPY_INT:         f = "i"
  *                 elif t == NPY_UINT:        f = "I"             # <<<<<<<<<<<<<<
@@ -2244,7 +2330,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_I;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":266
  *                 elif t == NPY_INT:         f = "i"
  *                 elif t == NPY_UINT:        f = "I"
  *                 elif t == NPY_LONG:        f = "l"             # <<<<<<<<<<<<<<
@@ -2255,7 +2341,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_l;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":267
  *                 elif t == NPY_UINT:        f = "I"
  *                 elif t == NPY_LONG:        f = "l"
  *                 elif t == NPY_ULONG:       f = "L"             # <<<<<<<<<<<<<<
@@ -2266,7 +2352,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_L;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":268
  *                 elif t == NPY_LONG:        f = "l"
  *                 elif t == NPY_ULONG:       f = "L"
  *                 elif t == NPY_LONGLONG:    f = "q"             # <<<<<<<<<<<<<<
@@ -2277,7 +2363,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_q;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":269
  *                 elif t == NPY_ULONG:       f = "L"
  *                 elif t == NPY_LONGLONG:    f = "q"
  *                 elif t == NPY_ULONGLONG:   f = "Q"             # <<<<<<<<<<<<<<
@@ -2288,7 +2374,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Q;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":270
  *                 elif t == NPY_LONGLONG:    f = "q"
  *                 elif t == NPY_ULONGLONG:   f = "Q"
  *                 elif t == NPY_FLOAT:       f = "f"             # <<<<<<<<<<<<<<
@@ -2299,7 +2385,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_f;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":271
  *                 elif t == NPY_ULONGLONG:   f = "Q"
  *                 elif t == NPY_FLOAT:       f = "f"
  *                 elif t == NPY_DOUBLE:      f = "d"             # <<<<<<<<<<<<<<
@@ -2310,7 +2396,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_d;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":272
  *                 elif t == NPY_FLOAT:       f = "f"
  *                 elif t == NPY_DOUBLE:      f = "d"
  *                 elif t == NPY_LONGDOUBLE:  f = "g"             # <<<<<<<<<<<<<<
@@ -2321,7 +2407,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_g;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":273
  *                 elif t == NPY_DOUBLE:      f = "d"
  *                 elif t == NPY_LONGDOUBLE:  f = "g"
  *                 elif t == NPY_CFLOAT:      f = "Zf"             # <<<<<<<<<<<<<<
@@ -2332,7 +2418,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zf;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":274
  *                 elif t == NPY_LONGDOUBLE:  f = "g"
  *                 elif t == NPY_CFLOAT:      f = "Zf"
  *                 elif t == NPY_CDOUBLE:     f = "Zd"             # <<<<<<<<<<<<<<
@@ -2343,7 +2429,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zd;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":275
  *                 elif t == NPY_CFLOAT:      f = "Zf"
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"             # <<<<<<<<<<<<<<
@@ -2354,7 +2440,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zg;
       break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":276
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
  *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
@@ -2366,33 +2452,33 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       break;
       default:
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":278
  *                 elif t == NPY_OBJECT:      f = "O"
  *                 else:
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
  *                 info.format = f
  *                 return
  */
-      __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
       __Pyx_GIVEREF(__pyx_t_6);
+      PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
       __pyx_t_6 = 0;
-      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       __Pyx_Raise(__pyx_t_6, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       break;
     }
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":279
  *                 else:
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *                 info.format = f             # <<<<<<<<<<<<<<
@@ -2401,7 +2487,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->format = __pyx_v_f;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":280
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *                 info.format = f
  *                 return             # <<<<<<<<<<<<<<
@@ -2410,19 +2496,27 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_r = 0;
     goto __pyx_L0;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":255
+ *                 info.obj = self
+ * 
+ *             if not hasfields:             # <<<<<<<<<<<<<<
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ */
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":282
  *                 return
  *             else:
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)             # <<<<<<<<<<<<<<
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0
  */
-    __pyx_v_info->format = ((char *)malloc(255));
+  /*else*/ {
+    __pyx_v_info->format = ((char *)malloc(0xFF));
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":283
  *             else:
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
  *                 info.format[0] = c'^' # Native data types, manual alignment             # <<<<<<<<<<<<<<
@@ -2431,7 +2525,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     (__pyx_v_info->format[0]) = '^';
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":284
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0             # <<<<<<<<<<<<<<
@@ -2440,17 +2534,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_offset = 0;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":285
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0
  *                 f = _util_dtypestring(descr, info.format + 1,             # <<<<<<<<<<<<<<
  *                                       info.format + _buffer_format_string_len,
  *                                       &offset)
  */
-    __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __pyx_v_f = __pyx_t_7;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":288
  *                                       info.format + _buffer_format_string_len,
  *                                       &offset)
  *                 f[0] = c'\0' # Terminate format string             # <<<<<<<<<<<<<<
@@ -2460,7 +2554,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     (__pyx_v_f[0]) = '\x00';
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":197
  *         # experimental exception made for __getbuffer__ and __releasebuffer__
  *         # -- the details of this may change.
  *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
@@ -2492,7 +2586,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":290
  *                 f[0] = c'\0' # Terminate format string
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
@@ -2516,7 +2610,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("__releasebuffer__", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":291
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):
  *             if PyArray_HASFIELDS(self):             # <<<<<<<<<<<<<<
@@ -2526,7 +2620,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":292
  *         def __releasebuffer__(ndarray self, Py_buffer* info):
  *             if PyArray_HASFIELDS(self):
  *                 stdlib.free(info.format)             # <<<<<<<<<<<<<<
@@ -2534,11 +2628,17 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
  *                 stdlib.free(info.strides)
  */
     free(__pyx_v_info->format);
-    goto __pyx_L3;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":291
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):
+ *             if PyArray_HASFIELDS(self):             # <<<<<<<<<<<<<<
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
   }
-  __pyx_L3:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":293
  *             if PyArray_HASFIELDS(self):
  *                 stdlib.free(info.format)
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
@@ -2548,7 +2648,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":295
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":294
  *                 stdlib.free(info.format)
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
  *                 stdlib.free(info.strides)             # <<<<<<<<<<<<<<
@@ -2556,11 +2656,17 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
  * 
  */
     free(__pyx_v_info->strides);
-    goto __pyx_L4;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":293
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
+ *                 stdlib.free(info.strides)
+ *                 # info.shape was stored after info.strides in the same block
+ */
   }
-  __pyx_L4:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":290
  *                 f[0] = c'\0' # Terminate format string
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
@@ -2572,7 +2678,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __Pyx_RefNannyFinishContext();
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":770
  * ctypedef npy_cdouble     complex_t
  * 
  * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
@@ -2589,7 +2695,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":771
  * 
  * cdef inline object PyArray_MultiIterNew1(a):
  *     return PyArray_MultiIterNew(1, <void*>a)             # <<<<<<<<<<<<<<
@@ -2597,13 +2703,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
  * cdef inline object PyArray_MultiIterNew2(a, b):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":770
  * ctypedef npy_cdouble     complex_t
  * 
  * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
@@ -2622,7 +2728,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":773
  *     return PyArray_MultiIterNew(1, <void*>a)
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
@@ -2639,7 +2745,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":774
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)             # <<<<<<<<<<<<<<
@@ -2647,13 +2753,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
  * cdef inline object PyArray_MultiIterNew3(a, b, c):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":773
  *     return PyArray_MultiIterNew(1, <void*>a)
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
@@ -2672,7 +2778,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":776
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
@@ -2689,7 +2795,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":777
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)             # <<<<<<<<<<<<<<
@@ -2697,13 +2803,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":776
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
@@ -2722,7 +2828,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":779
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
@@ -2739,7 +2845,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":780
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)             # <<<<<<<<<<<<<<
@@ -2747,13 +2853,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":779
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
@@ -2772,7 +2878,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":782
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
@@ -2789,7 +2895,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":784
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":783
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)             # <<<<<<<<<<<<<<
@@ -2797,13 +2903,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 784; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 783; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":782
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
@@ -2822,7 +2928,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":785
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
  * 
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
@@ -2854,17 +2960,17 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_util_dtypestring", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":793
- *     cdef int delta_offset
- *     cdef tuple i
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":790
+ * 
+ *     cdef dtype child
  *     cdef int endian_detector = 1             # <<<<<<<<<<<<<<
  *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
  *     cdef tuple fields
  */
   __pyx_v_endian_detector = 1;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
- *     cdef tuple i
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":791
+ *     cdef dtype child
  *     cdef int endian_detector = 1
  *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
  *     cdef tuple fields
@@ -2872,7 +2978,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":794
  *     cdef tuple fields
  * 
  *     for childname in descr.names:             # <<<<<<<<<<<<<<
@@ -2881,20 +2987,21 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   if (unlikely(__pyx_v_descr->names == Py_None)) {
     PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
-    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
   for (;;) {
     if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
     #if CYTHON_COMPILING_IN_CPYTHON
-    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     #else
-    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
     #endif
     __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
     __pyx_t_3 = 0;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":795
  * 
  *     for childname in descr.names:
  *         fields = descr.fields[childname]             # <<<<<<<<<<<<<<
@@ -2903,15 +3010,15 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
     if (unlikely(__pyx_v_descr->fields == Py_None)) {
       PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
-    __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
     __Pyx_GOTREF(__pyx_t_3);
-    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
     __pyx_t_3 = 0;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":796
  *     for childname in descr.names:
  *         fields = descr.fields[childname]
  *         child, new_offset = fields             # <<<<<<<<<<<<<<
@@ -2928,7 +3035,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       if (unlikely(size != 2)) {
         if (size > 2) __Pyx_RaiseTooManyValuesError(2);
         else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       #if CYTHON_COMPILING_IN_CPYTHON
       __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); 
@@ -2936,52 +3043,60 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       __Pyx_INCREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_t_4);
       #else
-      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       #endif
     } else {
-      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
-    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
     __pyx_t_3 = 0;
     __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
     __pyx_t_4 = 0;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":798
  *         child, new_offset = fields
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
  * 
  */
-    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
     if (__pyx_t_6) {
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":799
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":798
+ *         child, new_offset = fields
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ */
     }
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":804
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":801
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
  * 
  *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
@@ -3001,7 +3116,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     }
     __pyx_L8_next_or:;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":805
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":802
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
@@ -3017,23 +3132,39 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
     __pyx_t_6 = __pyx_t_7;
     __pyx_L7_bool_binop_done:;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":801
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")
+ */
     if (__pyx_t_6) {
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":803
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *             # One could encode it in the format string and have Cython
  *             # complain instead, BUT: < and > in format strings also imply
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":801
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")
+ */
     }
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":813
  * 
  *         # Output padding bytes
  *         while offset[0] < new_offset:             # <<<<<<<<<<<<<<
@@ -3041,24 +3172,24 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  *             f += 1
  */
     while (1) {
-      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (!__pyx_t_6) break;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":817
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":814
  *         # Output padding bytes
  *         while offset[0] < new_offset:
  *             f[0] = 120 # "x"; pad byte             # <<<<<<<<<<<<<<
  *             f += 1
  *             offset[0] += 1
  */
-      (__pyx_v_f[0]) = 120;
+      (__pyx_v_f[0]) = 0x78;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":815
  *         while offset[0] < new_offset:
  *             f[0] = 120 # "x"; pad byte
  *             f += 1             # <<<<<<<<<<<<<<
@@ -3067,7 +3198,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
       __pyx_v_f = (__pyx_v_f + 1);
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":819
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":816
  *             f[0] = 120 # "x"; pad byte
  *             f += 1
  *             offset[0] += 1             # <<<<<<<<<<<<<<
@@ -3078,7 +3209,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
     }
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":818
  *             offset[0] += 1
  * 
  *         offset[0] += child.itemsize             # <<<<<<<<<<<<<<
@@ -3088,7 +3219,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_8 = 0;
     (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":820
  *         offset[0] += child.itemsize
  * 
  *         if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
@@ -3098,19 +3229,19 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
     if (__pyx_t_6) {
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":824
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":821
  * 
  *         if not PyDataType_HASFIELDS(child):
  *             t = child.type_num             # <<<<<<<<<<<<<<
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")
  */
-      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
       __pyx_t_4 = 0;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":825
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":822
  *         if not PyDataType_HASFIELDS(child):
  *             t = child.type_num
  *             if end - f < 5:             # <<<<<<<<<<<<<<
@@ -3120,357 +3251,365 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
       if (__pyx_t_6) {
 
-        /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+        /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":823
  *             t = child.type_num
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  */
-        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
         __Pyx_Raise(__pyx_t_4, 0, 0, 0);
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+        /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":822
+ *         if not PyDataType_HASFIELDS(child):
+ *             t = child.type_num
+ *             if end - f < 5:             # <<<<<<<<<<<<<<
+ *                 raise RuntimeError(u"Format string allocated too short.")
+ * 
+ */
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":826
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"             # <<<<<<<<<<<<<<
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 98;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":827
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"             # <<<<<<<<<<<<<<
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 66;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":828
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"             # <<<<<<<<<<<<<<
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 104;
+        (__pyx_v_f[0]) = 0x68;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":829
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"             # <<<<<<<<<<<<<<
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 72;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":830
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"             # <<<<<<<<<<<<<<
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 105;
+        (__pyx_v_f[0]) = 0x69;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":831
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 73;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":832
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"             # <<<<<<<<<<<<<<
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 108;
+        (__pyx_v_f[0]) = 0x6C;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":833
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 76;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":834
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"             # <<<<<<<<<<<<<<
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 113;
+        (__pyx_v_f[0]) = 0x71;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":835
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"             # <<<<<<<<<<<<<<
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 81;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":836
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"             # <<<<<<<<<<<<<<
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 102;
+        (__pyx_v_f[0]) = 0x66;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":837
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 100;
+        (__pyx_v_f[0]) = 0x64;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":838
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"             # <<<<<<<<<<<<<<
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
-        (__pyx_v_f[0]) = 103;
+        (__pyx_v_f[0]) = 0x67;
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":839
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf             # <<<<<<<<<<<<<<
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
-        (__pyx_v_f[1]) = 102;
+        (__pyx_v_f[1]) = 0x66;
         __pyx_v_f = (__pyx_v_f + 1);
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":843
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":840
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd             # <<<<<<<<<<<<<<
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
-        (__pyx_v_f[1]) = 100;
+        (__pyx_v_f[1]) = 0x64;
         __pyx_v_f = (__pyx_v_f + 1);
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":841
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg             # <<<<<<<<<<<<<<
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  *             else:
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
-        (__pyx_v_f[1]) = 103;
+        (__pyx_v_f[1]) = 0x67;
         __pyx_v_f = (__pyx_v_f + 1);
         goto __pyx_L15;
       }
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":842
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"             # <<<<<<<<<<<<<<
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 79;
         goto __pyx_L15;
       }
-      /*else*/ {
 
-        /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":847
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":844
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
  *             f += 1
  *         else:
  */
-        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      /*else*/ {
+        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
-        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
-        PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
         __Pyx_GIVEREF(__pyx_t_3);
+        PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
         __pyx_t_3 = 0;
-        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
         __Pyx_Raise(__pyx_t_3, 0, 0, 0);
         __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       __pyx_L15:;
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":848
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":845
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *             f += 1             # <<<<<<<<<<<<<<
@@ -3478,23 +3617,31 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  *             # Cython ignores struct boundary information ("T{...}"),
  */
       __pyx_v_f = (__pyx_v_f + 1);
+
+      /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":820
+ *         offset[0] += child.itemsize
+ * 
+ *         if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
+ *             t = child.type_num
+ *             if end - f < 5:
+ */
       goto __pyx_L13;
     }
-    /*else*/ {
 
-      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":852
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":849
  *             # Cython ignores struct boundary information ("T{...}"),
  *             # so don't output it
  *             f = _util_dtypestring(child, f, end, offset)             # <<<<<<<<<<<<<<
  *     return f
  * 
  */
-      __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    /*else*/ {
+      __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __pyx_v_f = __pyx_t_9;
     }
     __pyx_L13:;
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":794
  *     cdef tuple fields
  * 
  *     for childname in descr.names:             # <<<<<<<<<<<<<<
@@ -3504,7 +3651,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":853
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":850
  *             # so don't output it
  *             f = _util_dtypestring(child, f, end, offset)
  *     return f             # <<<<<<<<<<<<<<
@@ -3514,7 +3661,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   __pyx_r = __pyx_v_f;
   goto __pyx_L0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":785
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
  * 
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
@@ -3539,7 +3686,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   return __pyx_r;
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":966
  * 
  * 
  * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
@@ -3554,7 +3701,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   int __pyx_t_2;
   __Pyx_RefNannySetupContext("set_array_base", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":968
  * cdef inline void set_array_base(ndarray arr, object base):
  *      cdef PyObject* baseptr
  *      if base is None:             # <<<<<<<<<<<<<<
@@ -3565,7 +3712,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   __pyx_t_2 = (__pyx_t_1 != 0);
   if (__pyx_t_2) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":969
  *      cdef PyObject* baseptr
  *      if base is None:
  *          baseptr = NULL             # <<<<<<<<<<<<<<
@@ -3573,20 +3720,28 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  *          Py_INCREF(base) # important to do this before decref below!
  */
     __pyx_v_baseptr = NULL;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":968
+ * cdef inline void set_array_base(ndarray arr, object base):
+ *      cdef PyObject* baseptr
+ *      if base is None:             # <<<<<<<<<<<<<<
+ *          baseptr = NULL
+ *      else:
+ */
     goto __pyx_L3;
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":971
  *          baseptr = NULL
  *      else:
  *          Py_INCREF(base) # important to do this before decref below!             # <<<<<<<<<<<<<<
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)
  */
+  /*else*/ {
     Py_INCREF(__pyx_v_base);
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":975
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":972
  *      else:
  *          Py_INCREF(base) # important to do this before decref below!
  *          baseptr = <PyObject*>base             # <<<<<<<<<<<<<<
@@ -3597,7 +3752,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   }
   __pyx_L3:;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":973
  *          Py_INCREF(base) # important to do this before decref below!
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)             # <<<<<<<<<<<<<<
@@ -3606,7 +3761,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
   Py_XDECREF(__pyx_v_arr->base);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":974
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)
  *      arr.base = baseptr             # <<<<<<<<<<<<<<
@@ -3615,7 +3770,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
   __pyx_v_arr->base = __pyx_v_baseptr;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":966
  * 
  * 
  * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
@@ -3627,7 +3782,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   __Pyx_RefNannyFinishContext();
 }
 
-/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
+/* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -3641,7 +3796,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("get_array_base", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":977
  * 
  * cdef inline object get_array_base(ndarray arr):
  *     if arr.base is NULL:             # <<<<<<<<<<<<<<
@@ -3651,7 +3806,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
   if (__pyx_t_1) {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":981
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":978
  * cdef inline object get_array_base(ndarray arr):
  *     if arr.base is NULL:
  *         return None             # <<<<<<<<<<<<<<
@@ -3662,21 +3817,29 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
     __Pyx_INCREF(Py_None);
     __pyx_r = Py_None;
     goto __pyx_L0;
+
+    /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":977
+ * 
+ * cdef inline object get_array_base(ndarray arr):
+ *     if arr.base is NULL:             # <<<<<<<<<<<<<<
+ *         return None
+ *     else:
+ */
   }
-  /*else*/ {
 
-    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":983
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":980
  *         return None
  *     else:
  *         return <object>arr.base             # <<<<<<<<<<<<<<
  */
+  /*else*/ {
     __Pyx_XDECREF(__pyx_r);
     __Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
     __pyx_r = ((PyObject *)__pyx_v_arr->base);
     goto __pyx_L0;
   }
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -3718,13 +3881,13 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
   {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
   {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
+  {&__pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_k_Users_jairideout_dev_scikit_bio, sizeof(__pyx_k_Users_jairideout_dev_scikit_bio), 0, 0, 1, 0},
   {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
   {&__pyx_n_s_cnt, __pyx_k_cnt, sizeof(__pyx_k_cnt), 0, 0, 1, 1},
   {&__pyx_n_s_counts, __pyx_k_counts, sizeof(__pyx_k_counts), 0, 0, 1, 1},
   {&__pyx_n_s_counts_sum, __pyx_k_counts_sum, sizeof(__pyx_k_counts_sum), 0, 0, 1, 1},
   {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
   {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1},
-  {&__pyx_kp_s_home_evan_biocore_scikit_bio_sk, __pyx_k_home_evan_biocore_scikit_bio_sk, sizeof(__pyx_k_home_evan_biocore_scikit_bio_sk), 0, 0, 1, 0},
   {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
   {&__pyx_n_s_idx, __pyx_k_idx, sizeof(__pyx_k_idx), 0, 0, 1, 1},
   {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
@@ -3752,7 +3915,7 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
 static int __Pyx_InitCachedBuiltins(void) {
   __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   return 0;
   __pyx_L1_error:;
   return -1;
@@ -3762,7 +3925,7 @@ static int __Pyx_InitCachedConstants(void) {
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":218
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
@@ -3773,7 +3936,7 @@ static int __Pyx_InitCachedConstants(void) {
   __Pyx_GOTREF(__pyx_tuple_);
   __Pyx_GIVEREF(__pyx_tuple_);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":222
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
@@ -3784,47 +3947,47 @@ static int __Pyx_InitCachedConstants(void) {
   __Pyx_GOTREF(__pyx_tuple__2);
   __Pyx_GIVEREF(__pyx_tuple__2);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":259
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  */
-  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__3);
   __Pyx_GIVEREF(__pyx_tuple__3);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":799
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  */
-  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__4);
   __Pyx_GIVEREF(__pyx_tuple__4);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":803
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *             # One could encode it in the format string and have Cython
  *             # complain instead, BUT: < and > in format strings also imply
  */
-  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__5);
   __Pyx_GIVEREF(__pyx_tuple__5);
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":823
  *             t = child.type_num
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  */
-  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__6);
   __Pyx_GIVEREF(__pyx_tuple__6);
 
@@ -3838,7 +4001,7 @@ static int __Pyx_InitCachedConstants(void) {
   __pyx_tuple__7 = PyTuple_Pack(11, __pyx_n_s_counts, __pyx_n_s_n, __pyx_n_s_counts_sum, __pyx_n_s_result, __pyx_n_s_permuted, __pyx_n_s_unpacked, __pyx_n_s_cnt, __pyx_n_s_unpacked_idx, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_idx); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__7);
   __Pyx_GIVEREF(__pyx_tuple__7);
-  __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(3, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_evan_biocore_scikit_bio_sk, __pyx_n_s_subsample_counts_without_replac, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(3, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_n_s_subsample_counts_without_replac, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_RefNannyFinishContext();
   return 0;
   __pyx_L1_error:;
@@ -3876,18 +4039,24 @@ PyMODINIT_FUNC PyInit___subsample(void)
   }
   #endif
   __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit___subsample(void)", 0);
-  if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #ifdef __Pyx_CyFunction_USED
-  if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
   #ifdef __Pyx_FusedFunction_USED
   if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
+  #ifdef __Pyx_Coroutine_USED
+  if (__pyx_Coroutine_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
   #ifdef __Pyx_Generator_USED
   if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
+  #ifdef __Pyx_StopAsyncIteration_USED
+  if (__pyx_StopAsyncIteration_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
   /*--- Library function declarations ---*/
   /*--- Threads initialization code ---*/
   #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
@@ -3910,12 +4079,12 @@ PyMODINIT_FUNC PyInit___subsample(void)
   #endif
   if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
   /*--- Initialize various global constants etc. ---*/
-  if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_InitGlobals() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
   if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
   if (__pyx_module_is_main_skbio__stats____subsample) {
-    if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   #if PY_MAJOR_VERSION >= 3
   {
@@ -3926,9 +4095,9 @@ PyMODINIT_FUNC PyInit___subsample(void)
   }
   #endif
   /*--- Builtin init code ---*/
-  if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_InitCachedBuiltins() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Constants init code ---*/
-  if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_InitCachedConstants() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Global init code ---*/
   /*--- Variable export code ---*/
   /*--- Function export code ---*/
@@ -3945,10 +4114,13 @@ PyMODINIT_FUNC PyInit___subsample(void)
   __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 864; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Variable import code ---*/
   /*--- Function import code ---*/
   /*--- Execution code ---*/
+  #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+  if (__Pyx_patch_abc() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
 
   /* "skbio/stats/__subsample.pyx":11
  * from __future__ import absolute_import, division, print_function
@@ -3984,7 +4156,7 @@ PyMODINIT_FUNC PyInit___subsample(void)
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
+  /* "../../miniconda/envs/scikit-bio/lib/python3.5/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -4860,13 +5032,17 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObjec
 }
 #else
 static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
-    PyObject* args = PyTuple_Pack(1, arg);
-    return (likely(args)) ? __Pyx_PyObject_Call(func, args, NULL) : NULL;
+    PyObject *result;
+    PyObject *args = PyTuple_Pack(1, arg);
+    if (unlikely(!args)) return NULL;
+    result = __Pyx_PyObject_Call(func, args, NULL);
+    Py_DECREF(args);
+    return result;
 }
 #endif
 
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
-        PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
+        Py_ssize_t cstart, Py_ssize_t cstop,
         PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
         int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
 #if CYTHON_COMPILING_IN_CPYTHON
@@ -5067,10 +5243,13 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
         if (value && PyExceptionInstance_Check(value)) {
             instance_class = (PyObject*) Py_TYPE(value);
             if (instance_class != type) {
-                if (PyObject_IsSubclass(instance_class, type)) {
-                    type = instance_class;
-                } else {
+                int is_subclass = PyObject_IsSubclass(instance_class, type);
+                if (!is_subclass) {
                     instance_class = NULL;
+                } else if (unlikely(is_subclass == -1)) {
+                    goto bad;
+                } else {
+                    type = instance_class;
                 }
             }
         }
@@ -5130,7 +5309,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
     if (tb) {
 #if CYTHON_COMPILING_IN_PYPY
         PyObject *tmp_type, *tmp_value, *tmp_tb;
-        PyErr_Fetch(tmp_type, tmp_value, tmp_tb);
+        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
         Py_INCREF(tb);
         PyErr_Restore(tmp_type, tmp_value, tb);
         Py_XDECREF(tmp_tb);
@@ -5165,13 +5344,86 @@ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
     PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
 }
 
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+    PyObject *empty_list = 0;
+    PyObject *module = 0;
+    PyObject *global_dict = 0;
+    PyObject *empty_dict = 0;
+    PyObject *list;
+    #if PY_VERSION_HEX < 0x03030000
+    PyObject *py_import;
+    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
+    if (!py_import)
+        goto bad;
+    #endif
+    if (from_list)
+        list = from_list;
+    else {
+        empty_list = PyList_New(0);
+        if (!empty_list)
+            goto bad;
+        list = empty_list;
+    }
+    global_dict = PyModule_GetDict(__pyx_m);
+    if (!global_dict)
+        goto bad;
+    empty_dict = PyDict_New();
+    if (!empty_dict)
+        goto bad;
+    {
+        #if PY_MAJOR_VERSION >= 3
+        if (level == -1) {
+            if (strchr(__Pyx_MODULE_NAME, '.')) {
+                #if PY_VERSION_HEX < 0x03030000
+                PyObject *py_level = PyInt_FromLong(1);
+                if (!py_level)
+                    goto bad;
+                module = PyObject_CallFunctionObjArgs(py_import,
+                    name, global_dict, empty_dict, list, py_level, NULL);
+                Py_DECREF(py_level);
+                #else
+                module = PyImport_ImportModuleLevelObject(
+                    name, global_dict, empty_dict, list, 1);
+                #endif
+                if (!module) {
+                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
+                        goto bad;
+                    PyErr_Clear();
+                }
+            }
+            level = 0;
+        }
+        #endif
+        if (!module) {
+            #if PY_VERSION_HEX < 0x03030000
+            PyObject *py_level = PyInt_FromLong(level);
+            if (!py_level)
+                goto bad;
+            module = PyObject_CallFunctionObjArgs(py_import,
+                name, global_dict, empty_dict, list, py_level, NULL);
+            Py_DECREF(py_level);
+            #else
+            module = PyImport_ImportModuleLevelObject(
+                name, global_dict, empty_dict, list, level);
+            #endif
+        }
+    }
+bad:
+    #if PY_VERSION_HEX < 0x03030000
+    Py_XDECREF(py_import);
+    #endif
+    Py_XDECREF(empty_list);
+    Py_XDECREF(empty_dict);
+    return module;
+}
+
 static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
     int start = 0, mid = 0, end = count - 1;
     if (end >= 0 && code_line > entries[end].code_line) {
         return count;
     }
     while (start < end) {
-        mid = (start + end) / 2;
+        mid = start + (end - start) / 2;
         if (code_line < entries[mid].code_line) {
             end = mid;
         } else if (code_line > entries[mid].code_line) {
@@ -5345,95 +5597,22 @@ static void __Pyx_ReleaseBuffer(Py_buffer *view) {
 #endif
 
 
-        static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
-    PyObject *empty_list = 0;
-    PyObject *module = 0;
-    PyObject *global_dict = 0;
-    PyObject *empty_dict = 0;
-    PyObject *list;
-    #if PY_VERSION_HEX < 0x03030000
-    PyObject *py_import;
-    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
-    if (!py_import)
-        goto bad;
-    #endif
-    if (from_list)
-        list = from_list;
-    else {
-        empty_list = PyList_New(0);
-        if (!empty_list)
-            goto bad;
-        list = empty_list;
-    }
-    global_dict = PyModule_GetDict(__pyx_m);
-    if (!global_dict)
-        goto bad;
-    empty_dict = PyDict_New();
-    if (!empty_dict)
-        goto bad;
-    {
-        #if PY_MAJOR_VERSION >= 3
-        if (level == -1) {
-            if (strchr(__Pyx_MODULE_NAME, '.')) {
-                #if PY_VERSION_HEX < 0x03030000
-                PyObject *py_level = PyInt_FromLong(1);
-                if (!py_level)
-                    goto bad;
-                module = PyObject_CallFunctionObjArgs(py_import,
-                    name, global_dict, empty_dict, list, py_level, NULL);
-                Py_DECREF(py_level);
-                #else
-                module = PyImport_ImportModuleLevelObject(
-                    name, global_dict, empty_dict, list, 1);
-                #endif
-                if (!module) {
-                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
-                        goto bad;
-                    PyErr_Clear();
-                }
-            }
-            level = 0;
-        }
-        #endif
-        if (!module) {
-            #if PY_VERSION_HEX < 0x03030000
-            PyObject *py_level = PyInt_FromLong(level);
-            if (!py_level)
-                goto bad;
-            module = PyObject_CallFunctionObjArgs(py_import,
-                name, global_dict, empty_dict, list, py_level, NULL);
-            Py_DECREF(py_level);
-            #else
-            module = PyImport_ImportModuleLevelObject(
-                name, global_dict, empty_dict, list, level);
-            #endif
-        }
-    }
-bad:
-    #if PY_VERSION_HEX < 0x03030000
-    Py_XDECREF(py_import);
-    #endif
-    Py_XDECREF(empty_list);
-    Py_XDECREF(empty_dict);
-    return module;
-}
-
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) {
-    const Py_intptr_t neg_one = (Py_intptr_t) -1, const_zero = 0;
+        static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) {
+    const Py_intptr_t neg_one = (Py_intptr_t) -1, const_zero = (Py_intptr_t) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
         if (sizeof(Py_intptr_t) < sizeof(long)) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(Py_intptr_t) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(Py_intptr_t) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(Py_intptr_t) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(Py_intptr_t) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -5445,21 +5624,21 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) {
 }
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value) {
-    const npy_int64 neg_one = (npy_int64) -1, const_zero = 0;
+    const npy_int64 neg_one = (npy_int64) -1, const_zero = (npy_int64) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
         if (sizeof(npy_int64) < sizeof(long)) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(npy_int64) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(npy_int64) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(npy_int64) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(npy_int64) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(npy_int64) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(npy_int64) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -5470,29 +5649,33 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value) {
     }
 }
 
-#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)       \
-    {                                                                     \
-        func_type value = func_value;                                     \
-        if (sizeof(target_type) < sizeof(func_type)) {                    \
-            if (unlikely(value != (func_type) (target_type) value)) {     \
-                func_type zero = 0;                                       \
-                if (is_unsigned && unlikely(value < zero))                \
-                    goto raise_neg_overflow;                              \
-                else                                                      \
-                    goto raise_overflow;                                  \
-            }                                                             \
-        }                                                                 \
-        return (target_type) value;                                       \
-    }
-
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
+    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
+#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
+    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
+#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
+    {\
+        func_type value = func_value;\
+        if (sizeof(target_type) < sizeof(func_type)) {\
+            if (unlikely(value != (func_type) (target_type) value)) {\
+                func_type zero = 0;\
+                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
+                    return (target_type) -1;\
+                if (is_unsigned && unlikely(value < zero))\
+                    goto raise_neg_overflow;\
+                else\
+                    goto raise_overflow;\
+            }\
+        }\
+        return (target_type) value;\
+    }
+
+#if CYTHON_USE_PYLONG_INTERNALS
   #include "longintrepr.h"
- #endif
 #endif
 
 static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) {
-    const Py_intptr_t neg_one = (Py_intptr_t) -1, const_zero = 0;
+    const Py_intptr_t neg_one = (Py_intptr_t) -1, const_zero = (Py_intptr_t) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -5509,36 +5692,125 @@ static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (Py_intptr_t) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(Py_intptr_t) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(Py_intptr_t) >= 2 * PyLong_SHIFT) {
+                            return (Py_intptr_t) (((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(Py_intptr_t) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(Py_intptr_t) >= 3 * PyLong_SHIFT) {
+                            return (Py_intptr_t) (((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(Py_intptr_t) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(Py_intptr_t) >= 4 * PyLong_SHIFT) {
+                            return (Py_intptr_t) (((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
                 goto raise_neg_overflow;
             }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (Py_intptr_t) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
+            }
+#endif
             if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, PyLong_AsUnsignedLong(x))
-            } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long long, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(Py_intptr_t) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(Py_intptr_t,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (Py_intptr_t) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(Py_intptr_t,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(Py_intptr_t) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) {
+                            return (Py_intptr_t) (((Py_intptr_t)-1)*(((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(Py_intptr_t) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) {
+                            return (Py_intptr_t) ((((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) {
+                            return (Py_intptr_t) (((Py_intptr_t)-1)*(((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(Py_intptr_t) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) {
+                            return (Py_intptr_t) ((((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(Py_intptr_t) - 1 > 4 * PyLong_SHIFT) {
+                            return (Py_intptr_t) (((Py_intptr_t)-1)*(((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(Py_intptr_t) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(Py_intptr_t) - 1 > 4 * PyLong_SHIFT) {
+                            return (Py_intptr_t) ((((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(Py_intptr_t) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyLong_AsLong(x))
-            } else if (sizeof(Py_intptr_t) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, long long, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, long, PyLong_AsLong(x))
+            } else if (sizeof(Py_intptr_t) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -5827,21 +6099,21 @@ raise_neg_overflow:
 #endif
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
-    const int neg_one = (int) -1, const_zero = 0;
+    const int neg_one = (int) -1, const_zero = (int) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
         if (sizeof(int) < sizeof(long)) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(int) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(int) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(int) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(int) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -5853,7 +6125,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
 }
 
 static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
-    const int neg_one = (int) -1, const_zero = 0;
+    const int neg_one = (int) -1, const_zero = (int) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -5870,36 +6142,125 @@ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(int, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (int) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
+                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
+                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
+                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
                 goto raise_neg_overflow;
             }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (int) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
+            }
+#endif
             if (sizeof(int) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong(x))
-            } else if (sizeof(int) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (int) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(int) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong(x))
-            } else if (sizeof(int) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
+            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -5947,22 +6308,48 @@ raise_neg_overflow:
     return (int) -1;
 }
 
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
+    const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0;
+    const int is_unsigned = neg_one > const_zero;
+    if (is_unsigned) {
+        if (sizeof(enum NPY_TYPES) < sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+        }
+    } else {
+        if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
+        }
+    }
+    {
+        int one = 1; int little = (int)*(unsigned char *)&one;
+        unsigned char *bytes = (unsigned char *)&value;
+        return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
+                                     little, !is_unsigned);
+    }
+}
+
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
-    const long neg_one = (long) -1, const_zero = 0;
+    const long neg_one = (long) -1, const_zero = (long) 0;
     const int is_unsigned = neg_one > const_zero;
     if (is_unsigned) {
         if (sizeof(long) < sizeof(long)) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(long) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(long) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(long) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(long) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -5974,7 +6361,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
 }
 
 static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
-    const long neg_one = (long) -1, const_zero = 0;
+    const long neg_one = (long) -1, const_zero = (long) 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
@@ -5991,36 +6378,125 @@ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
 #endif
     if (likely(PyLong_Check(x))) {
         if (is_unsigned) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(long, digit, ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (long) 0;
+                case  1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
+                case 2:
+                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
+                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
+                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
+                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
                 goto raise_neg_overflow;
             }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (long) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
+            }
+#endif
             if (sizeof(long) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong(x))
-            } else if (sizeof(long) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
+#if CYTHON_USE_PYLONG_INTERNALS
+            const digit* digits = ((PyLongObject*)x)->ob_digit;
             switch (Py_SIZE(x)) {
-                case  0: return 0;
-                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +(((PyLongObject*)x)->ob_digit[0]));
-                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
+                case  0: return (long) 0;
+                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) digits[0])
+                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +digits[0])
+                case -2:
+                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case 2:
+                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+                        }
+                    }
+                    break;
             }
- #endif
 #endif
             if (sizeof(long) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong(x))
-            } else if (sizeof(long) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong(x))
+                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
+            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -6202,7 +6678,7 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
     return __Pyx_PyObject_AsStringAndSize(o, &ignore);
 }
 static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
-#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
     if (
 #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
             __Pyx_sys_getdefaultencoding_not_ascii &&
@@ -6243,7 +6719,7 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_
 #endif
     } else
 #endif
-#if !CYTHON_COMPILING_IN_PYPY
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
     if (PyByteArray_Check(o)) {
         *length = PyByteArray_GET_SIZE(o);
         return PyByteArray_AS_STRING(o);
@@ -6273,7 +6749,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
 #else
   if (PyLong_Check(x))
 #endif
-    return Py_INCREF(x), x;
+    return __Pyx_NewRef(x);
   m = Py_TYPE(x)->tp_as_number;
 #if PY_MAJOR_VERSION < 3
   if (m && m->nb_int) {
@@ -6313,18 +6789,55 @@ static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
   Py_ssize_t ival;
   PyObject *x;
 #if PY_MAJOR_VERSION < 3
-  if (likely(PyInt_CheckExact(b)))
-      return PyInt_AS_LONG(b);
+  if (likely(PyInt_CheckExact(b))) {
+    if (sizeof(Py_ssize_t) >= sizeof(long))
+        return PyInt_AS_LONG(b);
+    else
+        return PyInt_AsSsize_t(b);
+  }
 #endif
   if (likely(PyLong_CheckExact(b))) {
-    #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
-     #if CYTHON_USE_PYLONG_INTERNALS
-       switch (Py_SIZE(b)) {
-       case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0];
-       case  0: return 0;
-       case  1: return ((PyLongObject*)b)->ob_digit[0];
-       }
-     #endif
+    #if CYTHON_USE_PYLONG_INTERNALS
+    const digit* digits = ((PyLongObject*)b)->ob_digit;
+    const Py_ssize_t size = Py_SIZE(b);
+    if (likely(__Pyx_sst_abs(size) <= 1)) {
+        ival = likely(size) ? digits[0] : 0;
+        if (size == -1) ival = -ival;
+        return ival;
+    } else {
+      switch (size) {
+         case 2:
+           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+             return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case -2:
+           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+             return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case 3:
+           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+             return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case -3:
+           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+             return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case 4:
+           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+             return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+         case -4:
+           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+             return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+           }
+           break;
+      }
+    }
     #endif
     return PyLong_AsSsize_t(b);
   }
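
The generated helpers above all follow the same pattern: read CPython's little-endian digit array and fold the digits together with shifts of PyLong_SHIFT bits. A minimal Python 3 sketch of the same arithmetic (illustration only, not part of the diff; SHIFT, split_digits and join_digits are hypothetical names):

    import sys

    # CPython stores arbitrary-precision ints as little-endian arrays of
    # "digits" of sys.int_info.bits_per_digit bits each (PyLong_SHIFT above).
    SHIFT = sys.int_info.bits_per_digit

    def split_digits(value):
        """Decompose a non-negative int into CPython-style digits."""
        digits = []
        while value:
            digits.append(value & ((1 << SHIFT) - 1))
            value >>= SHIFT
        return digits or [0]

    def join_digits(digits, negative=False):
        """Recombine digits the way the unrolled switch statements do:
        (((d[k] << SHIFT) | d[k-1]) << SHIFT) | ... | d[0]."""
        value = 0
        for d in reversed(digits):
            value = (value << SHIFT) | d
        return -value if negative else value

    assert join_digits(split_digits(2**70 + 3)) == 2**70 + 3
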
diff --git a/skbio/stats/composition.py b/skbio/stats/composition.py
index e0b6c67..dd35507 100644
--- a/skbio/stats/composition.py
+++ b/skbio/stats/composition.py
@@ -18,20 +18,20 @@ analyzed using Aitchison geometry. [1]_
 
 However, in this framework, standard real Euclidean operations such as
 addition and multiplication no longer apply. Only operations such as
-perturbation and power can be used to manipulate this data. [1]_
+perturbation and power can be used to manipulate this data.
 
 This module allows two styles of manipulation of compositional data.
 Compositional data can be analyzed using perturbation and power
 operations, which can be useful for simulation studies. The
 alternative strategy is to transform compositional data into the real
-space.  Right now, the centre log ratio transform (clr) [1]_ can be
-used to accomplish this.  This transform can be useful for performing
-standard statistical tools such as parametric hypothesis testing,
-regressions and more.
+space.  Right now, the centre log ratio transform (clr) and
+the isometric log ratio transform (ilr) [2]_ can be used to accomplish
+this. These transforms can be useful for applying standard statistical
+tools such as parametric hypothesis testing, regression and more.
 
 The major caveat of using this framework is dealing with zeros.  In
 the Aitchison geometry, only compositions with nonzero components can
-be considered. The multiplicative replacement technique [2]_ can be
+be considered. The multiplicative replacement technique [3]_ can be
 used to substitute these zeros with small pseudocounts without
 introducing major distortions to the data.
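
A brief sketch of that workflow, using functions already present in this module (illustration only, not text from the commit):

    import numpy as np
    from skbio.stats.composition import multiplicative_replacement, clr

    counts = np.array([[0., 2., 4., 4.]])         # the zero makes log ratios undefined
    filled = multiplicative_replacement(counts)   # zeros become small pseudocounts
    transformed = clr(filled)                     # the transform is now well defined
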
 
@@ -46,14 +46,25 @@ Functions
    perturb
    perturb_inv
    power
+   inner
    clr
+   clr_inv
+   ilr
+   ilr_inv
    centralize
+   ancom
 
 References
 ----------
-.. [1] V. Pawlowsky-Glahn. "Lecture Notes on Compositional Data Analysis"
-.. [2] J. A. Martin-Fernandez. "Dealing With Zeros and Missing Values in
-       Compositional Data Sets Using Nonparametric Imputation"
+.. [1] V. Pawlowsky-Glahn, "Lecture Notes on Compositional Data Analysis"
+   (2007)
+
+.. [2] J. J. Egozcue, "Isometric Logratio Transformations for
+   Compositional Data Analysis" Mathematical Geology, 35.3 (2003)
+
+.. [3] J. A. Martin-Fernandez,  "Dealing With Zeros and Missing Values in
+   Compositional Data Sets Using Nonparametric Imputation",
+   Mathematical Geology, 35.3 (2003)
 
 
 Examples
@@ -90,8 +101,8 @@ array([ 0.25,  0.25,  0.5 ])
 
 from __future__ import absolute_import, division, print_function
 import numpy as np
-import scipy.stats as ss
-
+import pandas as pd
+import scipy.stats
 from skbio.util._decorator import experimental
 
 
@@ -195,10 +206,16 @@ def perturb(x, y):
     Performs the perturbation operation.
 
     This operation is defined as
-    :math:`x \oplus y = C[x_1 y_1, ..., x_D y_D]`
+
+    .. math::
+        x \oplus y = C[x_1 y_1, \ldots, x_D y_D]
 
     :math:`C[x]` is the closure operation defined as
-    :math:`C[x] = [\frac{x_1}{\sum x},...,\frac{x_D}{\sum x}]`
+
+    .. math::
+        C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
+                     \frac{x_D}{\sum_{i=1}^{D} x_i} \right]
+
     for some :math:`D` dimensional real vector :math:`x` and
     :math:`D` is the number of components for every composition.
 
@@ -239,10 +256,17 @@ def perturb_inv(x, y):
     Performs the inverse perturbation operation.
 
     This operation is defined as
-    :math:`x \ominus y = C[x_1 y_1^{-1}, ..., x_D y_D^{-1}]`
+
+    .. math::
+        x \ominus y = C[x_1 y_1^{-1}, \ldots, x_D y_D^{-1}]
 
     :math:`C[x]` is the closure operation defined as
-    :math:`C[x] = [\frac{x_1}{\sum x},...,\frac{x_D}{\sum x}]`
+
+    .. math::
+        C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
+                     \frac{x_D}{\sum_{i=1}^{D} x_i} \right]
+
+
     for some :math:`D` dimensional real vector :math:`x` and
     :math:`D` is the number of components for every composition.
 
@@ -271,7 +295,6 @@ def perturb_inv(x, y):
     >>> y = np.array([1./6,1./6,1./3,1./3])
     >>> perturb_inv(x,y)
     array([ 0.14285714,  0.42857143,  0.28571429,  0.14285714])
-
     """
     x, y = closure(x), closure(y)
     return closure(x / y)
@@ -283,10 +306,16 @@ def power(x, a):
     Performs the power operation.
 
     This operation is defined as follows
-    :math:`x \odot a = C[x_1^a, ..., x_D^a]`
+
+    .. math::
+        x \odot a = C[x_1^a, \ldots, x_D^a]
 
     :math:`C[x]` is the closure operation defined as
-    :math:`C[x] = [\frac{x_1}{\sum x},...,\frac{x_D}{\sum x}]`
+
+    .. math::
+        C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
+                     \frac{x_D}{\sum_{i=1}^{D} x_i} \right]
+
     for some :math:`D` dimensional real vector :math:`x` and
     :math:`D` is the number of components for every composition.
 
@@ -319,16 +348,68 @@ def power(x, a):
 
 
 @experimental(as_of="0.4.0")
+def inner(x, y):
+    r"""
+    Calculates the Aitchison inner product.
+
+    This inner product is defined as follows
+
+    .. math::
+        \langle x, y \rangle_a =
+        \frac{1}{2D} \sum\limits_{i=1}^{D} \sum\limits_{j=1}^{D}
+        \ln\left(\frac{x_i}{x_j}\right) \ln\left(\frac{y_i}{y_j}\right)
+
+    Parameters
+    ----------
+    x : array_like
+        a matrix of proportions where
+        rows = compositions and
+        columns = components
+    y : array_like
+        a matrix of proportions where
+        rows = compositions and
+        columns = components
+
+    Returns
+    -------
+    numpy.ndarray
+         inner product result
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import inner
+    >>> x = np.array([.1, .3, .4, .2])
+    >>> y = np.array([.2, .4, .2, .2])
+    >>> inner(x, y)
+    0.21078524737545556
+    """
+    x = closure(x)
+    y = closure(y)
+    a, b = clr(x), clr(y)
+    return a.dot(b.T)
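
The one-line implementation works because the Aitchison inner product equals the Euclidean dot product of the clr-transformed vectors. A standalone check of that identity for the doctest vectors (a sketch using a local _clr_local helper, not code from the commit):

    import numpy as np

    def _clr_local(v):
        logv = np.log(v / v.sum())
        return logv - logv.mean()

    x = np.array([.1, .3, .4, .2])
    y = np.array([.2, .4, .2, .2])
    D = x.size

    # Double-sum definition from the docstring above.
    double_sum = sum(np.log(x[i] / x[j]) * np.log(y[i] / y[j])
                     for i in range(D) for j in range(D)) / (2.0 * D)

    assert np.isclose(double_sum, np.dot(_clr_local(x), _clr_local(y)))  # both ~0.2108
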
+
+
+@experimental(as_of="0.4.0")
 def clr(mat):
     r"""
     Performs centre log ratio transformation.
 
     This function transforms compositions from Aitchison geometry to
-    the real space. This transformation is an isometry, but not an
-    isomorphism. It is defined for a composition :math:`x` as follows:
+    the real space. The :math:`clr` transform is both an isometry and an
+    isomorphism defined on the following spaces
+
+    :math:`clr: S^D \rightarrow U`
+
+    where :math:`U=
+    \{x \in \mathbb{R}^D : \sum\limits_{i=1}^D x_i = 0\}`
 
-    :math:`clr(x) = ln[\frac{x_1}{g_m(x)}, ..., \frac{x_D}{g_m(x)}]`
-    where :math:`g_m(x) = (\prod_{i=1}^{D} x_i)^{1/D}` is the geometric
+    It is defined for a composition :math:`x` as follows:
+
+    .. math::
+        clr(x) = \ln\left[\frac{x_1}{g_m(x)}, \ldots, \frac{x_D}{g_m(x)}\right]
+
+    where :math:`g_m(x) = (\prod\limits_{i=1}^{D} x_i)^{1/D}` is the geometric
     mean of :math:`x`.
 
     Parameters
@@ -347,7 +428,7 @@ def clr(mat):
     --------
     >>> import numpy as np
     >>> from skbio.stats.composition import clr
-    >>> x = np.array([.1,.3,.4, .2])
+    >>> x = np.array([.1, .3, .4, .2])
     >>> clr(x)
     array([-0.79451346,  0.30409883,  0.5917809 , -0.10136628])
 
@@ -359,8 +440,153 @@ def clr(mat):
 
 
 @experimental(as_of="0.4.0")
+def clr_inv(mat):
+    r"""
+    Performs inverse centre log ratio transformation.
+
+    This function transforms compositions from the real space to
+    Aitchison geometry. The :math:`clr^{-1}` transform is both an isometry,
+    and an isomorphism defined on the following spaces
+
+    :math:`clr^{-1}: U \rightarrow S^D`
+
+    where :math:`U=
+    \{x \in \mathbb{R}^D : \sum\limits_{i=1}^D x_i = 0\}`
+
+    This transformation is defined as follows
+
+    .. math::
+        clr^{-1}(x) = C[\exp( x_1, \ldots, x_D)]
+
+    Parameters
+    ----------
+    mat : array_like, float
+       a matrix of real values where
+       rows = transformed compositions and
+       columns = components
+
+    Returns
+    -------
+    numpy.ndarray
+         inverse clr transformed matrix
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import clr_inv
+    >>> x = np.array([.1, .3, .4, .2])
+    >>> clr_inv(x)
+    array([ 0.21383822,  0.26118259,  0.28865141,  0.23632778])
+
+    """
+    return closure(np.exp(mat))
+
+
+@experimental(as_of="0.4.0")
+def ilr(mat, basis=None, check=True):
+    r"""
+    Performs isometric log ratio transformation.
+
+    This function transforms compositions from the Aitchison simplex to
+    the real space. The :math:`ilr` transform is both an isometry,
+    and an isomorphism defined on the following spaces
+
+    :math:`ilr: S^D \rightarrow \mathbb{R}^{D-1}`
+
+    The ilr transformation is defined as follows
+
+    .. math::
+        ilr(x) =
+        [\langle x, e_1 \rangle_a, \ldots, \langle x, e_{D-1} \rangle_a]
+
+    where :math:`[e_1,\ldots,e_{D-1}]` is an orthonormal basis in the simplex.
+
+    If an orthonormal basis isn't specified, the J. J. Egozcue orthonormal
+    basis derived from Gram-Schmidt orthogonalization will be used by
+    default.
+
+    Parameters
+    ----------
+    mat: numpy.ndarray
+       a matrix of proportions where
+       rows = compositions and
+       columns = components
+
+    basis: numpy.ndarray, float, optional
+        orthonormal basis for Aitchison simplex
+        defaults to the J. J. Egozcue orthonormal basis
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import ilr
+    >>> x = np.array([.1, .3, .4, .2])
+    >>> ilr(x)
+    array([-0.7768362 , -0.68339802,  0.11704769])
+
+    """
+    mat = closure(mat)
+    if basis is None:
+        basis = clr_inv(_gram_schmidt_basis(mat.shape[-1]))
+    elif check:
+        _check_orthogonality(basis)
+    return inner(mat, basis)
+
+
+@experimental(as_of="0.4.0")
+def ilr_inv(mat, basis=None, check=True):
+    r"""
+    Performs inverse isometric log ratio transform.
+
+    This function transforms compositions from the real space to
+    Aitchison geometry. The :math:`ilr^{-1}` transform is both an isometry,
+    and an isomorphism defined on the following spaces
+
+    :math:`ilr^{-1}: \mathbb{R}^{D-1} \rightarrow S^D`
+
+    The inverse ilr transformation is defined as follows
+
+    .. math::
+        ilr^{-1}(x) = \bigoplus\limits_{i=1}^{D-1} x \odot e_i
+
+    where :math:`[e_1,\ldots, e_{D-1}]` is an orthonormal basis in the simplex.
+
+    If an orthonormal basis isn't specified, the J. J. Egozcue orthonormal
+    basis derived from Gram-Schmidt orthogonalization will be used by
+    default.
+
+
+    Parameters
+    ----------
+    mat: numpy.ndarray, float
+       a matrix of transformed proportions where
+       rows = compositions and
+       columns = components
+
+    basis: numpy.ndarray, float, optional
+        orthonormal basis for Aitchison simplex
+        defaults to the J. J. Egozcue orthonormal basis
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import ilr_inv
+    >>> x = np.array([.1, .3, .6])
+    >>> ilr_inv(x)
+    array([ 0.34180297,  0.29672718,  0.22054469,  0.14092516])
+
+    """
+
+    if basis is None:
+        basis = _gram_schmidt_basis(mat.shape[-1] + 1)
+    elif check:
+        _check_orthogonality(basis)
+    return clr_inv(np.dot(mat, basis))
+
+
+@experimental(as_of="0.4.0")
 def centralize(mat):
-    """Center data around its geometric average.
+    r"""Center data around its geometric average.
 
     Parameters
     ----------
@@ -385,5 +611,369 @@ def centralize(mat):
 
     """
     mat = closure(mat)
-    cen = ss.gmean(mat, axis=0)
+    cen = scipy.stats.gmean(mat, axis=0)
     return perturb_inv(mat, cen)
+
+
+@experimental(as_of="0.4.1")
+def ancom(table, grouping,
+          alpha=0.05,
+          tau=0.02,
+          theta=0.1,
+          multiple_comparisons_correction=None,
+          significance_test=None):
+    r""" Performs a differential abundance test using ANCOM.
+
+    This is done by calculating pairwise log ratios between all features
+    and performing a significance test to determine if there is a significant
+    difference in feature ratios with respect to the variable of interest.
+
+    In an experiment with only two treatments, this test tests the following
+    hypothesis for feature :math:`i`
+
+    .. math::
+
+        H_{0i}: \mathbb{E}[\ln(u_i^{(1)})] = \mathbb{E}[\ln(u_i^{(2)})]
+
+    where :math:`u_i^{(1)}` is the mean abundance for feature :math:`i` in the
+    first group and :math:`u_i^{(2)}` is the mean abundance for feature
+    :math:`i` in the second group.
+
+    Parameters
+    ----------
+    table : pd.DataFrame
+        A 2D matrix of strictly positive values (i.e. counts or proportions)
+        where the rows correspond to samples and the columns correspond to
+        features.
+    grouping : pd.Series
+        Vector indicating the assignment of samples to groups.  For example,
+        these could be strings or integers denoting which group a sample
+        belongs to.  It must be the same length as the samples in `table`.
+        The index must be the same on `table` and `grouping` but need not be
+        in the same order.
+    alpha : float, optional
+        Significance level for each of the statistical tests.
+        This can be anywhere between 0 and 1, exclusive.
+    tau : float, optional
+        A constant used to determine an appropriate cutoff.
+        A value close to zero indicates a conservative cutoff.
+        This can be anywhere between 0 and 1, exclusive.
+    theta : float, optional
+        Lower bound for the proportion for the W-statistic.
+        If all W-statistics are lower than theta, then no features
+        will be detected to be differentially significant.
+        This can be anywhere between 0 and 1, exclusive.
+    multiple_comparisons_correction : {None, 'holm-bonferroni'}, optional
+        The multiple comparison correction procedure to run.  If None,
+        then no multiple comparison correction procedure will be run.
+        If 'holm-bonferroni' is specified, then the Holm-Bonferroni
+        procedure [1]_ will be run.
+    significance_test : function, optional
+        A statistical significance function to test for significance between
+        classes.  This function must be able to accept at least two 1D
+        array_like arguments of floats and returns a test statistic and a
+        p-value. By default ``scipy.stats.f_oneway`` is used.
+
+    Returns
+    -------
+    pd.DataFrame
+        A table of features, their W-statistics and whether the null hypothesis
+        is rejected.
+
+        `"W"` is the W-statistic, or number of features that a single feature
+        is tested to be significantly different against.
+
+        `"reject"` indicates if feature is significantly different or not.
+
+    See Also
+    --------
+    multiplicative_replacement
+    scipy.stats.ttest_ind
+    scipy.stats.f_oneway
+    scipy.stats.wilcoxon
+    scipy.stats.kruskal
+
+    Notes
+    -----
+    The developers of this method recommend the following significance tests
+    ([2]_, Supplementary File 1, top of page 11): the standard parametric
+    t-test (``scipy.stats.ttest_ind``) or one-way ANOVA
+    (``scipy.stats.f_oneway``) if the number of groups is greater
+    than 2, or non-parametric variants: Wilcoxon rank sum
+    (``scipy.stats.wilcoxon``) for two groups, or Kruskal-Wallis
+    (``scipy.stats.kruskal``) if the number of groups is greater
+    than 2.  Because one-way ANOVA is
+    equivalent to the standard t-test when the number of groups is two,
+    we default to ``scipy.stats.f_oneway`` here, which can be used when
+    there are two or more groups.  Users should refer to the documentation
+    of these tests in SciPy to understand the assumptions made by each test.
+
+    This method cannot handle any zero counts as input, since the logarithm
+    of zero cannot be computed.  While this is an unsolved problem, many
+    studies have shown promising results by replacing the zeros with pseudo
+    counts. This can also be done via the ``multiplicative_replacement``
+    method.
+
+    References
+    ----------
+    .. [1] Holm, S. "A simple sequentially rejective multiple test procedure".
+       Scandinavian Journal of Statistics (1979), 6.
+    .. [2] Mandal et al. "Analysis of composition of microbiomes: a novel
+       method for studying microbial composition", Microbial Ecology in Health
+       & Disease, (2015), 26.
+
+    Examples
+    --------
+    First import all of the necessary modules:
+
+    >>> from skbio.stats.composition import ancom
+    >>> import pandas as pd
+
+    Now let's load in a pd.DataFrame with 6 samples and 7 unknown bacteria:
+
+    >>> table = pd.DataFrame([[12, 11, 10, 10, 10, 10, 10],
+    ...                       [9,  11, 12, 10, 10, 10, 10],
+    ...                       [1,  11, 10, 11, 10, 5,  9],
+    ...                       [22, 21, 9,  10, 10, 10, 10],
+    ...                       [20, 22, 10, 10, 13, 10, 10],
+    ...                       [23, 21, 14, 10, 10, 10, 10]],
+    ...                      index=['s1','s2','s3','s4','s5','s6'],
+    ...                      columns=['b1','b2','b3','b4','b5','b6','b7'])
+
+    Then create a grouping vector.  In this scenario, there
+    are only two classes, and suppose these classes correspond to the
+    treatment due to a drug and a control.  The first three samples
+    are controls and the last three samples are treatments.
+
+    >>> grouping = pd.Series([0, 0, 0, 1, 1, 1],
+    ...                      index=['s1','s2','s3','s4','s5','s6'])
+
+    Now run ``ancom`` and see if there are any features that have any
+    significant differences between the treatment and the control.
+
+    >>> results = ancom(table, grouping)
+    >>> results['W']
+    b1    0
+    b2    4
+    b3    1
+    b4    1
+    b5    1
+    b6    0
+    b7    1
+    Name: W, dtype: int64
+
+    The W-statistic is the number of features that a single feature is tested
+    to be significantly different against.  In this scenario, `b2` was detected
+    to have significantly different abundances compared to four of the other
+    species. To summarize the results from the W-statistic, let's take a look
+    at the results from the hypothesis test:
+
+    >>> results['reject']
+    b1    False
+    b2     True
+    b3    False
+    b4    False
+    b5    False
+    b6    False
+    b7    False
+    Name: reject, dtype: bool
+
+    From this we can conclude that only `b2` was significantly
+    different between the treatment and the control.
+
+    """
+
+    if not isinstance(table, pd.DataFrame):
+        raise TypeError('`table` must be a `pd.DataFrame`, '
+                        'not %r.' % type(table).__name__)
+    if not isinstance(grouping, pd.Series):
+        raise TypeError('`grouping` must be a `pd.Series`,'
+                        ' not %r.' % type(grouping).__name__)
+
+    if np.any(table <= 0):
+        raise ValueError('Cannot handle zeros or negative values in `table`. '
+                         'Use pseudo counts or ``multiplicative_replacement``.'
+                         )
+
+    if not 0 < alpha < 1:
+        raise ValueError('`alpha`=%f is not within 0 and 1.' % alpha)
+
+    if not 0 < tau < 1:
+        raise ValueError('`tau`=%f is not within 0 and 1.' % tau)
+
+    if not 0 < theta < 1:
+        raise ValueError('`theta`=%f is not within 0 and 1.' % theta)
+
+    if multiple_comparisons_correction is not None:
+        if multiple_comparisons_correction != 'holm-bonferroni':
+            raise ValueError('%r is not an available option for '
+                             '`multiple_comparisons_correction`.'
+                             % multiple_comparisons_correction)
+
+    if (grouping.isnull()).any():
+        raise ValueError('Cannot handle missing values in `grouping`.')
+
+    if (table.isnull()).any().any():
+        raise ValueError('Cannot handle missing values in `table`.')
+
+    groups, _grouping = np.unique(grouping, return_inverse=True)
+    grouping = pd.Series(_grouping, index=grouping.index)
+    num_groups = len(groups)
+
+    if num_groups == len(grouping):
+        raise ValueError(
+            "All values in `grouping` are unique. This method cannot "
+            "operate on a grouping vector with only unique values (e.g., "
+            "there are no 'within' variance because each group of samples "
+            "contains only a single sample).")
+
+    if num_groups == 1:
+        raise ValueError(
+            "All values the `grouping` are the same. This method cannot "
+            "operate on a grouping vector with only a single group of samples"
+            "(e.g., there are no 'between' variance because there is only a "
+            "single group).")
+
+    if significance_test is None:
+        significance_test = scipy.stats.f_oneway
+
+    table_index_len = len(table.index)
+    grouping_index_len = len(grouping.index)
+    mat, cats = table.align(grouping, axis=0, join='inner')
+    if (len(mat) != table_index_len or len(cats) != grouping_index_len):
+        raise ValueError('`table` index and `grouping` '
+                         'index must be consistent.')
+
+    n_feat = mat.shape[1]
+
+    _logratio_mat = _log_compare(mat.values, cats.values, significance_test)
+    logratio_mat = _logratio_mat + _logratio_mat.T
+
+    # Multiple comparisons
+    if multiple_comparisons_correction == 'holm-bonferroni':
+        logratio_mat = np.apply_along_axis(_holm_bonferroni,
+                                           1, logratio_mat)
+    np.fill_diagonal(logratio_mat, 1)
+    W = (logratio_mat < alpha).sum(axis=1)
+    c_start = W.max() / n_feat
+    if c_start < theta:
+        reject = np.zeros_like(W, dtype=bool)
+    else:
+        # Select appropriate cutoff
+        cutoff = c_start - np.linspace(0.05, 0.25, 5)
+        prop_cut = np.array([(W > n_feat*cut).mean() for cut in cutoff])
+        dels = np.abs(prop_cut - np.roll(prop_cut, -1))
+        dels[-1] = 0
+
+        if (dels[0] < tau) and (dels[1] < tau) and (dels[2] < tau):
+            nu = cutoff[1]
+        elif (dels[0] >= tau) and (dels[1] < tau) and (dels[2] < tau):
+            nu = cutoff[2]
+        elif (dels[1] >= tau) and (dels[2] < tau) and (dels[3] < tau):
+            nu = cutoff[3]
+        else:
+            nu = cutoff[4]
+        reject = (W >= nu*n_feat)
+    labs = mat.columns
+    return pd.DataFrame({'W': pd.Series(W, index=labs),
+                         'reject': pd.Series(reject, index=labs)})
+
+
+def _holm_bonferroni(p):
+    """ Performs Holm-Bonferroni correction for pvalues
+    to account for multiple comparisons
+
+    Parameters
+    ---------
+    p: numpy.array
+        array of pvalues
+
+    Returns
+    -------
+    numpy.array
+        corrected p-values
+    """
+    K = len(p)
+    sort_index = -np.ones(K, dtype=np.int64)
+    sorted_p = np.sort(p)
+    sorted_p_adj = sorted_p*(K-np.arange(K))
+    for j in range(K):
+        idx = (p == sorted_p[j]) & (sort_index < 0)
+        num_ties = len(sort_index[idx])
+        sort_index[idx] = np.arange(j, (j+num_ties), dtype=np.int64)
+
+    sorted_holm_p = [min([max(sorted_p_adj[:k]), 1])
+                     for k in range(1, K+1)]
+    holm_p = [sorted_holm_p[sort_index[k]] for k in range(K)]
+    return holm_p
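
An equivalent vectorized rendering of the same step-down adjustment may make the loop easier to follow (a sketch for illustration; the sort_index loop above exists mainly to assign consistent ranks to tied p-values):

    import numpy as np

    p = np.array([0.01, 0.04, 0.03])             # raw p-values
    K = len(p)
    order = np.argsort(p)                        # smallest p-value first
    multipliers = K - np.arange(K)               # K, K-1, ..., 1
    adj_sorted = np.minimum(np.maximum.accumulate(p[order] * multipliers), 1)
    adjusted = np.empty(K)
    adjusted[order] = adj_sorted                 # back to the original order
    # adjusted is approximately [0.03, 0.06, 0.06]
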
+
+
+def _log_compare(mat, cats,
+                 significance_test=scipy.stats.ttest_ind):
+    """ Calculates pairwise log ratios between all features and performs a
+    significance test (i.e. t-test) to determine if there is a significant
+    difference in feature ratios with respect to the variable of interest.
+
+    Parameters
+    ----------
+    mat: np.array
+       rows correspond to samples and columns correspond to
+       features (i.e. OTUs)
+    cats: np.array, float
+       Vector of categories
+    significance_test: function
+        statistical test to run
+
+    Returns
+    -------
+    log_ratio : np.array
+        log ratio pvalue matrix
+    """
+    r, c = mat.shape
+    log_ratio = np.zeros((c, c))
+    log_mat = np.log(mat)
+    cs = np.unique(cats)
+
+    def func(x):
+        return significance_test(*[x[cats == k] for k in cs])
+
+    for i in range(c-1):
+        ratio = (log_mat[:, i].T - log_mat[:, i+1:].T).T
+        m, p = np.apply_along_axis(func,
+                                   axis=0,
+                                   arr=ratio)
+        log_ratio[i, i+1:] = np.squeeze(np.array(p.T))
+    return log_ratio
+
+
+def _gram_schmidt_basis(n):
+    """
+    Builds a clr-transformed basis derived from
+    Gram-Schmidt orthogonalization
+
+    Parameters
+    ----------
+    n : int
+        Dimension of the Aitchison simplex
+    """
+    basis = np.zeros((n, n-1))
+    for j in range(n-1):
+        i = j + 1
+        e = np.array([(1/i)]*i + [-1] +
+                     [0]*(n-i-1))*np.sqrt(i/(i+1))
+        basis[:, j] = e
+    return basis.T
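+
+# For example (illustrative), _gram_schmidt_basis(3) returns a 2x3 array whose
+# rows are orthonormal, i.e. basis.dot(basis.T) is the 2x2 identity (up to
+# floating point).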
+
+
+def _check_orthogonality(basis):
+    """
+    Checks to see if basis is truly orthonormal in the
+    Aitchison simplex
+
+    Parameters
+    ----------
+    basis: numpy.ndarray
+        basis in the Aitchison simplex
+    """
+    if not np.allclose(inner(basis, basis), np.identity(len(basis)),
+                       rtol=1e-4, atol=1e-6):
+        raise ValueError("Aitchison basis is not orthonormal")
diff --git a/skbio/stats/distance/__init__.py b/skbio/stats/distance/__init__.py
index 5a9cdf3..1f68724 100644
--- a/skbio/stats/distance/__init__.py
+++ b/skbio/stats/distance/__init__.py
@@ -77,7 +77,7 @@ Load a distance matrix from the file:
 
 >>> from io import StringIO
 >>> from skbio import DistanceMatrix
->>> dm_fh = StringIO(u"\\ta\\tb\\tc\\n"
+>>> dm_fh = StringIO("\\ta\\tb\\tc\\n"
 ...                  "a\\t0.0\\t0.5\\t1.0\\n"
 ...                  "b\\t0.5\\t0.0\\t0.75\\n"
 ...                  "c\\t1.0\\t0.75\\t0.0\\n")
@@ -85,7 +85,7 @@ Load a distance matrix from the file:
 >>> print(dm)
 3x3 distance matrix
 IDs:
-u'a', u'b', u'c'
+'a', 'b', 'c'
 Data:
 [[ 0.    0.5   1.  ]
  [ 0.5   0.    0.75]
diff --git a/skbio/stats/distance/_base.py b/skbio/stats/distance/_base.py
index 4ffc49d..b1a72c7 100644
--- a/skbio/stats/distance/_base.py
+++ b/skbio/stats/distance/_base.py
@@ -21,7 +21,8 @@ from scipy.spatial.distance import squareform
 from skbio._base import SkbioObject
 from skbio.stats._misc import _pprint_strs
 from skbio.util import find_duplicates
-from skbio.util._decorator import experimental
+from skbio.util._decorator import experimental, classonlymethod
+from skbio.util._misc import resolve_key
 
 
 class DissimilarityMatrixError(Exception):
@@ -61,8 +62,10 @@ class DissimilarityMatrix(SkbioObject):
     data : array_like or DissimilarityMatrix
         Square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
         (floats), or a structure that can be converted to a ``numpy.ndarray``
-        using ``numpy.asarray``. Can instead be a `DissimilarityMatrix` (or
-        subclass) instance, in which case the instance's data will be used.
+        using ``numpy.asarray``. It can also be a one-dimensional condensed
+        vector of dissimilarities (floats), as defined by
+        `scipy.spatial.distance.squareform`. Can instead be a
+        `DissimilarityMatrix` (or subclass) instance,
+        in which case the instance's data will be used.
         Data will be converted to a float ``dtype`` if necessary. A copy will
         *not* be made if already a ``numpy.ndarray`` with a float ``dtype``.
     ids : sequence of str, optional
@@ -74,6 +77,7 @@ class DissimilarityMatrix(SkbioObject):
     See Also
     --------
     DistanceMatrix
+    scipy.spatial.distance.squareform
 
     Notes
     -----
@@ -94,9 +98,11 @@ class DissimilarityMatrix(SkbioObject):
     @experimental(as_of="0.4.0")
     def __init__(self, data, ids=None):
         if isinstance(data, DissimilarityMatrix):
+            ids = data.ids if ids is None else ids
             data = data.data
         data = np.asarray(data, dtype='float')
-
+        if data.ndim == 1:
+            data = squareform(data, force='tomatrix', checks=False)
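+            # e.g. (illustrative) a condensed vector [1, 2, 3] expands to the
+            # redundant square form [[0, 1, 2], [1, 0, 3], [2, 3, 0]]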
         if ids is None:
             ids = (str(i) for i in range(data.shape[0]))
         ids = tuple(ids)
@@ -363,12 +369,18 @@ class DissimilarityMatrix(SkbioObject):
         ax.set_xticks(ticks, minor=False)
         ax.set_yticks(ticks, minor=False)
 
+        # Ensure there is no white border around the heatmap by manually
+        # setting the limits
+        ax.set_ylim(0, len(self.ids))
+        ax.set_xlim(0, len(self.ids))
+
         # display data as it is stored in the dissimilarity matrix
         # (default is to have y-axis inverted)
         ax.invert_yaxis()
 
         ax.set_xticklabels(self.ids, rotation=90, minor=False)
         ax.set_yticklabels(self.ids, minor=False)
+
         ax.set_title(title)
 
         return fig
@@ -403,6 +415,31 @@ class DissimilarityMatrix(SkbioObject):
         plt.close(fig)
         return data
 
+    @experimental(as_of="0.4.1")
+    def to_data_frame(self):
+        """Create a ``pandas.DataFrame`` from this ``DissimilarityMatrix``.
+
+        Returns
+        -------
+        pd.DataFrame
+            ``pd.DataFrame`` with IDs on index and columns.
+
+        Examples
+        --------
+        >>> from skbio import DistanceMatrix
+        >>> dm = DistanceMatrix([[0, 1, 2],
+        ...                      [1, 0, 3],
+        ...                      [2, 3, 0]], ids=['a', 'b', 'c'])
+        >>> df = dm.to_data_frame()
+        >>> df
+           a  b  c
+        a  0  1  2
+        b  1  0  3
+        c  2  3  0
+
+        """
+        return pd.DataFrame(data=self.data, index=self.ids, columns=self.ids)
+
     @experimental(as_of="0.4.0")
     def __str__(self):
         """Return a string representation of the dissimilarity matrix.
@@ -648,6 +685,63 @@ class DistanceMatrix(DissimilarityMatrix):
     # Override here, used in superclass __str__
     _matrix_element_name = 'distance'
 
+    @classonlymethod
+    @experimental(as_of="0.4.1")
+    def from_iterable(cls, iterable, metric, key=None, keys=None):
+        """Create DistanceMatrix from all pairs in an iterable given a metric.
+
+        Parameters
+        ----------
+        iterable : iterable
+            Iterable containing objects to compute pairwise distances on.
+        metric : callable
+            A function that takes two arguments and returns a float
+            representing the distance between the two arguments.
+        key : callable or metadata key, optional
+            A function that takes one argument and returns a string
+            representing the id of the element in the distance matrix.
+            Alternatively, a key to a `metadata` property if it exists for
+            each element in the `iterable`. If None, then default ids will be
+            used.
+        keys : iterable, optional
+            An iterable of the same length as `iterable`. Each element will be
+            used as the respective key.
+
+        Returns
+        -------
+        DistanceMatrix
+            The `metric` applied to all pairwise elements in the `iterable`.
+
+        Raises
+        ------
+        ValueError
+            If `key` and `keys` are both provided.
+
+        Notes
+        -----
+        Symmetry and hollowness are assumed when calculating the distances via
+        `metric`. Therefore, distances are only computed for the strictly
+        upper/lower triangle.
+
+        """
+        iterable = list(iterable)
+        if key is not None and keys is not None:
+            raise ValueError("Cannot use both `key` and `keys` at the same"
+                             " time.")
+
+        keys_ = None
+        if key is not None:
+            keys_ = [resolve_key(e, key) for e in iterable]
+        elif keys is not None:
+            keys_ = keys
+
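+        # The metric is assumed symmetric and hollow (see Notes), so only the
+        # strictly lower triangle is computed and then mirrored.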
+        dm = np.zeros((len(iterable),) * 2)
+        for i, a in enumerate(iterable):
+            for j, b in enumerate(iterable[:i]):
+                dm[i, j] = dm[j, i] = metric(a, b)
+
+        return cls(dm, keys_)
+
     @experimental(as_of="0.4.0")
     def condensed_form(self):
         """Return an array of distances in condensed format.
diff --git a/skbio/stats/distance/_mantel.py b/skbio/stats/distance/_mantel.py
index fa4a73e..8e2d3f6 100644
--- a/skbio/stats/distance/_mantel.py
+++ b/skbio/stats/distance/_mantel.py
@@ -179,7 +179,7 @@ def mantel(x, y, method='pearson', permutations=999, alternative='two-sided',
     a two-sided test with 999 permutations:
 
     >>> coeff, p_value, n = mantel(x, y)
-    >>> round(coeff, 4)
+    >>> print(round(coeff, 4))
     0.7559
 
     Thus, we see a moderate-to-strong positive correlation (:math:`r_M=0.7559`)
@@ -232,7 +232,7 @@ def mantel(x, y, method='pearson', permutations=999, alternative='two-sided',
     example where all distance matrix IDs matched:
 
     >>> coeff, p_value, n = mantel(x, y, lookup=lookup)
-    >>> round(coeff, 4)
+    >>> print(round(coeff, 4))
     0.7559
 
     ``mantel`` also accepts input that is ``array_like``. For example, if we
@@ -246,7 +246,7 @@ def mantel(x, y, method='pearson', permutations=999, alternative='two-sided',
     ...      [2, 0, 6],
     ...      [7, 6, 0]]
     >>> coeff, p_value, n = mantel(x, y)
-    >>> round(coeff, 4)
+    >>> print(round(coeff, 4))
     0.7559
 
     It is important to note that reordering/matching of IDs (and hence the
diff --git a/skbio/stats/distance/tests/test_base.py b/skbio/stats/distance/tests/test_base.py
index 7c380ec..fabb70e 100644
--- a/skbio/stats/distance/tests/test_base.py
+++ b/skbio/stats/distance/tests/test_base.py
@@ -24,6 +24,7 @@ from skbio.stats.distance import (
     DissimilarityMatrix, randdm)
 from skbio.stats.distance._base import (_preprocess_input,
                                         _run_monte_carlo_stats)
+from skbio.util import assert_data_frame_almost_equal
 
 
 class DissimilarityMatrixTestData(TestCase):
@@ -333,6 +334,26 @@ class DissimilarityMatrixTests(DissimilarityMatrixTestData):
         dm = self.dm_1x1
         self.assertIsInstance(dm.svg, SVG)
 
+    def test_to_data_frame_1x1(self):
+        df = self.dm_1x1.to_data_frame()
+        exp = pd.DataFrame([[0.0]], index=['a'], columns=['a'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_to_data_frame_3x3(self):
+        df = self.dm_3x3.to_data_frame()
+        exp = pd.DataFrame([[0.0, 0.01, 4.2],
+                            [0.01, 0.0, 12.0],
+                            [4.2, 12.0, 0.0]],
+                           index=['a', 'b', 'c'], columns=['a', 'b', 'c'])
+        assert_data_frame_almost_equal(df, exp)
+
+    def test_to_data_frame_default_ids(self):
+        df = DissimilarityMatrix(self.dm_2x2_data).to_data_frame()
+        exp = pd.DataFrame([[0.0, 0.123],
+                            [0.123, 0.0]],
+                           index=['0', '1'], columns=['0', '1'])
+        assert_data_frame_almost_equal(df, exp)
+
     def test_str(self):
         for dm in self.dms:
             obs = str(dm)
@@ -450,6 +471,14 @@ class DistanceMatrixTests(DissimilarityMatrixTestData):
         self.dm_condensed_forms = [np.array([]), np.array([0.123]),
                                    np.array([0.01, 4.2, 12.0])]
 
+    def test_init_from_condensed_form(self):
+        data = [1, 2, 3]
+        exp = DistanceMatrix([[0, 1, 2],
+                              [1, 0, 3],
+                              [2, 3, 0]], ['0', '1', '2'])
+        res = DistanceMatrix(data)
+        self.assertEqual(exp, res)
+
     def test_init_invalid_input(self):
         # Asymmetric.
         data = [[0.0, 2.0], [1.0, 0.0]]
@@ -460,6 +489,53 @@ class DistanceMatrixTests(DissimilarityMatrixTestData):
         with self.assertRaises(DissimilarityMatrixError):
             DistanceMatrix([[1, 2, 3]], ['a'])
 
+    def test_from_iterable_no_key(self):
+        iterable = (x for x in range(4))
+
+        exp = DistanceMatrix([[0, 1, 2, 3],
+                              [1, 0, 1, 2],
+                              [2, 1, 0, 1],
+                              [3, 2, 1, 0]])
+        res = DistanceMatrix.from_iterable(iterable, lambda a, b: abs(b - a))
+        self.assertEqual(res, exp)
+
+    def test_from_iterable_with_key(self):
+        iterable = (x for x in range(4))
+
+        exp = DistanceMatrix([[0, 1, 2, 3],
+                              [1, 0, 1, 2],
+                              [2, 1, 0, 1],
+                              [3, 2, 1, 0]], ['0', '1', '4', '9'])
+        res = DistanceMatrix.from_iterable(iterable, lambda a, b: abs(b - a),
+                                           key=lambda x: str(x**2))
+        self.assertEqual(res, exp)
+
+    def test_from_iterable_empty(self):
+        with self.assertRaises(DissimilarityMatrixError):
+            DistanceMatrix.from_iterable([], lambda x: x)
+
+    def test_from_iterable_single(self):
+        exp = DistanceMatrix([[0]])
+        res = DistanceMatrix.from_iterable(["boo"], lambda _: 100)
+        self.assertEqual(res, exp)
+
+    def test_from_iterable_with_keys(self):
+        iterable = (x for x in range(4))
+
+        exp = DistanceMatrix([[0, 1, 2, 3],
+                              [1, 0, 1, 2],
+                              [2, 1, 0, 1],
+                              [3, 2, 1, 0]], ['0', '1', '4', '9'])
+        res = DistanceMatrix.from_iterable(iterable, lambda a, b: abs(b - a),
+                                           keys=iter(['0', '1', '4', '9']))
+        self.assertEqual(res, exp)
+
+    def test_from_iterable_with_key_and_keys(self):
+        iterable = (x for x in range(4))
+        with self.assertRaises(ValueError):
+            DistanceMatrix.from_iterable(iterable, lambda a, b: abs(b - a),
+                                         key=str, keys=['1', '2', '3', '4'])
+
     def test_condensed_form(self):
         for dm, condensed in zip(self.dms, self.dm_condensed_forms):
             obs = dm.condensed_form()
diff --git a/skbio/stats/gradient.py b/skbio/stats/gradient.py
index 2dd1a17..2f86a49 100644
--- a/skbio/stats/gradient.py
+++ b/skbio/stats/gradient.py
@@ -68,14 +68,14 @@ Check if we weighted the data or not:
 >>> print(trajectory_results.weighted)
 False
 
-Check the trajectory_results results of one of the categories:
+Check the results of one of the categories:
 
 >>> print(trajectory_results.categories[0].category)
 Treatment
 >>> print(trajectory_results.categories[0].probability)
 0.0118478282382
 
-Check the trajectory_results results of one group of one of the categories:
+Check the results of one group of one of the categories:
 
 >>> print(trajectory_results.categories[0].groups[0].name)
 Control
@@ -473,8 +473,10 @@ class GradientANOVA(object):
         for cat, cat_groups in self._groups.items():
             # Loop through all the category values present in the current
             # category and compute the trajectory for each of them
-            res_by_group = [self._get_group_trajectories(group, sample_ids)
-                            for group, sample_ids in cat_groups.items()]
+            res_by_group = []
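+            # Iterate over the groups in sorted order so that trajectories are
+            # computed (and reported) in a deterministic order across runs.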
+            for group in sorted(cat_groups, key=lambda k: str(k)):
+                res_by_group.append(
+                    self._get_group_trajectories(group, cat_groups[group]))
 
             result.categories.append(_ANOVA_trajectories(cat, res_by_group))
 
diff --git a/skbio/stats/ordination/__init__.py b/skbio/stats/ordination/__init__.py
index 2231946..27b067c 100644
--- a/skbio/stats/ordination/__init__.py
+++ b/skbio/stats/ordination/__init__.py
@@ -8,18 +8,6 @@ This module contains several ordination methods, including Principal
 Coordinate Analysis, Correspondence Analysis, Redundancy Analysis and
 Canonical Correspondence Analysis.
 
-Classes
--------
-
-.. autosummary::
-   :toctree: generated/
-
-   PCoA
-   CA
-   RDA
-   CCA
-   OrdinationResults
-
 
 Functions
 ---------
@@ -27,18 +15,15 @@ Functions
 .. autosummary::
    :toctree: generated/
 
+   ca
+   pcoa
+   cca
+   rda
    mean_and_std
    corr
    scale
    svd_rank
 
-Testing Utilities
------------------
-
-.. autosummary::
-   :toctree: generated/
-
-   assert_ordination_results_equal
 
 Examples
 --------
@@ -48,6 +33,10 @@ abundance in different sites (`Y`, the response variables) and
 environmental variables (`X`, the explanatory variables).
 
 >>> import numpy as np
+>>> import pandas as pd
+
+First we need to construct our explanatory variable dataset `X`.
+
 >>> X = np.array([[1.0, 0.0, 1.0, 0.0],
 ...               [2.0, 0.0, 1.0, 0.0],
 ...               [3.0, 0.0, 1.0, 0.0],
@@ -58,6 +47,17 @@ environmental variables (`X`, the explanatory variables).
 ...               [8.0, 0.0, 0.0, 1.0],
 ...               [9.0, 1.0, 0.0, 0.0],
 ...               [10.0, 0.0, 0.0, 1.0]])
+>>> transects = ['depth', 'substrate_coral', 'substrate_sand',
+...              'substrate_other']
+>>> sites = ['site1', 'site2', 'site3', 'site4', 'site5', 'site6', 'site7',
+...          'site8', 'site9', 'site10']
+>>> X = pd.DataFrame(X, sites, transects)
+
+Then we need to create a dataframe with the information about the species
+observed at different sites.
+
+>>> species = ['specie1', 'specie2', 'specie3', 'specie4', 'specie5',
+...            'specie6', 'specie7', 'specie8', 'specie9']
 >>> Y = np.array([[1, 0, 0, 0, 0, 0, 2, 4, 4],
 ...               [0, 0, 0, 0, 0, 0, 5, 6, 1],
 ...               [0, 1, 0, 0, 0, 0, 0, 2, 3],
@@ -68,29 +68,33 @@ environmental variables (`X`, the explanatory variables).
 ...               [7, 8, 0, 0, 4, 3, 6, 6, 4],
 ...               [7, 9, 10, 13, 0, 0, 6, 2, 0],
 ...               [5, 10, 0, 0, 2, 4, 0, 1, 3]])
+>>> Y = pd.DataFrame(Y, sites, species)
+
+We can now perform canonical correspondence analysis. Matrix `X` contains a
+continuous variable (depth) and a categorical one (substrate type) encoded
+using a one-hot encoding.
+
+>>> from skbio.stats.ordination import cca
+
+We explicitly need to avoid perfect collinearity, so we'll drop one of the
+substrate types (the last column of `X`).
 
-We can now create a CCA object to perform canonical correspondence
-analysis. Matrix `X` contains a continuous variable (depth) and a
-categorical one (substrate type) encoded using a one-hot encoding. We
-explicitly need to avoid perfect collinearity, so we'll drop one of
-the substrate types (the last column of `X`). We also expect to
-increase pandas integration to ease analyses.
-
->>> from skbio.stats.ordination import CCA
->>> ordination_result = CCA(Y, X[:, :-1],
-...                         ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
-...                          'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
-...                         ['Species0', 'Species1', 'Species2', 'Species3',
-...                          'Species4', 'Species5', 'Species6', 'Species7',
-...                          'Species8'])
+>>> del X['substrate_other']
+>>> ordination_result = cca(Y, X, scaling=2)
 
 Exploring the results we see that the first three axes explain about
 80% of all the variance.
 
->>> sc_2 = ordination_result.scores(scaling=2)
->>> print(sc_2.proportion_explained)
-[ 0.46691091  0.23832652  0.10054837  0.10493671  0.04480535  0.02974698
-  0.01263112  0.00156168  0.00053235]
+>>> ordination_result.proportion_explained
+CCA1    0.466911
+CCA2    0.238327
+CCA3    0.100548
+CCA4    0.104937
+CCA5    0.044805
+CCA6    0.029747
+CCA7    0.012631
+CCA8    0.001562
+CCA9    0.000532
+dtype: float64
 
 References
 ----------
@@ -112,15 +116,14 @@ from __future__ import absolute_import, division, print_function
 
 from skbio.util import TestRunner
 
-from ._correspondence_analysis import CA
-from ._redundancy_analysis import RDA
-from ._canonical_correspondence_analysis import CCA
-from ._principal_coordinate_analysis import PCoA
-from ._base import OrdinationResults
-from ._utils import (mean_and_std, scale, svd_rank, corr,
-                     assert_ordination_results_equal)
+from ._redundancy_analysis import rda
+from ._correspondence_analysis import ca
+from ._canonical_correspondence_analysis import cca
+from ._principal_coordinate_analysis import pcoa
+from ._utils import (mean_and_std, scale, svd_rank, corr, e_matrix, f_matrix)
 
-__all__ = ['CA', 'RDA', 'CCA', 'PCoA', 'OrdinationResults', 'mean_and_std',
-           'scale', 'svd_rank', 'corr', 'assert_ordination_results_equal']
+__all__ = ['ca', 'rda', 'cca', 'pcoa',
+           'mean_and_std', 'scale', 'svd_rank', 'corr',
+           'e_matrix', 'f_matrix']
 
 test = TestRunner(__file__).test
diff --git a/skbio/stats/ordination/_base.py b/skbio/stats/ordination/_base.py
deleted file mode 100644
index a669a1f..0000000
--- a/skbio/stats/ordination/_base.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-from future.builtins import zip
-
-from functools import partial
-
-import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-from mpl_toolkits.mplot3d import Axes3D
-from IPython.core.pylabtools import print_figure
-from IPython.core.display import Image, SVG
-
-from skbio._base import SkbioObject
-from skbio.stats._misc import _pprint_strs
-from skbio.util._decorator import experimental
-
-# avoid flake8 unused import error
-Axes3D
-
-
-class OrdinationResults(SkbioObject):
-    """Store ordination results, providing serialization and plotting support.
-
-    Stores various components of ordination results. Provides methods for
-    serializing/deserializing results, as well as generation of basic
-    matplotlib 3-D scatterplots. Will automatically display PNG/SVG
-    representations of itself within the IPython Notebook.
-
-    Attributes
-    ----------
-    eigvals : 1-D numpy array
-        The result eigenvalues
-    species : 2-D numpy array
-        The result coordinates for each species
-    site : 2-D numpy array
-        The results coordinates for each site
-    biplot : 2-D numpy array
-        The result biplot coordinates
-    site_constraints : 2-D numpy array
-        The result coordinates for each site constraint
-    proportion_explained : 1-D numpy array
-        The proportion explained by each eigenvector
-    species_ids : list of str
-        The species identifiers
-    site_ids : list of str
-        The site identifiers
-    png
-    svg
-
-    """
-    default_write_format = 'ordination'
-
-    @experimental(as_of="0.4.0")
-    def __init__(self, eigvals, species=None, site=None, biplot=None,
-                 site_constraints=None, proportion_explained=None,
-                 species_ids=None, site_ids=None):
-        self.eigvals = eigvals
-        self.species = species
-        self.site = site
-        self.biplot = biplot
-        self.site_constraints = site_constraints
-        self.proportion_explained = proportion_explained
-        self.species_ids = species_ids
-        self.site_ids = site_ids
-
-    @experimental(as_of="0.4.0")
-    def __str__(self):
-        """Return a string representation of the ordination results.
-
-        String representation lists ordination results attributes and indicates
-        whether or not they are present. If an attribute is present, its
-        dimensions are listed. A truncated list of species and site IDs are
-        included (if they are present).
-
-        Returns
-        -------
-        str
-            String representation of the ordination results.
-
-        """
-        lines = ['Ordination results:']
-
-        attrs = [(self.eigvals, 'Eigvals'),
-                 (self.proportion_explained, 'Proportion explained'),
-                 (self.species, 'Species'),
-                 (self.site, 'Site'),
-                 (self.biplot, 'Biplot'),
-                 (self.site_constraints, 'Site constraints')]
-        for attr, attr_label in attrs:
-            def formatter(e):
-                return 'x'.join(['%d' % s for s in e.shape])
-
-            lines.append(self._format_attribute(attr, attr_label, formatter))
-
-        lines.append(self._format_attribute(self.species_ids, 'Species IDs',
-                                            lambda e: _pprint_strs(e)))
-        lines.append(self._format_attribute(self.site_ids, 'Site IDs',
-                                            lambda e: _pprint_strs(e)))
-
-        return '\n'.join(lines)
-
-    @experimental(as_of="0.4.0")
-    def plot(self, df=None, column=None, axes=(0, 1, 2), axis_labels=None,
-             title='', cmap=None, s=20):
-        """Create a 3-D scatterplot of ordination results colored by metadata.
-
-        Creates a 3-D scatterplot of the ordination results, where each point
-        represents a site. Optionally, these points can be colored by metadata
-        (see `df` and `column` below).
-
-        Parameters
-        ----------
-        df : pandas.DataFrame, optional
-            ``DataFrame`` containing site metadata. Must be indexed by site ID,
-            and all site IDs in the ordination results must exist in the
-            ``DataFrame``. If ``None``, sites (i.e., points) will not be
-            colored by metadata.
-        column : str, optional
-            Column name in `df` to color sites (i.e., points in the plot) by.
-            Cannot have missing data (i.e., ``np.nan``). `column` can be
-            numeric or categorical. If numeric, all values in the column will
-            be cast to ``float`` and mapped to colors using `cmap`. A colorbar
-            will be included to serve as a legend. If categorical (i.e., not
-            all values in `column` could be cast to ``float``), colors will be
-            chosen for each category using evenly-spaced points along `cmap`. A
-            legend will be included. If ``None``, sites (i.e., points) will not
-            be colored by metadata.
-        axes : iterable of int, optional
-            Indices of site coordinates to plot on the x-, y-, and z-axes. For
-            example, if plotting PCoA results, ``axes=(0, 1, 2)`` will plot
-            PC 1 on the x-axis, PC 2 on the y-axis, and PC 3 on the z-axis.
-            Must contain exactly three elements.
-        axis_labels : iterable of str, optional
-            Labels for the x-, y-, and z-axes. If ``None``, labels will be the
-            values of `axes` cast as strings.
-        title : str, optional
-            Plot title.
-        cmap : str or matplotlib.colors.Colormap, optional
-            Name or instance of matplotlib colormap to use for mapping `column`
-            values to colors. If ``None``, defaults to the colormap specified
-            in the matplotlib rc file. Qualitative colormaps (e.g., ``Set1``)
-            are recommended for categorical data, while sequential colormaps
-            (e.g., ``Greys``) are recommended for numeric data. See [1]_ for
-            these colormap classifications.
-        s : scalar or iterable of scalars, optional
-            Size of points. See matplotlib's ``Axes3D.scatter`` documentation
-            for more details.
-
-        Returns
-        -------
-        matplotlib.figure.Figure
-            Figure containing the scatterplot and legend/colorbar if metadata
-            were provided.
-
-        Raises
-        ------
-        ValueError
-            Raised on invalid input, including the following situations:
-
-            - there are not at least three dimensions to plot
-            - there are not exactly three values in `axes`, they are not
-              unique, or are out of range
-            - there are not exactly three values in `axis_labels`
-            - either `df` or `column` is provided without the other
-            - `column` is not in the ``DataFrame``
-            - site IDs in the ordination results are not in `df` or have
-              missing data in `column`
-
-        See Also
-        --------
-        mpl_toolkits.mplot3d.Axes3D.scatter
-
-        Notes
-        -----
-        This method creates basic plots of ordination results, and is intended
-        to provide a quick look at the results in the context of metadata
-        (e.g., from within the IPython Notebook). For more customization and to
-        generate publication-quality figures, we recommend EMPeror [2]_.
-
-        References
-        ----------
-        .. [1] http://matplotlib.org/examples/color/colormaps_reference.html
-        .. [2] EMPeror: a tool for visualizing high-throughput microbial
-           community data. Vazquez-Baeza Y, Pirrung M, Gonzalez A, Knight R.
-           Gigascience. 2013 Nov 26;2(1):16. http://biocore.github.io/emperor/
-
-        Examples
-        --------
-        .. plot::
-
-           Define a distance matrix with four sites labelled A-D:
-
-           >>> from skbio import DistanceMatrix
-           >>> dm = DistanceMatrix([[0., 0.21712454, 0.5007512, 0.91769271],
-           ...                      [0.21712454, 0., 0.45995501, 0.80332382],
-           ...                      [0.5007512, 0.45995501, 0., 0.65463348],
-           ...                      [0.91769271, 0.80332382, 0.65463348, 0.]],
-           ...                     ['A', 'B', 'C', 'D'])
-
-           Define metadata for each site in a ``pandas.DataFrame``:
-
-           >>> import pandas as pd
-           >>> metadata = {
-           ...     'A': {'body_site': 'skin'},
-           ...     'B': {'body_site': 'gut'},
-           ...     'C': {'body_site': 'gut'},
-           ...     'D': {'body_site': 'skin'}}
-           >>> df = pd.DataFrame.from_dict(metadata, orient='index')
-
-           Run principal coordinate analysis (PCoA) on the distance matrix:
-
-           >>> from skbio.stats.ordination import PCoA
-           >>> pcoa_results = PCoA(dm).scores()
-
-           Plot the ordination results, where each site is colored by body site
-           (a categorical variable):
-
-           >>> fig = pcoa_results.plot(df=df, column='body_site',
-           ...                         title='Sites colored by body site',
-           ...                         cmap='Set1', s=50)
-
-        """
-        # Note: New features should not be added to this method and should
-        # instead be added to EMPeror (http://biocore.github.io/emperor/).
-        # Only bug fixes and minor updates should be made to this method.
-
-        coord_matrix = self.site.T
-        self._validate_plot_axes(coord_matrix, axes)
-
-        # derived from
-        # http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
-        fig = plt.figure()
-        ax = fig.add_subplot(111, projection='3d')
-
-        xs = coord_matrix[axes[0]]
-        ys = coord_matrix[axes[1]]
-        zs = coord_matrix[axes[2]]
-
-        point_colors, category_to_color = self._get_plot_point_colors(
-            df, column, self.site_ids, cmap)
-
-        scatter_fn = partial(ax.scatter, xs, ys, zs, s=s)
-        if point_colors is None:
-            plot = scatter_fn()
-        else:
-            plot = scatter_fn(c=point_colors, cmap=cmap)
-
-        if axis_labels is None:
-            axis_labels = ['%d' % axis for axis in axes]
-        elif len(axis_labels) != 3:
-            raise ValueError("axis_labels must contain exactly three elements "
-                             "(found %d elements)." % len(axis_labels))
-
-        ax.set_xlabel(axis_labels[0])
-        ax.set_ylabel(axis_labels[1])
-        ax.set_zlabel(axis_labels[2])
-        ax.set_xticklabels([])
-        ax.set_yticklabels([])
-        ax.set_zticklabels([])
-        ax.set_title(title)
-
-        # create legend/colorbar
-        if point_colors is not None:
-            if category_to_color is None:
-                fig.colorbar(plot)
-            else:
-                self._plot_categorical_legend(ax, category_to_color)
-
-        return fig
-
-    def _validate_plot_axes(self, coord_matrix, axes):
-        """Validate `axes` against coordinates matrix."""
-        num_dims = coord_matrix.shape[0]
-        if num_dims < 3:
-            raise ValueError("At least three dimensions are required to plot "
-                             "ordination results. There are only %d "
-                             "dimension(s)." % num_dims)
-        if len(axes) != 3:
-            raise ValueError("axes must contain exactly three elements (found "
-                             "%d elements)." % len(axes))
-        if len(set(axes)) != 3:
-            raise ValueError("The values provided for axes must be unique.")
-
-        for idx, axis in enumerate(axes):
-            if axis < 0 or axis >= num_dims:
-                raise ValueError("axes[%d] must be >= 0 and < %d." %
-                                 (idx, num_dims))
-
-    def _get_plot_point_colors(self, df, column, ids, cmap):
-        """Return a list of colors for each plot point given a metadata column.
-
-        If `column` is categorical, additionally returns a dictionary mapping
-        each category (str) to color (used for legend creation).
-
-        """
-        if ((df is None and column is not None) or (df is not None and
-                                                    column is None)):
-            raise ValueError("Both df and column must be provided, or both "
-                             "must be None.")
-        elif df is None and column is None:
-            point_colors, category_to_color = None, None
-        else:
-            if column not in df:
-                raise ValueError("Column '%s' not in data frame." % column)
-
-            col_vals = df.loc[ids, column]
-
-            if col_vals.isnull().any():
-                raise ValueError("One or more IDs in the ordination results "
-                                 "are not in the data frame, or there is "
-                                 "missing data in the data frame's '%s' "
-                                 "column." % column)
-
-            category_to_color = None
-            try:
-                point_colors = col_vals.astype(float)
-            except ValueError:
-                # we have categorical data, so choose a color for each
-                # category, where colors are evenly spaced across the
-                # colormap.
-                # derived from http://stackoverflow.com/a/14887119
-                categories = col_vals.unique()
-                cmap = plt.get_cmap(cmap)
-                category_colors = cmap(np.linspace(0, 1, len(categories)))
-
-                category_to_color = dict(zip(categories, category_colors))
-                point_colors = col_vals.apply(lambda x: category_to_color[x])
-
-            point_colors = point_colors.tolist()
-
-        return point_colors, category_to_color
-
-    def _plot_categorical_legend(self, ax, color_dict):
-        """Add legend to plot using specified mapping of category to color."""
-        # derived from http://stackoverflow.com/a/20505720
-        proxies = []
-        labels = []
-        for category in color_dict:
-            proxy = mpl.lines.Line2D([0], [0], linestyle='none',
-                                     c=color_dict[category], marker='o')
-            proxies.append(proxy)
-            labels.append(category)
-
-        # place legend outside of the axes (centered)
-        # derived from http://matplotlib.org/users/legend_guide.html
-        ax.legend(proxies, labels, numpoints=1, loc=6,
-                  bbox_to_anchor=(1.05, 0.5), borderaxespad=0.)
-
-    # Here we define the special repr methods that provide the IPython display
-    # protocol. Code derived from:
-    #     https://github.com/ipython/ipython/blob/2.x/examples/Notebook/
-    #         Custom%20Display%20Logic.ipynb
-    # See licenses/ipython.txt for more details.
-
-    def _repr_png_(self):
-        return self._figure_data('png')
-
-    def _repr_svg_(self):
-        return self._figure_data('svg')
-
-    # We expose the above reprs as properties, so that the user can see them
-    # directly (since otherwise the client dictates which one it shows by
-    # default)
-    @property
-    @experimental(as_of="0.4.0")
-    def png(self):
-        """Display basic 3-D scatterplot in IPython Notebook as PNG."""
-        return Image(self._repr_png_(), embed=True)
-
-    @property
-    @experimental(as_of="0.4.0")
-    def svg(self):
-        """Display basic 3-D scatterplot in IPython Notebook as SVG."""
-        return SVG(self._repr_svg_())
-
-    def _figure_data(self, format):
-        fig = self.plot()
-        data = print_figure(fig, format)
-        # We MUST close the figure, otherwise IPython's display machinery
-        # will pick it up and send it as output, resulting in a double display
-        plt.close(fig)
-        return data
-
-    def _format_attribute(self, attr, attr_label, formatter):
-        if attr is None:
-            formatted_attr = 'N/A'
-        else:
-            formatted_attr = formatter(attr)
-        return '\t%s: %s' % (attr_label, formatted_attr)
-
-
-class Ordination(object):
-    short_method_name = 'Overwrite in subclass!'
-    long_method_name = 'Overwrite in subclass!'
diff --git a/skbio/stats/ordination/_canonical_correspondence_analysis.py b/skbio/stats/ordination/_canonical_correspondence_analysis.py
index 3cc573a..9c7a213 100644
--- a/skbio/stats/ordination/_canonical_correspondence_analysis.py
+++ b/skbio/stats/ordination/_canonical_correspondence_analysis.py
@@ -9,23 +9,25 @@
 from __future__ import absolute_import, division, print_function
 
 import numpy as np
+import pandas as pd
+from scipy.linalg import svd, lstsq
 
-from ._base import Ordination, OrdinationResults
+from skbio._base import OrdinationResults
 from ._utils import corr, svd_rank, scale
 from skbio.util._decorator import experimental
 
 
-class CCA(Ordination):
-    r"""Compute constrained (also known as canonical) correspondence
+@experimental(as_of="0.4.0")
+def cca(y, x, scaling=1):
+    r"""Compute canonical (also known as constrained) correspondence
     analysis.
 
     Canonical (or constrained) correspondence analysis is a
     multivariate ordination technique. It appeared in community
     ecology [1]_ and relates community composition to the variation in
     the environment (or in other factors). It works from data on
-    abundances or counts of individuals and environmental variables,
-    and outputs ordination axes that maximize niche separation among
-    species.
+    abundances or counts of samples and constraint variables,
+    and outputs ordination axes that maximize sample separation among species.
 
     It is better suited to extract the niches of taxa than linear
     multivariate methods because it assumes unimodal response curves
@@ -38,16 +40,35 @@ class CCA(Ordination):
 
     Parameters
     ----------
-    Y : array_like Community data matrix of shape (n, m): a
-        contingency table for m species at n sites.
-    X : array_like Constraining matrix of shape (n, q): q quantitative
-        environmental variables at n sites.
+    y : DataFrame
+        Samples by features table (n, m)
+    x : DataFrame
+        Samples by constraints table (n, q)
+    scaling : int, {1, 2}, optional
+        Scaling type 1 maintains :math:`\chi^2` distances between rows.
+        Scaling type 2 preserves :math:`\chi^2` distances between columns.
+        For a more detailed explanation of the interpretation, check Legendre &
+        Legendre 1998, section 9.4.3.
+
+    Returns
+    -------
+    OrdinationResults
+        Object that stores the cca results.
+
+    Raises
+    ------
+    ValueError
+        If `x` and `y` have different numbers of rows.
+        If `y` contains negative values.
+        If `y` contains a row of only 0's.
+    NotImplementedError
+        If scaling is not 1 or 2.
 
     Notes
     -----
 
     The algorithm is based on [3]_, \S 11.2, and is expected to give
-    the same results as ``cca(Y, X)`` in R's package vegan, except
+    the same results as ``cca(y, x)`` in R's package vegan, except
     that this implementation won't drop constraining variables due to
     perfect collinearity: the user needs to choose which ones to
     input.
@@ -62,16 +83,11 @@ class CCA(Ordination):
     "environmental variables" and is not well suited to analyze
     ecological data.
 
-    In data analysis, ordination (or multivariate gradient analysis)
-    complements clustering by arranging objects (species, samples...)
-    along gradients so that similar ones are closer and dissimilar
-    ones are further. There's a good overview of the available
-    techniques in http://ordination.okstate.edu/overview.htm.
-
     See Also
     --------
-    CA
-    RDA
+    ca
+    rda
+    OrdinationResults
 
     References
     ----------
@@ -87,167 +103,132 @@ class CCA(Ordination):
        Ecology. Elsevier, Amsterdam.
 
     """
-    short_method_name = 'CCA'
-    long_method_name = 'Canonical Correspondence Analysis'
-
-    @experimental(as_of="0.4.0")
-    def __init__(self, Y, X, site_ids, species_ids):
-        self.Y = np.asarray(Y, dtype=np.float64)
-        self.X = np.asarray(X, dtype=np.float64)
-        self.site_ids = site_ids
-        self.species_ids = species_ids
-        self._cca()
-
-    def _cca(self):
-        X, Y = self.X, self.Y
-        if X.shape[0] != Y.shape[0]:
-            raise ValueError("Contingency and environmental tables must have"
-                             " the same number of rows (sites). X has {0}"
-                             " rows but Y has {1}.".format(X.shape[0],
+    Y = y.as_matrix()
+    X = x.as_matrix()
+
+    # Perform parameter sanity checks
+    if X.shape[0] != Y.shape[0]:
+        raise ValueError("The samples by features table 'y' and the samples by"
+                         " constraints table 'x' must have the same number of "
+                         " rows. 'y': {0} 'x': {1}".format(X.shape[0],
                                                            Y.shape[0]))
-        if Y.min() < 0:
-            raise ValueError("Contingency table must be nonnegative")
-        row_max = Y.max(axis=1)
-        if np.any(row_max <= 0):
-            # Or else the lstsq call to compute Y_hat breaks
-            raise ValueError("Contingency table cannot contain row of only 0s")
-
-        # Step 1 (similar to Pearson chi-square statistic)
-        grand_total = Y.sum()
-        Q = Y / grand_total  # Relative frequencies of X (contingency table)
-
-        # Species and site weights (marginal totals)
-        column_marginals = Q.sum(axis=0)
-        row_marginals = Q.sum(axis=1)
-
-        # Formula 9.32 in Lagrange & Lagrange (1998). Notice that it's an
-        # scaled version of the contribution of each cell towards Pearson
-        # chi-square statistic.
-        expected = np.outer(row_marginals, column_marginals)
-        Q_bar = (Q - expected) / np.sqrt(expected)
-
-        # Step 2. Standardize columns of Y with respect to site weights,
-        # using the maximum likelyhood variance estimator (Legendre &
-        # Legendre 1998, p. 595)
-        X = scale(X, weights=row_marginals, ddof=0)
-
-        # Step 3. Weighted multiple regression.
-        X_weighted = row_marginals[:, None]**0.5 * X
-        B, _, rank_lstsq, _ = np.linalg.lstsq(X_weighted, Q_bar)
-        Y_hat = X_weighted.dot(B)
-        Y_res = Q_bar - Y_hat
-
-        # Step 4. Eigenvalue decomposition
-        u, s, vt = np.linalg.svd(Y_hat, full_matrices=False)
-        rank = svd_rank(Y_hat.shape, s)
-        s = s[:rank]
-        u = u[:, :rank]
-        vt = vt[:rank]
-        U = vt.T
-
-        # Step 5. Eq. 9.38
-        U_hat = Q_bar.dot(U) * s**-1
-
-        # Residuals analysis
-        u_res, s_res, vt_res = np.linalg.svd(Y_res, full_matrices=False)
-        rank = svd_rank(Y_res.shape, s_res)
-        s_res = s_res[:rank]
-        u_res = u_res[:, :rank]
-        vt_res = vt_res[:rank]
-
-        U_res = vt_res.T
-        U_hat_res = Y_res.dot(U_res) * s_res**-1
-
-        # Storing values needed to compute scores
-        iter_ = (('column_marginals', column_marginals),
-                 ('row_marginals', row_marginals),
-                 ('U', U),
-                 ('U_res', U_res),
-                 ('U_hat', U_hat),
-                 ('U_hat_res', U_hat_res),
-                 ('u', u), ('Y_hat', Y_hat),
-                 ('s', s), ('s_res', s_res),
-                 ('X_weighted', X_weighted[:, :rank_lstsq]))
-        for val_name, val in iter_:
-            setattr(self, val_name, val)
-
-        self.eigenvalues = np.r_[s, s_res]**2
-
-    @experimental(as_of="0.4.0")
-    def scores(self, scaling):
-        r"""Compute site and species scores for different scalings.
-
-        Parameters
-        ----------
-        scaling : int
-            The same options as in `CA` are available, and the
-            interpretation is the same.
-
-        Returns
-        -------
-        OrdinationResults
-            Object that stores the computed eigenvalues, the
-            proportion explained by each of them (per unit),
-            transformed coordinates for species and sites, biplot
-            scores, site constraints, etc.
-
-        See Also
-        --------
-        OrdinationResults
-        """
-        if scaling not in {1, 2}:
-            raise NotImplementedError(
-                "Scaling {0} not implemented.".format(scaling))
-        # In this case scores are also a bit intertwined, so we'll
-        # almost compute them both and then choose.
-
-        # Scalings (p. 596 L&L 1998):
-        # Species scores, scaling 1
-        V = (self.column_marginals**-0.5)[:, None] * self.U
-
-        # Site scores, scaling 2
-        V_hat = (self.row_marginals**-0.5)[:, None] * self.U_hat
-
-        # Site scores, scaling 1
-        F = V_hat * self.s
-
-        # Species scores, scaling 2
-        F_hat = V * self.s
-
-        # Site scores which are linear combinations of environmental
-        # variables
-        Z_scaling1 = ((self.row_marginals**-0.5)[:, None] *
-                      self.Y_hat.dot(self.U))
-        Z_scaling2 = Z_scaling1 * self.s**-1
-
-        # Species residual scores, scaling 1
-        V_res = (self.column_marginals**-0.5)[:, None] * self.U_res
-
-        # Site residual scores, scaling 2
-        V_hat_res = (self.row_marginals**-0.5)[:, None] * self.U_hat_res
-
-        # Site residual scores, scaling 1
-        F_res = V_hat_res * self.s_res
-
-        # Species residual scores, scaling 2
-        F_hat_res = V_res * self.s_res
-
-        eigvals = self.eigenvalues
-        if scaling == 1:
-            species_scores = np.hstack((V, V_res))
-            site_scores = np.hstack((F, F_res))
-            site_constraints = np.hstack((Z_scaling1, F_res))
-        elif scaling == 2:
-            species_scores = np.hstack((F_hat, F_hat_res))
-            site_scores = np.hstack((V_hat, V_hat_res))
-            site_constraints = np.hstack((Z_scaling2, V_hat_res))
-
-        biplot_scores = corr(self.X_weighted, self.u)
-        return OrdinationResults(eigvals=eigvals,
-                                 proportion_explained=eigvals / eigvals.sum(),
-                                 species=species_scores,
-                                 site=site_scores,
-                                 biplot=biplot_scores,
-                                 site_constraints=site_constraints,
-                                 site_ids=self.site_ids,
-                                 species_ids=self.species_ids)
+    if Y.min() < 0:
+        raise ValueError(
+            "The samples by features table 'y' must be nonnegative")
+    row_max = Y.max(axis=1)
+    if np.any(row_max <= 0):
+        # Or else the lstsq call to compute Y_hat breaks
+        raise ValueError("The samples by features table 'y' cannot contain a "
+                         "row with only 0's")
+    if scaling not in {1, 2}:
+        raise NotImplementedError(
+            "Scaling {0} not implemented.".format(scaling))
+
+    # Step 1 (similar to Pearson chi-square statistic)
+    grand_total = Y.sum()
+    Q = Y / grand_total  # Relative frequencies of Y (contingency table)
+
+    # Features and sample weights (marginal totals)
+    column_marginals = Q.sum(axis=0)
+    row_marginals = Q.sum(axis=1)
+
+    # Formula 9.32 in Legendre & Legendre (1998). Notice that it's a
+    # scaled version of the contribution of each cell towards the Pearson
+    # chi-square statistic.
+    expected = np.outer(row_marginals, column_marginals)
+    Q_bar = (Q - expected) / np.sqrt(expected)
+
+    # Step 2. Standardize columns of X with respect to sample weights,
+    # using the maximum likelihood variance estimator (Legendre &
+    # Legendre 1998, p. 595)
+    X = scale(X, weights=row_marginals, ddof=0)
+
+    # Step 3. Weighted multiple regression.
+    X_weighted = row_marginals[:, None]**0.5 * X
+    B, _, rank_lstsq, _ = lstsq(X_weighted, Q_bar)
+    Y_hat = X_weighted.dot(B)
+    Y_res = Q_bar - Y_hat
+
+    # Step 4. Eigenvalue decomposition
+    u, s, vt = svd(Y_hat, full_matrices=False)
+    rank = svd_rank(Y_hat.shape, s)
+    s = s[:rank]
+    u = u[:, :rank]
+    vt = vt[:rank]
+    U = vt.T
+
+    # Step 5. Eq. 9.38
+    U_hat = Q_bar.dot(U) * s**-1
+
+    # Residuals analysis
+    u_res, s_res, vt_res = svd(Y_res, full_matrices=False)
+    rank = svd_rank(Y_res.shape, s_res)
+    s_res = s_res[:rank]
+    u_res = u_res[:, :rank]
+    vt_res = vt_res[:rank]
+
+    U_res = vt_res.T
+    U_hat_res = Y_res.dot(U_res) * s_res**-1
+
+    eigenvalues = np.r_[s, s_res]**2
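+    # Constrained (s) and residual (s_res) singular values are concatenated;
+    # squaring them gives the eigenvalues reported in the results object.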
+
+    # Scalings (p. 596 L&L 1998):
+    # feature scores, scaling 1
+    V = (column_marginals**-0.5)[:, None] * U
+
+    # sample scores, scaling 2
+    V_hat = (row_marginals**-0.5)[:, None] * U_hat
+
+    # sample scores, scaling 1
+    F = V_hat * s
+
+    # feature scores, scaling 2
+    F_hat = V * s
+
+    # Sample scores which are linear combinations of constraint
+    # variables
+    Z_scaling1 = ((row_marginals**-0.5)[:, None] *
+                  Y_hat.dot(U))
+    Z_scaling2 = Z_scaling1 * s**-1
+
+    # Feature residual scores, scaling 1
+    V_res = (column_marginals**-0.5)[:, None] * U_res
+
+    # Sample residual scores, scaling 2
+    V_hat_res = (row_marginals**-0.5)[:, None] * U_hat_res
+
+    # Sample residual scores, scaling 1
+    F_res = V_hat_res * s_res
+
+    # Feature residual scores, scaling 2
+    F_hat_res = V_res * s_res
+
+    eigvals = eigenvalues
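+    # Pick scores for the requested scaling (see the docstring above):
+    # scaling 1 -> (V, F) and scaling 2 -> (F_hat, V_hat) as the
+    # (feature, sample) scores, with the matching constraint scores.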
+    if scaling == 1:
+        features_scores = np.hstack((V, V_res))
+        sample_scores = np.hstack((F, F_res))
+        sample_constraints = np.hstack((Z_scaling1, F_res))
+    elif scaling == 2:
+        features_scores = np.hstack((F_hat, F_hat_res))
+        sample_scores = np.hstack((V_hat, V_hat_res))
+        sample_constraints = np.hstack((Z_scaling2, V_hat_res))
+
+    biplot_scores = corr(X_weighted, u)
+
+    pc_ids = ['CCA%d' % (i+1) for i in range(len(eigenvalues))]
+    sample_ids = y.index
+    feature_ids = y.columns
+    eigvals = pd.Series(eigenvalues, index=pc_ids)
+    samples = pd.DataFrame(sample_scores,
+                           columns=pc_ids, index=sample_ids)
+    features = pd.DataFrame(features_scores,
+                            columns=pc_ids, index=feature_ids)
+    biplot_scores = pd.DataFrame(biplot_scores)
+    sample_constraints = pd.DataFrame(sample_constraints,
+                                      index=sample_ids, columns=pc_ids)
+
+    return OrdinationResults(
+        "CCA", "Canonical Correspondence Analysis", eigvals, samples,
+        features=features, biplot_scores=biplot_scores,
+        sample_constraints=sample_constraints,
+        proportion_explained=eigvals / eigvals.sum())
diff --git a/skbio/stats/ordination/_correspondence_analysis.py b/skbio/stats/ordination/_correspondence_analysis.py
index 18e55de..1f1ca9c 100644
--- a/skbio/stats/ordination/_correspondence_analysis.py
+++ b/skbio/stats/ordination/_correspondence_analysis.py
@@ -9,18 +9,21 @@
 from __future__ import absolute_import, division, print_function
 
 import numpy as np
+import pandas as pd
+from scipy.linalg import svd
 
-from ._base import Ordination, OrdinationResults
+from skbio._base import OrdinationResults
 from ._utils import svd_rank
 from skbio.util._decorator import experimental
 
 
-class CA(Ordination):
+@experimental(as_of="0.4.0")
+def ca(X, scaling=1):
     r"""Compute correspondence analysis, a multivariate statistical
     technique for ordination.
 
-    In general, rows in the data table will correspond to sites and
-    columns to species, but the method is symmetric. In order to
+    In general, rows in the data table will correspond to samples and
+    columns to features, but the method is symmetric. In order to
     measure the correspondence between rows and columns, the
     :math:`\chi^2` distance is used, and those distances are preserved
     in the transformed space. The :math:`\chi^2` distance doesn't take
@@ -33,158 +36,156 @@ class CA(Ordination):
 
     Parameters
     ----------
-    X : array_like
-        Contingency table. It can be applied to different kinds of
-        data tables but data must be non-negative and dimensionally
-        homogeneous (quantitative or binary).
+    X : pd.DataFrame
+        Samples by features table (n, m). It can be applied to different kinds
+        of data tables but data must be non-negative and dimensionally
+        homogeneous (quantitative or binary). The rows correspond to the
+        samples and the columns correspond to the features.
+    scaling : {1, 2}
+        For a more detailed explanation of the interpretation, check Legendre &
+        Legendre 1998, section 9.4.3. The notes that follow are quick
+        recommendations.
+
+        Scaling type 1 maintains :math:`\chi^2` distances between rows
+        (samples): in the transformed space, the euclidean distances between
+        rows are equal to the :math:`\chi^2` distances between rows in the
+        original space. It should be used when studying the ordination of
+        samples. Rows (samples) that are near a column (feature) have high
+        contributions from it.
+
+        Scaling type 2 preserves :math:`\chi^2` distances between columns
+        (features), so euclidean distance between columns after transformation
+        is equal to :math:`\chi^2` distance between columns in the original
+        space. It is best used when we are interested in the ordination of
+        features. A column (feature) that is next to a row (sample) means that
+        it is more abundant there.
+
+        Other types of scalings are currently not implemented, as they're less
+        used by ecologists (Legendre & Legendre 1998, p. 456).
+
+        In general, features appearing far from the center of the biplot and
+        far from its edges will probably exhibit better relationships than
+        features either in the center (may be multimodal features, not related
+        to the shown ordination axes...) or the edges (sparse features...).
+
+    Returns
+    -------
+    OrdinationResults
+        Object that stores the computed eigenvalues, the transformed sample
+        coordinates and the transformed features coordinates.
+
+    Raises
+    ------
+    NotImplementedError
+        If the scaling value is not either `1` or `2`.
+    ValueError
+        If any of the input matrix elements are negative.
 
     Notes
     -----
-    The algorithm is based on [1]_, \S 9.4.1., and is expected to give
-    the same results as ``cca(X)`` in R's package vegan.
+    The algorithm is based on [1]_, \S 9.4.1., and is expected to give the same
+    results as ``cca(X)`` in R's package vegan.
 
     See Also
     --------
-    CCA
+    cca
 
     References
     ----------
-    .. [1] Legendre P. and Legendre L. 1998. Numerical
-       Ecology. Elsevier, Amsterdam.
+    .. [1] Legendre P. and Legendre L. 1998. Numerical Ecology. Elsevier,
+       Amsterdam.
 
     """
+
+    if scaling not in {1, 2}:
+        raise NotImplementedError(
+            "Scaling {0} not implemented.".format(scaling))
+
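+    # Illustrative usage (hypothetical 3x3 contingency table; values chosen
+    # arbitrarily):
+    #     ca(pd.DataFrame([[10, 10, 20],
+    #                      [10, 15, 10],
+    #                      [15,  5,  5]]), scaling=1)
+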
     short_method_name = 'CA'
-    long_method_name = 'Canonical Analysis'
-
-    @experimental(as_of="0.4.0")
-    def __init__(self, X, row_ids, column_ids):
-        self.X = np.asarray(X, dtype=np.float64)
-        self._ca()
-        self.row_ids = row_ids
-        self.column_ids = column_ids
-
-    def _ca(self):
-        X = self.X
-        r, c = X.shape
-
-        if X.min() < 0:
-            raise ValueError("Input matrix elements must be non-negative.")
-
-        # Step 1 (similar to Pearson chi-square statistic)
-        grand_total = X.sum()
-        Q = X / grand_total
-
-        column_marginals = Q.sum(axis=0)
-        row_marginals = Q.sum(axis=1)
-        # Let's store them since they're needed to compute scores
-        self.column_marginals = column_marginals
-        self.row_marginals = row_marginals
-
-        # Formula 9.32 in Lagrange & Lagrange (1998). Notice that it's
-        # an scaled version of the contribution of each cell towards
-        # Pearson chi-square statistic.
-        expected = np.outer(row_marginals, column_marginals)
-        Q_bar = (Q - expected) / np.sqrt(expected)  # Eq. 9.32
-
-        # Step 2 (Singular Value Decomposition)
-        U_hat, W, Ut = np.linalg.svd(Q_bar, full_matrices=False)
-        # Due to the centering, there are at most min(r, c) - 1 non-zero
-        # eigenvalues (which are all positive)
-        rank = svd_rank(Q_bar.shape, W)
-        assert rank <= min(r, c) - 1
-        self.U_hat = U_hat[:, :rank]
-        self.W = W[:rank]
-        self.U = Ut[:rank].T
-
-    @experimental(as_of="0.4.0")
-    def scores(self, scaling):
-        r"""Compute site and species scores for different scalings.
-
-        Parameters
-        ----------
-        scaling : int
-
-            For a more detailed explanation of the interpretation, check
-            Legendre & Legendre 1998, section 9.4.3. The notes that
-            follow are quick recommendations.
-
-            Scaling type 1 maintains :math:`\chi^2` distances between
-            rows (sites): in the transformed space, the euclidean
-            distances between rows are equal to the :math:`\chi^2`
-            distances between rows in the original space. It should be
-            used when studying the ordination of sites. Rows (sites)
-            that are near a column (species) have high contributions
-            from it.
-
-            Scaling type 2 preserves :math:`\chi^2` distances between
-            columns (species), so euclidean distance between columns
-            after transformation is equal to :math:`\chi^2` distance
-            between columns in the original space. It is best used
-            when we are interested in the ordination of species. A
-            column (species) that is next to a row (site) means that
-            it is more abundant there.
-
-            Other types of scalings are currently not implemented, as
-            they're less used by ecologists (Legendre & Legendre 1998,
-            p. 456).
-
-            In general, species appearing far from the center of the
-            biplot and far from its edges will probably exhibit better
-            relationships than species either in the center (may be
-            multimodal species, not related to the shown ordination
-            axes...) or the edges (sparse species...).
-
-        Returns
-        -------
-        OrdinationResults
-            Object that stores the computed eigenvalues, the
-            proportion explained by each of them (per unit),
-            transformed coordinates, etc.
-
-        See Also
-        --------
-        OrdinationResults
-        """
-
-        if scaling not in {1, 2}:
-            raise NotImplementedError(
-                "Scaling {0} not implemented.".format(scaling))
-        # Both scalings are a bit intertwined, so we'll compute both and
-        # then choose
-        V = self.column_marginals[:, None]**-0.5 * self.U
-        V_hat = self.row_marginals[:, None]**-0.5 * self.U_hat
-        F = V_hat * self.W
-        # According to Formula 9.43, this should hold
-        # assert np.allclose(F, (row_marginals**-1)[:, None] * Q.dot(V))
-        # but it doesn't (notice that W**2==Lambda):
-        # (9.43a) F = V_hat W = D(p_i+)^{-1/2} U_hat W
-        #           = D(p_i+)^{-1/2} Q_bar U W^{-1} W  (substituting 9.38)
-        #           = D(p_i+)^{-1/2} Q_bar U
-        # (9.43b) F = D(p_i+)^{-1} Q V
-        #           = D(p_i+)^{-1} Q D(p_+j)^{-1/2} U  (substituting 9.41)
-        #           = D(p_i+)^{-1/2} D(p_i+)^{-1/2} Q D(p_+j)^{-1/2} U
-        #           = D(p_i+)^{-1/2} Q_tilde U         (using 9.40)
-        # It holds if we replace Q in 9.43b with Q after centering, ie
-        # assert np.allclose(
-        #    F,
-        #    (row_marginals**-1)[:, None] * (Q - expected).dot(V))
-        # Comparing results with vegan and the examples in the book, 9.43a
-        # is the right one. The same issue happens in 9.44, where also
-        # 9.44a is the one that matches vegan's output.
-        # (9.44a) F_hat = V W = D(p_+j)^{-1/2} U W
-        #               = D(p_+j)^{-1/2} Q_bar' U_hat W^{-1} W (using 9.39)
-        #               = D(p_+j)^{-1/2} Q_bar' U_hat
-        # (9.44b) F_hat = D(p_+j)^{-1} Q' V_hat
-        #               = D(p_+j)^{-1/2} Q_tilde' U_hat (using 9.40 and 9.42)
-        F_hat = V * self.W
-
-        # Eigenvalues
-        eigvals = self.W**2
-
-        # Species scores
-        species_scores = [V, F_hat][scaling - 1]
-        # Site scores (weighted averages of species scores)
-        site_scores = [F, V_hat][scaling - 1]
-        return OrdinationResults(eigvals=eigvals, species=species_scores,
-                                 site=site_scores, site_ids=self.row_ids,
-                                 species_ids=self.column_ids)
+    long_method_name = 'Correspondence Analysis'
+
+    # We deconstruct the dataframe to avoid duplicating the data and to be
+    # able to perform operations directly on the underlying matrix.
+    row_ids = X.index
+    column_ids = X.columns
+    X = np.asarray(X.values, dtype=np.float64)
+
+    # Correspondence Analysis
+    r, c = X.shape
+
+    if X.min() < 0:
+        raise ValueError("Input matrix elements must be non-negative.")
+
+    # Step 1 (similar to Pearson chi-square statistic)
+    grand_total = X.sum()
+    Q = X / grand_total
+
+    column_marginals = Q.sum(axis=0)
+    row_marginals = Q.sum(axis=1)
+
+    # Formula 9.32 in Legendre & Legendre (1998). Notice that it's
+    # a scaled version of the contribution of each cell towards the
+    # Pearson chi-square statistic.
+    expected = np.outer(row_marginals, column_marginals)
+    Q_bar = (Q - expected) / np.sqrt(expected)  # Eq. 9.32
+
+    # Step 2 (Singular Value Decomposition)
+    U_hat, W, Ut = svd(Q_bar, full_matrices=False)
+    # Due to the centering, there are at most min(r, c) - 1 non-zero
+    # eigenvalues (which are all positive)
+    rank = svd_rank(Q_bar.shape, W)
+    assert rank <= min(r, c) - 1
+    U_hat = U_hat[:, :rank]
+    W = W[:rank]
+    U = Ut[:rank].T
+
+    # Both scalings are a bit intertwined, so we'll compute both and
+    # then choose
+    V = column_marginals[:, None]**-0.5 * U
+    V_hat = row_marginals[:, None]**-0.5 * U_hat
+    F = V_hat * W
+    # According to Formula 9.43, this should hold
+    # assert np.allclose(F, (row_marginals**-1)[:, None] * Q.dot(V))
+    # but it doesn't (notice that W**2==Lambda):
+    # (9.43a) F = V_hat W = D(p_i+)^{-1/2} U_hat W
+    #           = D(p_i+)^{-1/2} Q_bar U W^{-1} W  (substituting 9.38)
+    #           = D(p_i+)^{-1/2} Q_bar U
+    # (9.43b) F = D(p_i+)^{-1} Q V
+    #           = D(p_i+)^{-1} Q D(p_+j)^{-1/2} U  (substituting 9.41)
+    #           = D(p_i+)^{-1/2} D(p_i+)^{-1/2} Q D(p_+j)^{-1/2} U
+    #           = D(p_i+)^{-1/2} Q_tilde U         (using 9.40)
+    # It holds if we replace Q in 9.43b with Q after centering, ie
+    # assert np.allclose(
+    #    F,
+    #    (row_marginals**-1)[:, None] * (Q - expected).dot(V))
+    # Comparing results with vegan and the examples in the book, 9.43a
+    # is the right one. The same issue happens in 9.44, where also
+    # 9.44a is the one that matches vegan's output.
+    # (9.44a) F_hat = V W = D(p_+j)^{-1/2} U W
+    #               = D(p_+j)^{-1/2} Q_bar' U_hat W^{-1} W (using 9.39)
+    #               = D(p_+j)^{-1/2} Q_bar' U_hat
+    # (9.44b) F_hat = D(p_+j)^{-1} Q' V_hat
+    #               = D(p_+j)^{-1/2} Q_tilde' U_hat (using 9.40 and 9.42)
+    F_hat = V * W
+
+    # Eigenvalues
+    eigvals = W**2
+
+    # Feature scores
+    features_scores = [V, F_hat][scaling - 1]
+    # Sample scores (weighted averages of feature scores)
+    sample_scores = [F, V_hat][scaling - 1]
+
+    # build the OrdinationResults object
+    sample_columns = ['%s%d' % (short_method_name, i+1)
+                      for i in range(sample_scores.shape[1])]
+    feature_columns = ['%s%d' % (short_method_name, i+1)
+                       for i in range(features_scores.shape[1])]
+
+    eigvals = pd.Series(eigvals, ['%s%d' % (short_method_name, i+1) for i in
+                                  range(eigvals.shape[0])])
+    samples = pd.DataFrame(sample_scores, row_ids, sample_columns)
+    features = pd.DataFrame(features_scores, column_ids, feature_columns)
+
+    return OrdinationResults(short_method_name, long_method_name, eigvals,
+                             samples=samples, features=features)
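
A minimal usage sketch of the functional ``ca`` interface above. It assumes ``ca`` is importable from ``skbio.stats.ordination`` and that ``scaling`` can be passed positionally, matching the call style in the tests added later in this patch; the contingency table values and IDs are made up for illustration:

    import pandas as pd
    from skbio.stats.ordination import ca

    # Hypothetical contingency table: 3 samples (rows) x 3 features (columns),
    # non-negative counts with dimensionally homogeneous columns.
    table = pd.DataFrame([[10, 10, 20],
                          [10, 15, 10],
                          [15,  5,  5]],
                         index=['Site1', 'Site2', 'Site3'],
                         columns=['Species1', 'Species2', 'Species3'])

    ordination = ca(table, 1)    # scaling 1: chi^2 distances among rows
    print(ordination.eigvals)    # pd.Series indexed 'CA1', 'CA2', ...
    print(ordination.samples)    # row (sample) scores
    print(ordination.features)   # column (feature) scores

With scaling 1 the euclidean distances between rows of ``ordination.samples`` reproduce the :math:`\chi^2` distances between rows of the frequency table, which is what ``test_maintain_chi_square_distance_scaling1`` below verifies.
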
diff --git a/skbio/stats/ordination/_principal_coordinate_analysis.py b/skbio/stats/ordination/_principal_coordinate_analysis.py
index 2c3092c..7320ef9 100644
--- a/skbio/stats/ordination/_principal_coordinate_analysis.py
+++ b/skbio/stats/ordination/_principal_coordinate_analysis.py
@@ -10,10 +10,13 @@ from __future__ import absolute_import, division, print_function
 
 from warnings import warn
 
+import pandas as pd
 import numpy as np
+from scipy.linalg import eigh
 
+from skbio._base import OrdinationResults
 from skbio.stats.distance import DistanceMatrix
-from ._base import Ordination, OrdinationResults
+from ._utils import e_matrix, f_matrix
 from skbio.util._decorator import experimental
 
 # - In cogent, after computing eigenvalues/vectors, the imaginary part
@@ -25,7 +28,8 @@ from skbio.util._decorator import experimental
 #   so, so I'm not doing that.
 
 
-class PCoA(Ordination):
+@experimental(as_of="0.4.0")
+def pcoa(distance_matrix):
     r"""Perform Principal Coordinate Analysis.
 
     Principal Coordinate Analysis (PCoA) is a method similar to PCA
@@ -64,111 +68,68 @@ class PCoA(Ordination):
        appear, allowing the user to decide if they can be safely
        ignored.
     """
-    short_method_name = 'PCoA'
-    long_method_name = 'Principal Coordinate Analysis'
-
-    @experimental(as_of="0.4.0")
-    def __init__(self, distance_matrix):
-        if isinstance(distance_matrix, DistanceMatrix):
-            self.dm = np.asarray(distance_matrix.data, dtype=np.float64)
-            self.ids = distance_matrix.ids
-        else:
-            raise TypeError("Input must be a DistanceMatrix.")
-        self._pcoa()
-
-    def _pcoa(self):
-        E_matrix = self._E_matrix(self.dm)
-
-        # If the used distance was euclidean, pairwise distances
-        # needn't be computed from the data table Y because F_matrix =
-        # Y.dot(Y.T) (if Y has been centred).
-        F_matrix = self._F_matrix(E_matrix)
-
-        # If the eigendecomposition ever became a bottleneck, it could
-        # be replaced with an iterative version that computes the
-        # largest k eigenvectors.
-        eigvals, eigvecs = np.linalg.eigh(F_matrix)
-
-        # eigvals might not be ordered, so we order them (at least one
-        # is zero). cogent makes eigenvalues positive by taking the
-        # abs value, but that doesn't seem to be an approach accepted
-        # by L&L to deal with negative eigenvalues. We raise a warning
-        # in that case. First, we make values close to 0 equal to 0.
-        negative_close_to_zero = np.isclose(eigvals, 0)
-        eigvals[negative_close_to_zero] = 0
-        if np.any(eigvals < 0):
-            warn(
-                "The result contains negative eigenvalues."
-                " Please compare their magnitude with the magnitude of some"
-                " of the largest positive eigenvalues. If the negative ones"
-                " are smaller, it's probably safe to ignore them, but if they"
-                " are large in magnitude, the results won't be useful. See the"
-                " Notes section for more details. The smallest eigenvalue is"
-                " {0} and the largest is {1}.".format(eigvals.min(),
-                                                      eigvals.max()),
-                RuntimeWarning
-                )
-        idxs_descending = eigvals.argsort()[::-1]
-        self.eigvals = eigvals[idxs_descending]
-        self.eigvecs = eigvecs[:, idxs_descending]
-
-    @experimental(as_of="0.4.0")
-    def scores(self):
-        """Compute coordinates in transformed space.
-
-        Returns
-        -------
-        OrdinationResults
-            Object that stores the computed eigenvalues, the
-            proportion explained by each of them (per unit) and
-            transformed coordinates, etc.
-
-        See Also
-        --------
-        OrdinationResults
-        """
-        # Scale eigenvalues to have lenght = sqrt(eigenvalue). This
-        # works because np.linalg.eigh returns normalized
-        # eigenvectors. Each row contains the coordinates of the
-        # objects in the space of principal coordinates. Note that at
-        # least one eigenvalue is zero because only n-1 axes are
-        # needed to represent n points in an euclidean space.
-
-        # If we return only the coordinates that make sense (i.e., that have a
-        # corresponding positive eigenvalue), then Jackknifed Beta Diversity
-        # won't work as it expects all the OrdinationResults to have the same
-        # number of coordinates. In order to solve this issue, we return the
-        # coordinates that have a negative eigenvalue as 0
-        num_positive = (self.eigvals >= 0).sum()
-        eigvecs = self.eigvecs
-        eigvecs[:, num_positive:] = np.zeros(eigvecs[:, num_positive:].shape)
-        eigvals = self.eigvals
-        eigvals[num_positive:] = np.zeros(eigvals[num_positive:].shape)
-
-        coordinates = eigvecs * np.sqrt(eigvals)
-
-        proportion_explained = eigvals / eigvals.sum()
-
-        return OrdinationResults(eigvals=eigvals, site=coordinates,
-                                 proportion_explained=proportion_explained,
-                                 site_ids=self.ids)
-
-    @staticmethod
-    def _E_matrix(distance_matrix):
-        """Compute E matrix from a distance matrix.
-
-        Squares and divides by -2 the input elementwise. Eq. 9.20 in
-        Legendre & Legendre 1998."""
-        return distance_matrix * distance_matrix / -2
-
-    @staticmethod
-    def _F_matrix(E_matrix):
-        """Compute F matrix from E matrix.
-
-        Centring step: for each element, the mean of the corresponding
-        row and column are substracted, and the mean of the whole
-        matrix is added. Eq. 9.21 in Legendre & Legendre 1998."""
-        row_means = E_matrix.mean(axis=1, keepdims=True)
-        col_means = E_matrix.mean(axis=0, keepdims=True)
-        matrix_mean = E_matrix.mean()
-        return E_matrix - row_means - col_means + matrix_mean
+    distance_matrix = DistanceMatrix(distance_matrix)
+
+    E_matrix = e_matrix(distance_matrix.data)
+
+    # If the used distance was euclidean, pairwise distances
+    # needn't be computed from the data table Y because F_matrix =
+    # Y.dot(Y.T) (if Y has been centred).
+    F_matrix = f_matrix(E_matrix)
+
+    # If the eigendecomposition ever became a bottleneck, it could
+    # be replaced with an iterative version that computes the
+    # largest k eigenvectors.
+    eigvals, eigvecs = eigh(F_matrix)
+
+    # eigvals might not be ordered, so we order them (at least one
+    # is zero). cogent makes eigenvalues positive by taking the
+    # abs value, but that doesn't seem to be an approach accepted
+    # by L&L to deal with negative eigenvalues. We raise a warning
+    # in that case. First, we make values close to 0 equal to 0.
+    negative_close_to_zero = np.isclose(eigvals, 0)
+    eigvals[negative_close_to_zero] = 0
+    if np.any(eigvals < 0):
+        warn(
+            "The result contains negative eigenvalues."
+            " Please compare their magnitude with the magnitude of some"
+            " of the largest positive eigenvalues. If the negative ones"
+            " are smaller, it's probably safe to ignore them, but if they"
+            " are large in magnitude, the results won't be useful. See the"
+            " Notes section for more details. The smallest eigenvalue is"
+            " {0} and the largest is {1}.".format(eigvals.min(),
+                                                  eigvals.max()),
+            RuntimeWarning
+            )
+    idxs_descending = eigvals.argsort()[::-1]
+    eigvals = eigvals[idxs_descending]
+    eigvecs = eigvecs[:, idxs_descending]
+
+    # Scale eigenvectors to have length = sqrt(eigenvalue). This
+    # works because scipy.linalg.eigh returns normalized
+    # eigenvectors. Each row contains the coordinates of the
+    # objects in the space of principal coordinates. Note that at
+    # least one eigenvalue is zero because only n-1 axes are
+    # needed to represent n points in a euclidean space.
+
+    # If we return only the coordinates that make sense (i.e., that have a
+    # corresponding positive eigenvalue), then Jackknifed Beta Diversity
+    # won't work as it expects all the OrdinationResults to have the same
+    # number of coordinates. In order to solve this issue, we return the
+    # coordinates that have a negative eigenvalue as 0
+    num_positive = (eigvals >= 0).sum()
+    eigvecs[:, num_positive:] = np.zeros(eigvecs[:, num_positive:].shape)
+    eigvals[num_positive:] = np.zeros(eigvals[num_positive:].shape)
+
+    coordinates = eigvecs * np.sqrt(eigvals)
+    proportion_explained = eigvals / eigvals.sum()
+
+    axis_labels = ['PC%d' % i for i in range(1, eigvals.size + 1)]
+    return OrdinationResults(
+        short_method_name='PCoA',
+        long_method_name='Principal Coordinate Analysis',
+        eigvals=pd.Series(eigvals, index=axis_labels),
+        samples=pd.DataFrame(coordinates, index=distance_matrix.ids,
+                             columns=axis_labels),
+        proportion_explained=pd.Series(proportion_explained,
+                                       index=axis_labels))
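
Similarly, the class-based ``PCoA(dm).scores()`` workflow becomes a single ``pcoa(dm)`` call. A minimal sketch, assuming ``pcoa`` is importable from ``skbio.stats.ordination`` and using a small made-up distance matrix:

    import numpy as np
    from skbio.stats.distance import DistanceMatrix
    from skbio.stats.ordination import pcoa

    # Hypothetical symmetric, hollow distance matrix over three samples.
    dm = DistanceMatrix(np.array([[0.0, 0.5, 1.0],
                                  [0.5, 0.0, 0.8],
                                  [1.0, 0.8, 0.0]]),
                        ids=['A', 'B', 'C'])

    ordination = pcoa(dm)
    print(ordination.eigvals)                # indexed 'PC1', 'PC2', 'PC3'
    print(ordination.samples)                # one row of coordinates per ID
    print(ordination.proportion_explained)   # per-axis fraction of variance

Because at least one eigenvalue is zero (n points need only n - 1 axes) and axes with negative eigenvalues are zeroed out, trailing columns of ``ordination.samples`` may be all zeros.
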
diff --git a/skbio/stats/ordination/_redundancy_analysis.py b/skbio/stats/ordination/_redundancy_analysis.py
index aa85c5f..eb3507e 100644
--- a/skbio/stats/ordination/_redundancy_analysis.py
+++ b/skbio/stats/ordination/_redundancy_analysis.py
@@ -9,17 +9,20 @@
 from __future__ import absolute_import, division, print_function
 
 import numpy as np
+import pandas as pd
+from scipy.linalg import svd, lstsq
 
-from ._base import Ordination, OrdinationResults
+from skbio._base import OrdinationResults
 from ._utils import corr, svd_rank, scale
 from skbio.util._decorator import experimental
 
 
-class RDA(Ordination):
+@experimental(as_of="0.4.0")
+def rda(y, x, scale_Y=False, scaling=1):
     r"""Compute redundancy analysis, a type of canonical analysis.
 
     It is related to PCA and multiple regression because the explained
-    variables `Y` are fitted to the explanatory variables `X` and PCA
+    variables `y` are fitted to the explanatory variables `x` and PCA
     is then performed on the fitted values. A similar process is
     performed on the residuals.
 
@@ -28,26 +31,53 @@ class RDA(Ordination):
 
     Parameters
     ----------
-    Y : array_like
-        :math:`n \times p` response matrix. Its columns need be
-        dimensionally homogeneous (or you can set `scale_Y=True`).
-    X : array_like
+    y : pd.DataFrame
+        :math:`n \times p` response matrix, where :math:`n` is the number
+        of samples and :math:`p` is the number of features. Its columns
+        need to be dimensionally homogeneous (or you can set `scale_Y=True`).
+        This matrix is also referred to as the community matrix, as it
+        commonly stores information about species abundances.
+    x : pd.DataFrame
         :math:`n \times m, n \geq m` matrix of explanatory
-        variables. Its columns need not be standardized, but doing so
-        turns regression coefficients into standard regression
-        coefficients.
+        variables, where :math:`n` is the number of samples and
+        :math:`m` is the number of metadata variables. Its columns
+        need not be standardized, but doing so turns regression
+        coefficients into standard regression coefficients.
     scale_Y : bool, optional
         Controls whether the response matrix columns are scaled to
         have unit standard deviation. Defaults to `False`.
+    scaling : int
+        Scaling type 1 produces a distance biplot. It focuses on
+        the ordination of rows (samples) because their transformed
+        distances approximate their original euclidean
+        distances. Especially interesting when most explanatory
+        variables are binary.
+
+        Scaling type 2 produces a correlation biplot. It focuses
+        on the relationships among explained variables (`y`). It
+        is interpreted like scaling type 1, but taking into
+        account that distances between objects don't approximate
+        their euclidean distances.
+
+        See more details about distance and correlation biplots in
+        [1]_, \S 9.1.4.
+
+    Returns
+    -------
+    OrdinationResults
+        Object that stores the computed eigenvalues, the
+        proportion explained by each of them (per unit),
+        transformed coordinates for feature and samples, biplot
+        scores, sample constraints, etc.
 
     Notes
     -----
     The algorithm is based on [1]_, \S 11.1, and is expected to
-    give the same results as ``rda(Y, X)`` in R's package vegan.
+    give the same results as ``rda(y, x)`` in R's package vegan.
 
     See Also
     --------
-    CCA
+    cca
 
     References
     ----------
@@ -55,182 +85,134 @@ class RDA(Ordination):
        Ecology. Elsevier, Amsterdam.
 
     """
-
-    short_method_name = 'RDA'
-    long_method_name = 'Redundancy Analysis'
-
-    @experimental(as_of="0.4.0")
-    def __init__(self, Y, X, site_ids, species_ids, scale_Y=False):
-        self.Y = np.asarray(Y, dtype=np.float64)
-        self.X = np.asarray(X, dtype=np.float64)
-        self.site_ids = site_ids
-        self.species_ids = species_ids
-        self._rda(scale_Y)
-
-    def _rda(self, scale_Y):
-        n, p = self.Y.shape
-        n_, m = self.X.shape
-        if n != n_:
-            raise ValueError(
-                "Both data matrices must have the same number of rows.")
-        if n < m:
-            # Mmm actually vegan is able to do this case, too
-            raise ValueError(
-                "Explanatory variables cannot have less rows than columns.")
-
-        # Centre response variables (they must be dimensionally
-        # homogeneous)
-        Y = scale(self.Y, with_std=scale_Y)
-        # Centre explanatory variables
-        X = scale(self.X, with_std=False)
-
-        # Distribution of variables should be examined and transformed
-        # if necessary (see paragraph 4 in p. 580 L&L 1998)
-
-        # Compute Y_hat (fitted values by multivariate linear
-        # regression, that is, linear least squares). Formula 11.6 in
-        # L&L 1998 involves solving the normal equations, but that fails
-        # when cond(X) ~ eps**(-0.5). A more expensive but much more
-        # stable solution (fails when cond(X) ~ eps**-1) is computed
-        # using the QR decomposition of X = QR:
-        # (11.6) Y_hat = X [X' X]^{-1} X' Y
-        #              = QR [R'Q' QR]^{-1} R'Q' Y
-        #              = QR [R' R]^{-1} R'Q' Y
-        #              = QR R^{-1} R'^{-1} R' Q' Y
-        #              = Q Q' Y
-        # and B (matrix of regression coefficients)
-        # (11.4) B = [X' X]^{-1} X' Y
-        #          = R^{-1} R'^{-1} R' Q' Y
-        #          = R^{-1} Q'
-        # Q, R = np.linalg.qr(X)
-        # Y_hat = Q.dot(Q.T).dot(Y)
-        # B = scipy.linalg.solve_triangular(R, Q.T.dot(Y))
-        # This works provided X has full rank. When not, you can still
-        # fix it using R's pseudoinverse or partitioning R. To avoid any
-        # issues, like the numerical instability when trying to
-        # reproduce an example in L&L where X was rank-deficient, we'll
-        # just use `np.linalg.lstsq`, which uses the SVD decomposition
-        # under the hood and so it's also more expensive.
-        B, _, rank_X, _ = np.linalg.lstsq(X, Y)
-        Y_hat = X.dot(B)
-        # Now let's perform PCA on the fitted values from the multiple
-        # regression
-        u, s, vt = np.linalg.svd(Y_hat, full_matrices=False)
-        # vt are the right eigenvectors, which is what we need to
-        # perform PCA. That is, we're changing points in Y_hat from the
-        # canonical basis to the orthonormal basis given by the right
-        # eigenvectors of Y_hat (or equivalently, the eigenvectors of
-        # the covariance matrix Y_hat.T.dot(Y_hat))
-        # See 3) in p. 583 in L&L 1998
-        rank = svd_rank(Y_hat.shape, s)
-        # Theoretically, there're at most min(p, m, n - 1) non-zero eigenvalues
-
-        U = vt[:rank].T  # U as in Fig. 11.2
-
-        # Ordination in the space of response variables. Its columns are
-        # site scores. (Eq. 11.12)
-        F = Y.dot(U)
-        # Ordination in the space of explanatory variables. Its columns
-        # are fitted site scores. (Eq. 11.13)
-        Z = Y_hat.dot(U)
-
-        # Canonical coefficients (formula 11.14)
-        # C = B.dot(U)  # Not used
-
-        Y_res = Y - Y_hat
-        # PCA on the residuals
-        u_res, s_res, vt_res = np.linalg.svd(Y_res, full_matrices=False)
-        # See 9) in p. 587 in L&L 1998
-        rank_res = svd_rank(Y_res.shape, s_res)
-        # Theoretically, there're at most min(p, n - 1) non-zero eigenvaluesas
-
-        U_res = vt_res[:rank_res].T
-        F_res = Y_res.dot(U_res)  # Ordination in the space of residuals
-
-        # Storing values needed to compute scores
-        iter_ = (('U', U), ('U_res', U_res),
-                 ('F', F),
-                 ('F_res', F_res),
-                 ('Z', Z),
-                 ('u', u[:, :rank]))
-        for val_name, val in iter_:
-            setattr(self, val_name, val)
-
-        self.eigenvalues = np.r_[s[:rank], s_res[:rank_res]]
-
-    @experimental(as_of="0.4.0")
-    def scores(self, scaling):
-        """Compute site, species and biplot scores for different scalings.
-
-        Parameters
-        ----------
-        scaling : int
-
-            Scaling type 1 produces a distance biplot. It focuses on
-            the ordination of rows (sites) because their transformed
-            distances approximate their original euclidean
-            distances. Especially interesting when most explanatory
-            variables are binary.
-
-            Scaling type 2 produces a correlation biplot. It focuses
-            on the relationships among explained variables (`Y`). It
-            is interpreted like scaling type 1, but taking into
-            account that distances between objects don't approximate
-            their euclidean distances.
-
-            See more details about distance and correlation biplots in
-            [1]_, \S 9.1.4.
-
-        Returns
-        -------
-        OrdinationResults
-            Object that stores the computed eigenvalues, the
-            proportion explained by each of them (per unit),
-            transformed coordinates for species and sites, biplot
-            scores, site constraints, etc.
-
-        See Also
-        --------
-        OrdinationResults
-
-        References
-        ----------
-
-        .. [1] Legendre P. and Legendre L. 1998. Numerical
-           Ecology. Elsevier, Amsterdam.
-
-        """
-        if scaling not in {1, 2}:
-            raise NotImplementedError("Only scalings 1, 2 available for RDA.")
-        # According to the vegan-FAQ.pdf, the scaling factor for scores
-        # is (notice that L&L 1998 says in p. 586 that such scaling
-        # doesn't affect the interpretation of a biplot):
-        eigvals = self.eigenvalues
-        const = np.sum(eigvals**2)**0.25
-        if scaling == 1:
-            scaling_factor = const
-        elif scaling == 2:
-            scaling_factor = eigvals / const
-        species_scores = np.hstack((self.U, self.U_res)) * scaling_factor
-        site_scores = np.hstack((self.F, self.F_res)) / scaling_factor
-        # TODO not yet used/displayed
-        site_constraints = np.hstack((self.Z, self.F_res)) / scaling_factor
-        # vegan seems to compute them as corr(self.X[:, :rank_X],
-        # self.u) but I don't think that's a good idea. In fact, if
-        # you take the example shown in Figure 11.3 in L&L 1998 you
-        # can see that there's an arrow for each of the 4
-        # environmental variables (depth, coral, sand, other) even if
-        # other = not(coral or sand)
-        biplot_scores = corr(self.X, self.u)
-        # The "Correlations of environmental variables with site
-        # scores" from table 11.4 are quite similar to vegan's biplot
-        # scores, but they're computed like this:
-        # corr(self.X, self.F))
-        return OrdinationResults(eigvals=eigvals,
-                                 proportion_explained=eigvals / eigvals.sum(),
-                                 species=species_scores,
-                                 site=site_scores,
-                                 biplot=biplot_scores,
-                                 site_constraints=site_constraints,
-                                 site_ids=self.site_ids,
-                                 species_ids=self.species_ids)
+    Y = y.as_matrix()
+    X = x.as_matrix()
+
+    n, p = y.shape
+    n_, m = x.shape
+    if n != n_:
+        raise ValueError(
+            "Both data matrices must have the same number of rows.")
+    if n < m:
+        # Mmm actually vegan is able to do this case, too
+        raise ValueError(
+            "Explanatory variables cannot have fewer rows than columns.")
+
+    sample_ids = y.index
+    feature_ids = y.columns
+    # Centre response variables (they must be dimensionally
+    # homogeneous)
+    Y = scale(Y, with_std=scale_Y)
+    # Centre explanatory variables
+    X = scale(X, with_std=False)
+
+    # Distribution of variables should be examined and transformed
+    # if necessary (see paragraph 4 in p. 580 L&L 1998)
+
+    # Compute Y_hat (fitted values by multivariate linear
+    # regression, that is, linear least squares). Formula 11.6 in
+    # L&L 1998 involves solving the normal equations, but that fails
+    # when cond(X) ~ eps**(-0.5). A more expensive but much more
+    # stable solution (fails when cond(X) ~ eps**-1) is computed
+    # using the QR decomposition of X = QR:
+    # (11.6) Y_hat = X [X' X]^{-1} X' Y
+    #              = QR [R'Q' QR]^{-1} R'Q' Y
+    #              = QR [R' R]^{-1} R'Q' Y
+    #              = QR R^{-1} R'^{-1} R' Q' Y
+    #              = Q Q' Y
+    # and B (matrix of regression coefficients)
+    # (11.4) B = [X' X]^{-1} X' Y
+    #          = R^{-1} R'^{-1} R' Q' Y
+    #          = R^{-1} Q'
+    # Q, R = np.linalg.qr(X)
+    # Y_hat = Q.dot(Q.T).dot(Y)
+    # B = scipy.linalg.solve_triangular(R, Q.T.dot(Y))
+    # This works provided X has full rank. When not, you can still
+    # fix it using R's pseudoinverse or partitioning R. To avoid any
+    # issues, like the numerical instability when trying to
+    # reproduce an example in L&L where X was rank-deficient, we'll
+    # just use `scipy.linalg.lstsq`, which uses the SVD decomposition
+    # under the hood and so it's also more expensive.
+    B, _, rank_X, _ = lstsq(X, Y)
+    Y_hat = X.dot(B)
+    # Now let's perform PCA on the fitted values from the multiple
+    # regression
+    u, s, vt = svd(Y_hat, full_matrices=False)
+    # vt are the right eigenvectors, which is what we need to
+    # perform PCA. That is, we're changing points in Y_hat from the
+    # canonical basis to the orthonormal basis given by the right
+    # eigenvectors of Y_hat (or equivalently, the eigenvectors of
+    # the covariance matrix Y_hat.T.dot(Y_hat))
+    # See 3) in p. 583 in L&L 1998
+    rank = svd_rank(Y_hat.shape, s)
+    # Theoretically, there're at most min(p, m, n - 1) non-zero eigenvalues
+
+    U = vt[:rank].T  # U as in Fig. 11.2
+
+    # Ordination in the space of response variables. Its columns are
+    # sample scores. (Eq. 11.12)
+    F = Y.dot(U)
+    # Ordination in the space of explanatory variables. Its columns
+    # are fitted sample scores. (Eq. 11.13)
+    Z = Y_hat.dot(U)
+
+    # Canonical coefficients (formula 11.14)
+    # C = B.dot(U)  # Not used
+
+    Y_res = Y - Y_hat
+    # PCA on the residuals
+    u_res, s_res, vt_res = svd(Y_res, full_matrices=False)
+    # See 9) in p. 587 in L&L 1998
+    rank_res = svd_rank(Y_res.shape, s_res)
+    # Theoretically, there're at most min(p, n - 1) non-zero eigenvalues
+
+    U_res = vt_res[:rank_res].T
+    F_res = Y_res.dot(U_res)  # Ordination in the space of residuals
+
+    eigenvalues = np.r_[s[:rank], s_res[:rank_res]]
+
+    # Compute scores
+    if scaling not in {1, 2}:
+        raise NotImplementedError("Only scalings 1, 2 available for RDA.")
+    # According to the vegan-FAQ.pdf, the scaling factor for scores
+    # is (notice that L&L 1998 says in p. 586 that such scaling
+    # doesn't affect the interpretation of a biplot):
+    pc_ids = ['RDA%d' % (i+1) for i in range(len(eigenvalues))]
+    eigvals = pd.Series(eigenvalues, index=pc_ids)
+    const = np.sum(eigenvalues**2)**0.25
+    if scaling == 1:
+        scaling_factor = const
+    elif scaling == 2:
+        scaling_factor = eigenvalues / const
+    feature_scores = np.hstack((U, U_res)) * scaling_factor
+    sample_scores = np.hstack((F, F_res)) / scaling_factor
+
+    feature_scores = pd.DataFrame(feature_scores,
+                                  index=feature_ids,
+                                  columns=pc_ids)
+    sample_scores = pd.DataFrame(sample_scores,
+                                 index=sample_ids,
+                                 columns=pc_ids)
+    # TODO not yet used/displayed
+    sample_constraints = pd.DataFrame(np.hstack((Z, F_res)) / scaling_factor,
+                                      index=sample_ids,
+                                      columns=pc_ids)
+    # Vegan seems to compute them as corr(X[:, :rank_X],
+    # u) but I don't think that's a good idea. In fact, if
+    # you take the example shown in Figure 11.3 in L&L 1998 you
+    # can see that there's an arrow for each of the 4
+    # environmental variables (depth, coral, sand, other) even if
+    # other = not(coral or sand)
+    biplot_scores = pd.DataFrame(corr(X, u))
+    # The "Correlations of environmental variables with sample
+    # scores" from table 11.4 are quite similar to vegan's biplot
+    # scores, but they're computed like this:
+    # corr(X, F))
+    p_explained = pd.Series(eigenvalues / eigenvalues.sum(), index=pc_ids)
+    return OrdinationResults('RDA', 'Redundancy Analysis',
+                             eigvals=eigvals,
+                             proportion_explained=p_explained,
+                             features=feature_scores,
+                             samples=sample_scores,
+                             biplot_scores=biplot_scores,
+                             sample_constraints=sample_constraints)
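
A minimal usage sketch of the ``rda(y, x, scale_Y, scaling)`` signature defined above, assuming ``rda`` is importable from ``skbio.stats.ordination`` like ``ca`` and ``cca``; the community and environmental tables are random, made-up data:

    import numpy as np
    import pandas as pd
    from skbio.stats.ordination import rda

    rng = np.random.RandomState(0)
    sample_ids = ['S%d' % i for i in range(8)]

    # Response matrix y: 8 samples x 4 species abundances (made up, so the
    # columns are already dimensionally homogeneous).
    y = pd.DataFrame(rng.poisson(5, size=(8, 4)).astype(float),
                     index=sample_ids, columns=['sp1', 'sp2', 'sp3', 'sp4'])
    # Explanatory matrix x: 8 samples x 2 environmental variables (made up).
    x = pd.DataFrame(rng.normal(size=(8, 2)),
                     index=sample_ids, columns=['depth', 'ph'])

    ordination = rda(y, x, scale_Y=False, scaling=1)  # distance biplot
    print(ordination.eigvals)         # 'RDA1', 'RDA2', ... axes
    print(ordination.samples)         # sample scores
    print(ordination.features)        # feature scores
    print(ordination.biplot_scores)   # correlations of x with the fitted axes

With ``scaling=2`` the same axes are rescaled by the eigenvalues to produce a correlation biplot; the eigenvalues and proportion explained are unchanged.
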
diff --git a/skbio/stats/ordination/_utils.py b/skbio/stats/ordination/_utils.py
index 3cfd950..409de58 100644
--- a/skbio/stats/ordination/_utils.py
+++ b/skbio/stats/ordination/_utils.py
@@ -9,7 +9,6 @@
 from __future__ import absolute_import, division, print_function
 
 import numpy as np
-import numpy.testing as npt
 
 from skbio.util._decorator import experimental
 
@@ -118,6 +117,7 @@ def scale(a, weights=None, with_mean=True, with_std=True, ddof=0, copy=True):
     """
     if copy:
         a = a.copy()
+    a = np.asarray(a, dtype=np.float64)
     avg, std = mean_and_std(a, axis=0, weights=weights, with_mean=with_mean,
                             with_std=with_std, ddof=ddof)
     if with_mean:
@@ -180,51 +180,21 @@ def corr(x, y=None):
 
 
 @experimental(as_of="0.4.0")
-def assert_ordination_results_equal(left, right):
-    """Assert that ordination results objects are equal.
+def e_matrix(distance_matrix):
+    """Compute E matrix from a distance matrix.
 
-    This is a helper function intended to be used in unit tests that need to
-    compare ``OrdinationResults`` objects.
+    Squares the input elementwise and divides it by -2. Eq. 9.20 in
+    Legendre & Legendre 1998."""
+    return distance_matrix * distance_matrix / -2
 
-    For numeric attributes (e.g., eigvals, site, etc.),
-    ``numpy.testing.assert_almost_equal`` is used. Otherwise,
-    ``numpy.testing.assert_equal`` is used for comparisons. An assertion is
-    in place to ensure the two objects are exactly the same type.
 
-    Parameters
-    ----------
-    left, right : OrdinationResults
-        Ordination results to be compared for equality.
-
-    Raises
-    ------
-    AssertionError
-        If the two objects are not equal.
+def f_matrix(E_matrix):
+    """Compute F matrix from E matrix.
 
-    """
-    npt.assert_equal(type(left) is type(right), True)
-
-    # eigvals should always be present
-    npt.assert_almost_equal(left.eigvals, right.eigvals)
-
-    # these attributes are strings, so can compare directly, even if one or
-    # both are None
-    npt.assert_equal(left.species_ids, right.species_ids)
-    npt.assert_equal(left.site_ids, right.site_ids)
-
-    # these attributes need to be checked that they are almost equal, but one
-    # or both can be None, which npt.assert_almost_equal doesn't like
-    _assert_optional_numeric_attr_equal(left.species, right.species)
-    _assert_optional_numeric_attr_equal(left.site, right.site)
-    _assert_optional_numeric_attr_equal(left.biplot, right.biplot)
-    _assert_optional_numeric_attr_equal(left.site_constraints,
-                                        right.site_constraints)
-    _assert_optional_numeric_attr_equal(left.proportion_explained,
-                                        right.proportion_explained)
-
-
-def _assert_optional_numeric_attr_equal(left, right):
-    if left is None or right is None:
-        npt.assert_equal(left, right)
-    else:
-        npt.assert_almost_equal(left, right)
+    Centring step: for each element, the means of the corresponding
+    row and column are subtracted, and the mean of the whole
+    matrix is added. Eq. 9.21 in Legendre & Legendre 1998."""
+    row_means = E_matrix.mean(axis=1, keepdims=True)
+    col_means = E_matrix.mean(axis=0, keepdims=True)
+    matrix_mean = E_matrix.mean()
+    return E_matrix - row_means - col_means + matrix_mean
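
These two helpers implement the double-centring step used by ``pcoa``. A standalone NumPy sketch of Eqs. 9.20 and 9.21, re-implemented here so it runs without importing the private ``_utils`` module; the distance matrix is made up:

    import numpy as np

    def e_mat(d):
        # Eq. 9.20: square the distances elementwise and divide by -2.
        return d * d / -2

    def f_mat(e):
        # Eq. 9.21: subtract row and column means, add back the grand mean.
        return (e - e.mean(axis=1, keepdims=True)
                - e.mean(axis=0, keepdims=True) + e.mean())

    d = np.array([[0.0, 3.0, 4.0],
                  [3.0, 0.0, 5.0],
                  [4.0, 5.0, 0.0]])
    f = f_mat(e_mat(d))

    # After double-centring, every row and column of f sums to (nearly) zero,
    # so the eigendecomposition in pcoa() yields centred coordinates.
    print(np.allclose(f.sum(axis=0), 0), np.allclose(f.sum(axis=1), 0))

For a euclidean distance matrix, ``f`` equals ``Y.dot(Y.T)`` for the centred data table ``Y``, matching the comment in ``pcoa`` above.
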
diff --git a/skbio/stats/ordination/tests/data/example2_Y b/skbio/stats/ordination/tests/data/example2_Y
index 68e6583..cfff1af 100644
--- a/skbio/stats/ordination/tests/data/example2_Y
+++ b/skbio/stats/ordination/tests/data/example2_Y
@@ -1,12 +1,10 @@
-1 0 0 0 0 0 
-0 0 0 0 0 0 
-0 1 0 0 0 0 
-11 4 0 0 8 1 
-11 5 17 7 0 0 
-9 6 0 0 6 2 
-9 7 13 10 0 0 
-7 8 0 0 4 3 
-7 9 10 13 0 0 
-5 10 0 0 2 4 
-
-
+1 0 0 0 0 0
+0 0 0 0 0 0
+0 1 0 0 0 0
+11 4 0 0 8 1
+11 5 17 7 0 0
+9 6 0 0 6 2
+9 7 13 10 0 0
+7 8 0 0 4 3
+7 9 10 13 0 0
+5 10 0 0 2 4
diff --git a/skbio/stats/ordination/tests/data/example2_biplot_scaling1 b/skbio/stats/ordination/tests/data/example2_biplot_scaling1
new file mode 100644
index 0000000..b1a775f
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example2_biplot_scaling1
@@ -0,0 +1,4 @@
+0.422650 -0.559143 -0.713251 1.165734e-16 1.471046e-16 1.831868e-16
+0.988496 0.150787 -0.011785 6.106227e-17 6.661338e-17 8.326673e-17
+-0.556517 0.817600 0.147714 -4.996004e-17 4.440892e-17 -7.216450e-17
+-0.404080 -0.905843 -0.127150 2.775558e-18 -2.220446e-17 0.000000e+00
diff --git a/skbio/stats/ordination/tests/data/example2_biplot_scaling2 b/skbio/stats/ordination/tests/data/example2_biplot_scaling2
new file mode 100644
index 0000000..77a7490
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example2_biplot_scaling2
@@ -0,0 +1,4 @@
+ 0.422650 -0.559143 -0.713251 1.165734e-16 1.471046e-16 1.831868e-16
+ 0.988496 0.150787 -0.011785 6.106227e-17 6.661338e-17 8.326673e-17
+-0.556517 0.817600 0.147714 -4.996004e-17 4.440892e-17 -7.216450e-17
+-0.404080 -0.905843 -0.127150 2.775558e-18 -2.220446e-17 0.000000e+00
\ No newline at end of file
diff --git a/skbio/stats/ordination/tests/data/example2_sample_constraints_scaling1 b/skbio/stats/ordination/tests/data/example2_sample_constraints_scaling1
new file mode 100644
index 0000000..1f27c66
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example2_sample_constraints_scaling1
@@ -0,0 +1,10 @@
+-1.203552 0.973291 0.398346 -4.377164e-02 -2.025459e-01 -4.174845e-02  2.251712e-03
+-1.233129 1.048075 0.112959 1.946350e-16 -3.553872e-16 8.349689e-02 -1.554395e-16
+-1.262706 1.122859 -0.172429 4.377164e-02 2.025459e-01 -4.174845e-02 -2.251712e-03
+-0.629153 -1.155379 0.778203 -3.794874e-01 5.000171e-02 3.937851e-16  2.503876e-04
+2.249463 0.043725 0.561763 6.747053e-01 2.580938e-02 6.726139e-16  1.835041e-02
+-0.688307 -1.005810 0.207427 -1.264958e-01 1.666724e-02 -6.333665e-17  8.346252e-05
+2.190309 0.193293 -0.009012 -4.068089e-02 -1.574523e-02 -6.651371e-18 -3.978716e-02
+-0.747462 -0.856242 -0.363348 1.264958e-01 -1.666724e-02 -4.098446e-16 -8.346252e-05
+2.131154 0.342861 -0.579787 -6.340244e-01 -1.006415e-02 -4.849801e-16  2.143675e-02
+-0.806617 -0.706674 -0.934123 3.794874e-01 -5.000171e-02 -7.280846e-16 -2.503876e-04
diff --git a/skbio/stats/ordination/tests/data/example2_sample_constraints_scaling2 b/skbio/stats/ordination/tests/data/example2_sample_constraints_scaling2
new file mode 100644
index 0000000..b441047
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example2_sample_constraints_scaling2
@@ -0,0 +1,10 @@
+-1.481311 2.070632 1.420611 -2.272346e-01 -3.841304e+00 -2.304877e+00 2.600617e-01
+-1.517714 2.229732 0.402842 1.010421e-15 -6.739956e-15 4.609755e+00 -1.795250e-14
+-1.554117 2.388832 -0.614928 2.272346e-01 3.841304e+00 -2.304877e+00 -2.600617e-01
+-0.774350 -2.458015 2.775281 -1.970058e+00 9.482876e-01 2.174036e-14 2.891853e-02
+2.768601 0.093023 2.003399 3.502642e+00 4.894777e-01 3.713413e-14 2.119383e+00
+-0.847157 -2.139816 0.739742 -6.566859e-01 3.160959e-01 -3.496734e-15 9.639511e-03
+2.695794 0.411223 -0.032139 -2.111894e-01 -2.986100e-01 -3.672135e-16 -4.595222e+00
+-0.919963 -1.821616 -1.295796 6.566859e-01 -3.160959e-01 -2.262699e-14 -9.639511e-03
+2.622988 0.729422 -2.067677 -3.291452e+00 -1.908677e-01 -2.677512e-14 2.475840e+00
+-0.992770 -1.503417 -3.331334 1.970058e+00 -9.482876e-01 -4.019660e-14 -2.891853e-02
diff --git a/skbio/stats/ordination/tests/data/example3_biplot_scaling1 b/skbio/stats/ordination/tests/data/example3_biplot_scaling1
new file mode 100644
index 0000000..ded205b
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example3_biplot_scaling1
@@ -0,0 +1,3 @@
+-0.169747 0.630691 0.760769
+-0.994017 0.060953 -0.044937
+ 0.184353 -0.974868 0.030987
diff --git a/skbio/stats/ordination/tests/data/example3_biplot_scaling2 b/skbio/stats/ordination/tests/data/example3_biplot_scaling2
new file mode 100644
index 0000000..ded205b
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example3_biplot_scaling2
@@ -0,0 +1,3 @@
+-0.169747 0.630691 0.760769
+-0.994017 0.060953 -0.044937
+ 0.184353 -0.974868 0.030987
diff --git a/skbio/stats/ordination/tests/data/example3_sample_constraints_scaling1 b/skbio/stats/ordination/tests/data/example3_sample_constraints_scaling1
new file mode 100644
index 0000000..1e99e6d
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example3_sample_constraints_scaling1
@@ -0,0 +1,10 @@
+ 0.418807 -1.331731 -0.092311 -0.357222 -0.201114 -0.077319 0.024296 -0.127088 0.023765
+ 0.402119 -1.323781 0.064653 0.774418 0.400533 0.124249 0.046928  0.031790 -0.027526
+ 0.385431 -1.315831 0.221617 -0.893930 -0.432358 -0.106748 -0.138400  0.169415 0.011485
+ 0.670971 0.216324 -0.436938 0.191154 -0.206474 0.219195 -0.110093  0.000480 -0.000760
+-0.586949 0.028310 -0.314662 -0.175743 0.184258 0.048211 0.057137  0.011500 0.017735
+ 0.637595 0.232225 -0.123010 0.082373 -0.107580 -0.221430 0.169356  0.010714 -0.009033
+-0.620325 0.044211 -0.000734 -0.120892 -0.020910 -0.060213 -0.067075 -0.013264 -0.035699
+ 0.604219 0.248126 0.190919 -0.001622 0.236689 -0.162758 -0.145629 -0.005405 0.015899
+-0.653701 0.060112 0.313195 0.335670 -0.188566 0.011227 0.008564  0.001462 0.019120
+ 0.570843 0.264027 0.504847 -0.367420 0.068042 0.303396 0.104854 -0.008683 -0.009463
diff --git a/skbio/stats/ordination/tests/data/example3_sample_constraints_scaling2 b/skbio/stats/ordination/tests/data/example3_sample_constraints_scaling2
new file mode 100644
index 0000000..ea697a2
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example3_sample_constraints_scaling2
@@ -0,0 +1,10 @@
+ 0.692139 -3.080537 -0.328747 -1.245288 -1.072935 -0.506242 0.244127 -3.631648 1.163119
+ 0.664560 -3.062146 0.230249 2.699651 2.136829 0.813520 0.471530  0.908423 -1.347244
+ 0.636980 -3.043755 0.789246 -3.116275 -2.306609 -0.698930 -1.390626  4.841176 0.562103
+ 1.108876 0.500397 -1.556068 0.666370 -1.101532 1.435176 -1.106200  0.013703 -0.037180
+ -0.970016 0.065487 -1.120607 -0.612647 0.983007 0.315662 0.574110  0.328630 0.868028
+ 1.053717 0.537179 -0.438075 0.287157 -0.573935 -1.449806 1.701670  0.306164 -0.442116
+ -1.025175 0.102269 -0.002614 -0.421433 -0.111552 -0.394242 -0.673964 -0.379019 -1.747250
+ 0.998559 0.573961 0.679918 -0.005653 1.262724 -1.065657 -1.463266 -0.154459 0.778140
+ -1.080333 0.139050 1.115379 1.170159 -1.005992 0.073507 0.086046  0.041765 0.935820
+ 0.943400 0.610742 1.797911 -1.280840 0.363003 1.986480 1.053561 -0.248131 -0.463165
diff --git a/skbio/stats/ordination/tests/test_canonical_correspondence_analysis.py b/skbio/stats/ordination/tests/test_canonical_correspondence_analysis.py
new file mode 100644
index 0000000..fafc3f7
--- /dev/null
+++ b/skbio/stats/ordination/tests/test_canonical_correspondence_analysis.py
@@ -0,0 +1,174 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+from unittest import TestCase, main
+
+from skbio import OrdinationResults
+from skbio.stats.ordination import cca
+from skbio.util import get_data_path, assert_ordination_results_equal
+
+
+class TestCCAErrors(TestCase):
+    def setUp(self):
+        """Data from table 11.3 in Legendre & Legendre 1998."""
+        self.Y = pd.DataFrame(np.loadtxt(get_data_path('example3_Y')))
+        self.X = pd.DataFrame(np.loadtxt(get_data_path('example3_X')))
+
+    def test_shape(self):
+        X, Y = self.X, self.Y
+        with npt.assert_raises(ValueError):
+            cca(Y, X[:-1])
+
+    def test_Y_values(self):
+        X, Y = self.X, self.Y
+        Y[0, 0] = -1
+        with npt.assert_raises(ValueError):
+            cca(Y, X)
+        Y[0] = 0
+        with npt.assert_raises(ValueError):
+            cca(Y, X)
+
+    def test_scaling(self):
+        X, Y = self.X, self.Y
+        with npt.assert_raises(NotImplementedError):
+            cca(Y, X, 3)
+
+    def test_all_zero_row(self):
+        X, Y = pd.DataFrame(np.zeros((3, 3))), pd.DataFrame(np.zeros((3, 3)))
+        with npt.assert_raises(ValueError):
+            cca(X, Y)
+
+
+class TestCCAResults1(TestCase):
+    def setUp(self):
+        """Data from table 11.3 in Legendre & Legendre 1998
+        (p. 590). Loaded results as computed with vegan 2.0-8 and
+        compared with table 11.5 if also there."""
+        self.feature_ids = ['Feature0', 'Feature1', 'Feature2', 'Feature3',
+                            'Feature4', 'Feature5', 'Feature6', 'Feature7',
+                            'Feature8']
+        self.sample_ids = ['Sample0', 'Sample1', 'Sample2', 'Sample3',
+                           'Sample4', 'Sample5', 'Sample6', 'Sample7',
+                           'Sample8', 'Sample9']
+        self.env_ids = ['Constraint0', 'Constraint1',
+                        'Constraint2', 'Constraint3']
+        self.pc_ids = ['CCA1', 'CCA2', 'CCA3', 'CCA4', 'CCA5', 'CCA6', 'CCA7',
+                       'CCA8', 'CCA9']
+        self.Y = pd.DataFrame(
+            np.loadtxt(get_data_path('example3_Y')),
+            columns=self.feature_ids,
+            index=self.sample_ids)
+        self.X = pd.DataFrame(
+            np.loadtxt(get_data_path('example3_X')),
+            columns=self.env_ids,
+            index=self.sample_ids
+            ).ix[:, :-1]
+
+    def test_scaling1(self):
+        scores = cca(self.Y, self.X, scaling=1)
+
+        # Load data as computed with vegan 2.0-8
+        vegan_features = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example3_species_scaling1_from_vegan')),
+            index=self.feature_ids,
+            columns=self.pc_ids)
+
+        vegan_samples = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example3_site_scaling1_from_vegan')),
+            index=self.sample_ids,
+            columns=self.pc_ids)
+
+        sample_constraints = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example3_sample_constraints_scaling1')),
+            index=self.sample_ids,
+            columns=self.pc_ids)
+
+        biplot_scores = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example3_biplot_scaling1')))
+
+        proportion_explained = pd.Series([0.466911, 0.238327, 0.100548,
+                                          0.104937, 0.044805, 0.029747,
+                                          0.012631, 0.001562, 0.000532],
+                                         index=self.pc_ids)
+        eigvals = pd.Series([0.366136, 0.186888, 0.078847, 0.082288,
+                             0.035135, 0.023327, 0.009905, 0.001225,
+                             0.000417], index=self.pc_ids)
+
+        exp = OrdinationResults(
+            'CCA', 'Canonical Correspondence Analysis',
+            samples=vegan_samples,
+            features=vegan_features,
+            sample_constraints=sample_constraints,
+            biplot_scores=biplot_scores,
+            proportion_explained=proportion_explained,
+            eigvals=eigvals)
+
+        assert_ordination_results_equal(scores, exp,
+                                        ignore_biplot_scores_labels=True,
+                                        decimal=6)
+
+    def test_scaling2(self):
+        scores = cca(self.Y, self.X, scaling=2)
+
+        # Load data as computed with vegan 2.0-8
+        vegan_features = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example3_species_scaling2_from_vegan')),
+            index=self.feature_ids,
+            columns=self.pc_ids)
+
+        vegan_samples = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example3_site_scaling2_from_vegan')),
+            index=self.sample_ids,
+            columns=self.pc_ids)
+
+        sample_constraints = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example3_sample_constraints_scaling2')),
+            index=self.sample_ids,
+            columns=self.pc_ids)
+
+        biplot_scores = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example3_biplot_scaling2')))
+
+        proportion_explained = pd.Series([0.466911, 0.238327, 0.100548,
+                                          0.104937, 0.044805, 0.029747,
+                                          0.012631, 0.001562, 0.000532],
+                                         index=self.pc_ids)
+        eigvals = pd.Series([0.366136, 0.186888, 0.078847, 0.082288,
+                             0.035135, 0.023327, 0.009905, 0.001225,
+                             0.000417], index=self.pc_ids)
+
+        exp = OrdinationResults(
+            'CCA', 'Canonical Correspondence Analysis',
+            samples=vegan_samples,
+            features=vegan_features,
+            sample_constraints=sample_constraints,
+            biplot_scores=biplot_scores,
+            proportion_explained=proportion_explained,
+            eigvals=eigvals)
+
+        assert_ordination_results_equal(scores, exp,
+                                        ignore_biplot_scores_labels=True,
+                                        decimal=6)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/stats/ordination/tests/test_correspondence_analysis.py b/skbio/stats/ordination/tests/test_correspondence_analysis.py
new file mode 100644
index 0000000..d86f7f8
--- /dev/null
+++ b/skbio/stats/ordination/tests/test_correspondence_analysis.py
@@ -0,0 +1,194 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+from scipy.spatial.distance import pdist
+from unittest import TestCase, main
+
+from skbio import OrdinationResults
+from skbio.stats.ordination import ca
+from skbio.util import get_data_path, assert_ordination_results_equal
+
+
+def chi_square_distance(data_table, between_rows=True):
+    """Computes the chi-square distance between rows or columns of the input.
+
+    It is a measure that has no upper limit, and it excludes double-zeros.
+
+    Parameters
+    ----------
+    data_table : 2D array_like
+        An array_like object of shape (n, p). The input must be a
+        frequency table (so that the sum of all cells equals 1, and
+        all values are non-negative).
+    between_rows : bool (defaults to True)
+        Indicates whether distance is computed between rows (default)
+        or columns.
+
+    Returns
+    -------
+    Y : ndarray
+        Returns a condensed distance matrix. For each i and j (where
+        i<j<n), the chi square distance between u=X[i] and v=X[j] is
+        computed and stored in `Y[(n choose 2) - (n - i choose 2) + (j
+        - i - 1)]`.
+
+    See Also
+    --------
+    scipy.spatial.distance.squareform
+
+    References
+    ----------
+    This coefficient appears in Legendre and Legendre (1998) as
+    formula 7.54 (as D_{16}). Another source is
+    http://www.springerreference.com/docs/html/chapterdbid/60817.html
+    """
+    data_table = np.asarray(data_table, dtype=np.float64)
+    if not np.allclose(data_table.sum(), 1):
+        raise ValueError("Input is not a frequency table: if it is an"
+                         " abundance table you could scale it as"
+                         " `data_table / data_table.sum()`.")
+    if np.any(data_table < 0):
+        raise ValueError("A frequency table can't have negative values.")
+
+    # The distances are always computed between the rows of F
+    F = data_table if between_rows else data_table.T
+
+    row_sums = F.sum(axis=1, keepdims=True)
+    column_sums = F.sum(axis=0)
+    scaled_F = F / (row_sums * np.sqrt(column_sums))
+
+    return pdist(scaled_F, 'euclidean')
+
+
+class TestChiSquareDistance(TestCase):
+    def test_errors(self):
+        a = np.array([[-0.5, 0],
+                      [1, 0.5]])
+        with npt.assert_raises(ValueError):
+            chi_square_distance(a)
+        b = np.array([[0.5, 0],
+                      [0.5, 0.1]])
+        with npt.assert_raises(ValueError):
+            chi_square_distance(b)
+
+    def test_results(self):
+        """Some random numbers."""
+        a = np.array([[0.02808988764,  0.056179775281,  0.084269662921,
+                       0.140449438202],
+                      [0.01404494382,  0.196629213483,  0.109550561798,
+                       0.033707865169],
+                      [0.02808988764,  0.112359550562,  0.056179775281,
+                       0.140449438202]])
+        dist = chi_square_distance(a)
+        expected = [0.91413919964333856,
+                    0.33651110106124049,
+                    0.75656884966269089]
+        npt.assert_almost_equal(dist, expected)
+
+    def test_results2(self):
+        """A tiny example from Legendre & Legendre 1998, p. 285."""
+        a = np.array([[0, 1, 1],
+                      [1, 0, 0],
+                      [0, 4, 4]])
+        dist = chi_square_distance(a / a.sum())
+        # Note: L&L report 3.477, which appears to be a small
+        # calculation error in the original text.
+        expected = [3.4785054261852175, 0, 3.4785054261852175]
+        npt.assert_almost_equal(dist, expected)
+
+
+class TestCAResults(TestCase):
+    def setUp(self):
+        """Data from table 9.11 in Legendre & Legendre 1998."""
+        self.X = np.loadtxt(get_data_path('L&L_CA_data'))
+        self.sample_ids = ['Site1', 'Site2', 'Site3']
+        self.feature_ids = ['Species1', 'Species2', 'Species3']
+        self.pc_ids = ['CA1', 'CA2']
+        self.contingency = pd.DataFrame(self.X, self.sample_ids,
+                                        self.feature_ids)
+
+    def test_scaling2(self):
+
+        eigvals = pd.Series(np.array([0.09613302, 0.04094181]), self.pc_ids)
+        # p. 460 L&L 1998
+        features = pd.DataFrame(np.array([[0.40887, -0.06955],  # F_hat
+                                          [-0.11539, 0.29977],
+                                          [-0.30997, -0.18739]]),
+                                self.feature_ids,
+                                self.pc_ids)
+        samples = pd.DataFrame(np.array([[-0.84896, -0.88276],  # V_hat
+                                         [-0.22046, 1.34482],
+                                         [1.66697, -0.47032]]),
+                               self.sample_ids,
+                               self.pc_ids)
+        exp = OrdinationResults('CA', 'Correspondance Analysis',
+                                eigvals=eigvals, features=features,
+                                samples=samples)
+
+        scores = ca(self.contingency, 2)
+
+        assert_ordination_results_equal(exp, scores, decimal=5,
+                                        ignore_directionality=True)
+
+    def test_scaling1(self):
+        eigvals = pd.Series(np.array([0.09613302, 0.04094181]), self.pc_ids)
+        # p. 458
+        features = pd.DataFrame(np.array([[1.31871, -0.34374],  # V
+                                          [-0.37215, 1.48150],
+                                          [-0.99972, -0.92612]]),
+                                self.feature_ids,
+                                self.pc_ids)
+        samples = pd.DataFrame(np.array([[-0.26322, -0.17862],  # F
+                                         [-0.06835, 0.27211],
+                                         [0.51685, -0.09517]]),
+                               self.sample_ids,
+                               self.pc_ids)
+        exp = OrdinationResults('CA', 'Correspondance Analysis',
+                                eigvals=eigvals, features=features,
+                                samples=samples)
+        scores = ca(self.contingency, 1)
+
+        assert_ordination_results_equal(exp, scores, decimal=5,
+                                        ignore_directionality=True)
+
+    def test_maintain_chi_square_distance_scaling1(self):
+        """In scaling 1, chi^2 distance among rows (samples) is equal to
+        euclidean distance between them in transformed space."""
+        frequencies = self.X / self.X.sum()
+        chi2_distances = chi_square_distance(frequencies)
+        transformed_sites = ca(self.contingency, 1).samples.values
+        euclidean_distances = pdist(transformed_sites, 'euclidean')
+        npt.assert_almost_equal(chi2_distances, euclidean_distances)
+
+    def test_maintain_chi_square_distance_scaling2(self):
+        """In scaling 2, chi^2 distance among columns (features) is
+        equal to euclidean distance between them in transformed space."""
+        frequencies = self.X / self.X.sum()
+        chi2_distances = chi_square_distance(frequencies, between_rows=False)
+        transformed_species = ca(self.contingency, 2).features.values
+        euclidean_distances = pdist(transformed_species, 'euclidean')
+        npt.assert_almost_equal(chi2_distances, euclidean_distances)
+
+
+class TestCAErrors(TestCase):
+    def setUp(self):
+        pass
+
+    def test_negative(self):
+        X = np.array([[1, 2], [-0.1, -2]])
+        with npt.assert_raises(ValueError):
+            ca(pd.DataFrame(X))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/stats/ordination/tests/test_ordination.py b/skbio/stats/ordination/tests/test_ordination.py
deleted file mode 100644
index 2a401d3..0000000
--- a/skbio/stats/ordination/tests/test_ordination.py
+++ /dev/null
@@ -1,887 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-import six
-from six import binary_type, text_type
-
-import warnings
-import unittest
-
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import numpy as np
-import numpy.testing as npt
-import pandas as pd
-from IPython.core.display import Image, SVG
-from nose.tools import assert_is_instance, assert_true
-from scipy.spatial.distance import pdist
-
-from skbio import DistanceMatrix
-from skbio.stats.ordination import (
-    CA, RDA, CCA, PCoA, OrdinationResults, corr, mean_and_std,
-    assert_ordination_results_equal)
-from skbio.util import get_data_path
-
-
-def normalize_signs(arr1, arr2):
-    """Change column signs so that "column" and "-column" compare equal.
-
-    This is needed because results of eigenproblems can have signs
-    flipped, but they're still right.
-
-    Notes
-    =====
-
-    This function tries hard to make sure that, if you find "column"
-    and "-column" almost equal, calling a function like np.allclose to
-    compare them after calling `normalize_signs` succeeds.
-
-    To do so, it distinguishes two cases for every column:
-
-    - It can be all almost equal to 0 (this includes a column of
-      zeros).
-    - Otherwise, it has a value that isn't close to 0.
-
-    In the first case, no sign needs to be flipped. I.e., for
-    |epsilon| small, np.allclose(-epsilon, 0) is true if and only if
-    np.allclose(epsilon, 0) is.
-
-    In the second case, the function finds the number in the column
-    whose absolute value is largest. Then, it compares its sign with
-    the number found in the same index, but in the other array, and
-    flips the sign of the column as needed.
-    """
-    # Let's convert everything to floating point numbers (it's
-    # reasonable to assume that eigenvectors will already be floating
-    # point numbers). This is necessary because np.array(1) /
-    # np.array(0) != np.array(1.) / np.array(0.)
-    arr1 = np.asarray(arr1, dtype=np.float64)
-    arr2 = np.asarray(arr2, dtype=np.float64)
-
-    if arr1.shape != arr2.shape:
-        raise ValueError(
-            "Arrays must have the same shape ({0} vs {1}).".format(arr1.shape,
-                                                                   arr2.shape)
-            )
-
-    # To avoid issues around zero, we'll compare signs of the values
-    # with highest absolute value
-    max_idx = np.abs(arr1).argmax(axis=0)
-    max_arr1 = arr1[max_idx, range(arr1.shape[1])]
-    max_arr2 = arr2[max_idx, range(arr2.shape[1])]
-
-    sign_arr1 = np.sign(max_arr1)
-    sign_arr2 = np.sign(max_arr2)
-
-    # Store current warnings, and ignore division by zero (like 1. /
-    # 0.) and invalid operations (like 0. / 0.)
-    wrn = np.seterr(invalid='ignore', divide='ignore')
-    differences = sign_arr1 / sign_arr2
-    # The values in `differences` can be:
-    #    1 -> equal signs
-    #   -1 -> diff signs
-    #   Or nan (0/0), inf (nonzero/0), 0 (0/nonzero)
-    np.seterr(**wrn)
-
-    # Now let's deal with cases where `differences != \pm 1`
-    special_cases = (~np.isfinite(differences)) | (differences == 0)
-    # In any of these cases, the sign of the column doesn't matter, so
-    # let's just keep it
-    differences[special_cases] = 1
-
-    return arr1 * differences, arr2
-
-
-def chi_square_distance(data_table, between_rows=True):
-    """Computes the chi-square distance between two rows or columns of input.
-
-    It is a measure that has no upper limit, and it excludes double-zeros.
-
-    Parameters
-    ----------
-    data_table : 2D array_like
-        An array_like object of shape (n, p). The input must be a
-        frequency table (so that the sum of all cells equals 1, and
-        all values are non-negative).
-    between_rows : bool (defaults to True)
-        Indicates whether distance is computed between rows (default)
-        or columns.
-
-    Returns
-    -------
-    Y : ndarray
-        Returns a condensed distance matrix. For each i and j (where
-        i<j<n), the chi square distance between u=X[i] and v=X[j] is
-        computed and stored in `Y[(n choose 2) - (n - i choose 2) + (j
-        - i - 1)]`.
-
-    See Also
-    --------
-    scipy.spatial.distance.squareform
-
-    References
-    ----------
-    This coefficient appears in Legendre and Legendre (1998) as
-    formula 7.54 (as D_{16}). Another source is
-    http://www.springerreference.com/docs/html/chapterdbid/60817.html
-    """
-    data_table = np.asarray(data_table, dtype=np.float64)
-    if not np.allclose(data_table.sum(), 1):
-        raise ValueError("Input is not a frequency table: if it is an"
-                         " abundance table you could scale it as"
-                         " `data_table / data_table.sum()`.")
-    if np.any(data_table < 0):
-        raise ValueError("A frequency table can't have negative values.")
-
-    # The distances are always computed between the rows of F
-    F = data_table if between_rows else data_table.T
-
-    row_sums = F.sum(axis=1, keepdims=True)
-    column_sums = F.sum(axis=0)
-    scaled_F = F / (row_sums * np.sqrt(column_sums))
-
-    return pdist(scaled_F, 'euclidean')
-
-
-class TestNormalizeSigns(object):
-    def test_shapes_and_nonarray_input(self):
-        with npt.assert_raises(ValueError):
-            normalize_signs([[1, 2], [3, 5]], [[1, 2]])
-
-    def test_works_when_different(self):
-        """Taking abs value of everything would lead to false
-        positives."""
-        a = np.array([[1, -1],
-                      [2, 2]])
-        b = np.array([[-1, -1],
-                      [2, 2]])
-        with npt.assert_raises(AssertionError):
-            npt.assert_equal(*normalize_signs(a, b))
-
-    def test_easy_different(self):
-        a = np.array([[1, 2],
-                      [3, -1]])
-        b = np.array([[-1, 2],
-                      [-3, -1]])
-        npt.assert_equal(*normalize_signs(a, b))
-
-    def test_easy_already_equal(self):
-        a = np.array([[1, -2],
-                      [3, 1]])
-        b = a.copy()
-        npt.assert_equal(*normalize_signs(a, b))
-
-    def test_zeros(self):
-        a = np.array([[0, 3],
-                      [0, -1]])
-        b = np.array([[0, -3],
-                      [0, 1]])
-        npt.assert_equal(*normalize_signs(a, b))
-
-    def test_hard(self):
-        a = np.array([[0, 1],
-                      [1, 2]])
-        b = np.array([[0, 1],
-                      [-1, 2]])
-        npt.assert_equal(*normalize_signs(a, b))
-
-    def test_harder(self):
-        """We don't want a value that might be negative due to
-        floating point inaccuracies to make a call to allclose in the
-        result to be off."""
-        a = np.array([[-1e-15, 1],
-                      [5, 2]])
-        b = np.array([[1e-15, 1],
-                      [5, 2]])
-        # Clearly a and b would refer to the same "column
-        # eigenvectors" but a slopppy implementation of
-        # normalize_signs could change the sign of column 0 and make a
-        # comparison fail
-        npt.assert_almost_equal(*normalize_signs(a, b))
-
-    def test_column_zeros(self):
-        a = np.array([[0, 1],
-                      [0, 2]])
-        b = np.array([[0, -1],
-                      [0, -2]])
-        npt.assert_equal(*normalize_signs(a, b))
-
-    def test_column_almost_zero(self):
-        a = np.array([[1e-15, 3],
-                      [-2e-14, -6]])
-        b = np.array([[0, 3],
-                      [-1e-15, -6]])
-        npt.assert_almost_equal(*normalize_signs(a, b))
-
-
-class TestChiSquareDistance(object):
-    def test_errors(self):
-        a = np.array([[-0.5, 0],
-                      [1, 0.5]])
-        with npt.assert_raises(ValueError):
-            chi_square_distance(a)
-        b = np.array([[0.5, 0],
-                      [0.5, 0.1]])
-        with npt.assert_raises(ValueError):
-            chi_square_distance(b)
-
-    def test_results(self):
-        """Some random numbers."""
-        a = np.array([[0.02808988764,  0.056179775281,  0.084269662921,
-                       0.140449438202],
-                      [0.01404494382,  0.196629213483,  0.109550561798,
-                       0.033707865169],
-                      [0.02808988764,  0.112359550562,  0.056179775281,
-                       0.140449438202]])
-        dist = chi_square_distance(a)
-        expected = [0.91413919964333856,
-                    0.33651110106124049,
-                    0.75656884966269089]
-        npt.assert_almost_equal(dist, expected)
-
-    def test_results2(self):
-        """A tiny example from Legendre & Legendre 1998, p. 285."""
-        a = np.array([[0, 1, 1],
-                      [1, 0, 0],
-                      [0, 4, 4]])
-        dist = chi_square_distance(a / a.sum())
-        # Note L&L used a terrible calculator because they got a wrong
-        # number (says it's 3.477) :(
-        expected = [3.4785054261852175, 0, 3.4785054261852175]
-        npt.assert_almost_equal(dist, expected)
-
-
-class TestUtils(object):
-    def setup(self):
-        self.x = np.array([[1, 2, 3], [4, 5, 6]])
-        self.y = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-
-    def test_mean_and_std_no_mean_no_std(self):
-        with npt.assert_raises(ValueError):
-            mean_and_std(self.x, with_mean=False, with_std=False)
-
-    def test_corr_shape_mismatch(self):
-        with npt.assert_raises(ValueError):
-            corr(self.x, self.y)
-
-    def test_assert_ordination_results_equal(self):
-        minimal1 = OrdinationResults([1, 2])
-
-        # a minimal set of results should be equal to itself
-        assert_ordination_results_equal(minimal1, minimal1)
-
-        # type mismatch
-        with npt.assert_raises(AssertionError):
-            assert_ordination_results_equal(minimal1, 'foo')
-
-        # numeric values should be checked that they're almost equal
-        almost_minimal1 = OrdinationResults([1.0000001, 1.9999999])
-        assert_ordination_results_equal(minimal1, almost_minimal1)
-
-        # species_ids missing in one, present in the other
-        almost_minimal1.species_ids = ['abc', 'def']
-        with npt.assert_raises(AssertionError):
-            assert_ordination_results_equal(minimal1, almost_minimal1)
-        almost_minimal1.species_ids = None
-
-        # site_ids missing in one, present in the other
-        almost_minimal1.site_ids = ['abc', 'def']
-        with npt.assert_raises(AssertionError):
-            assert_ordination_results_equal(minimal1, almost_minimal1)
-        almost_minimal1.site_ids = None
-
-        # test each of the optional numeric attributes
-        for attr in ('species', 'site', 'biplot', 'site_constraints',
-                     'proportion_explained'):
-            # missing optional numeric attribute in one, present in the other
-            setattr(almost_minimal1, attr, [[1, 2], [3, 4]])
-            with npt.assert_raises(AssertionError):
-                assert_ordination_results_equal(minimal1, almost_minimal1)
-            setattr(almost_minimal1, attr, None)
-
-            # optional numeric attributes present in both, but not almost equal
-            setattr(minimal1, attr, [[1, 2], [3, 4]])
-            setattr(almost_minimal1, attr, [[1, 2], [3.00002, 4]])
-            with npt.assert_raises(AssertionError):
-                assert_ordination_results_equal(minimal1, almost_minimal1)
-            setattr(minimal1, attr, None)
-            setattr(almost_minimal1, attr, None)
-
-            # optional numeric attributes present in both, and almost equal
-            setattr(minimal1, attr, [[1, 2], [3, 4]])
-            setattr(almost_minimal1, attr, [[1, 2], [3.00000002, 4]])
-            assert_ordination_results_equal(minimal1, almost_minimal1)
-            setattr(minimal1, attr, None)
-            setattr(almost_minimal1, attr, None)
-
-
-class TestCAResults(object):
-    def setup(self):
-        """Data from table 9.11 in Legendre & Legendre 1998."""
-        self.X = np.loadtxt(get_data_path('L&L_CA_data'))
-        self.ordination = CA(self.X, ['Site1', 'Site2', 'Site3'],
-                             ['Species1', 'Species2', 'Species3'])
-
-    def test_scaling2(self):
-        scores = self.ordination.scores(scaling=2)
-        # p. 460 L&L 1998
-        F_hat = np.array([[0.40887, -0.06955],
-                          [-0.11539,  0.29977],
-                          [-0.30997, -0.18739]])
-        npt.assert_almost_equal(*normalize_signs(F_hat, scores.species),
-                                decimal=5)
-        V_hat = np.array([[-0.84896, -0.88276],
-                          [-0.22046,  1.34482],
-                          [1.66697, -0.47032]])
-        npt.assert_almost_equal(*normalize_signs(V_hat, scores.site),
-                                decimal=5)
-
-    def test_scaling1(self):
-        scores = self.ordination.scores(scaling=1)
-        # p. 458
-        V = np.array([[1.31871, -0.34374],
-                      [-0.37215,  1.48150],
-                      [-0.99972, -0.92612]])
-        npt.assert_almost_equal(*normalize_signs(V, scores.species), decimal=5)
-        F = np.array([[-0.26322, -0.17862],
-                      [-0.06835,  0.27211],
-                      [0.51685, -0.09517]])
-        npt.assert_almost_equal(*normalize_signs(F, scores.site), decimal=5)
-
-    def test_maintain_chi_square_distance_scaling1(self):
-        """In scaling 1, chi^2 distance among rows (sites) is equal to
-        euclidean distance between them in transformed space."""
-        frequencies = self.X / self.X.sum()
-        chi2_distances = chi_square_distance(frequencies)
-        transformed_sites = self.ordination.scores(1).site
-        euclidean_distances = pdist(transformed_sites, 'euclidean')
-        npt.assert_almost_equal(chi2_distances, euclidean_distances)
-
-    def test_maintain_chi_square_distance_scaling2(self):
-        """In scaling 2, chi^2 distance among columns (species) is
-        equal to euclidean distance between them in transformed space."""
-        frequencies = self.X / self.X.sum()
-        chi2_distances = chi_square_distance(frequencies, between_rows=False)
-        transformed_species = self.ordination.scores(2).species
-        euclidean_distances = pdist(transformed_species, 'euclidean')
-        npt.assert_almost_equal(chi2_distances, euclidean_distances)
-
-
-class TestCAErrors(object):
-    def test_negative(self):
-        X = np.array([[1, 2], [-0.1, -2]])
-        with npt.assert_raises(ValueError):
-            CA(X, None, None)
-
-
-class TestRDAErrors(object):
-    def test_shape(self):
-        for n, p, n_, m in [(3, 4, 2, 1), (3, 4, 3, 10)]:
-            Y = np.random.randn(n, p)
-            X = np.random.randn(n_, m)
-            yield npt.assert_raises, ValueError, RDA, Y, X, None, None
-
-
-class TestRDAResults(object):
-    # STATUS: L&L only shows results with scaling 1, and they agree
-    # with vegan's (modulo multiplying by a constant). I can also
-    # compute scaling 2, agreeing with vegan, but there are no written
-    # results in L&L.
-    def setup(self):
-        """Data from table 11.3 in Legendre & Legendre 1998."""
-        Y = np.loadtxt(get_data_path('example2_Y'))
-        X = np.loadtxt(get_data_path('example2_X'))
-        self.ordination = RDA(Y, X,
-                              ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
-                               'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
-                              ['Species0', 'Species1', 'Species2', 'Species3',
-                               'Species4', 'Species5'])
-
-    def test_scaling1(self):
-        scores = self.ordination.scores(1)
-
-        # Load data as computed with vegan 2.0-8
-        vegan_species = np.loadtxt(get_data_path(
-            'example2_species_scaling1_from_vegan'))
-        npt.assert_almost_equal(scores.species, vegan_species, decimal=6)
-
-        vegan_site = np.loadtxt(get_data_path(
-            'example2_site_scaling1_from_vegan'))
-        npt.assert_almost_equal(scores.site, vegan_site, decimal=6)
-
-    def test_scaling2(self):
-        scores = self.ordination.scores(2)
-
-        # Load data as computed with vegan 2.0-8
-        vegan_species = np.loadtxt(get_data_path(
-            'example2_species_scaling2_from_vegan'))
-        npt.assert_almost_equal(scores.species, vegan_species, decimal=6)
-
-        vegan_site = np.loadtxt(get_data_path(
-            'example2_site_scaling2_from_vegan'))
-        npt.assert_almost_equal(scores.site, vegan_site, decimal=6)
-
-
-class TestCCAErrors(object):
-    def setup(self):
-        """Data from table 11.3 in Legendre & Legendre 1998."""
-        self.Y = np.loadtxt(get_data_path('example3_Y'))
-        self.X = np.loadtxt(get_data_path('example3_X'))
-
-    def test_shape(self):
-        X, Y = self.X, self.Y
-        with npt.assert_raises(ValueError):
-            CCA(Y, X[:-1], None, None)
-
-    def test_Y_values(self):
-        X, Y = self.X, self.Y
-        Y[0, 0] = -1
-        with npt.assert_raises(ValueError):
-            CCA(Y, X, None, None)
-        Y[0] = 0
-        with npt.assert_raises(ValueError):
-            CCA(Y, X, None, None)
-
-
-class TestCCAResults(object):
-    def setup(self):
-        """Data from table 11.3 in Legendre & Legendre 1998
-        (p. 590). Loaded results as computed with vegan 2.0-8 and
-        compared with table 11.5 if also there."""
-        Y = np.loadtxt(get_data_path('example3_Y'))
-        X = np.loadtxt(get_data_path('example3_X'))
-        self.ordination = CCA(Y, X[:, :-1],
-                              ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
-                               'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
-                              ['Species0', 'Species1', 'Species2', 'Species3',
-                               'Species4', 'Species5', 'Species6', 'Species7',
-                               'Species8'])
-
-    def test_scaling1_species(self):
-        scores = self.ordination.scores(1)
-
-        vegan_species = np.loadtxt(get_data_path(
-            'example3_species_scaling1_from_vegan'))
-        npt.assert_almost_equal(scores.species, vegan_species, decimal=6)
-
-    def test_scaling1_site(self):
-        scores = self.ordination.scores(1)
-
-        vegan_site = np.loadtxt(get_data_path(
-            'example3_site_scaling1_from_vegan'))
-        npt.assert_almost_equal(scores.site, vegan_site, decimal=4)
-
-    def test_scaling2_species(self):
-        scores = self.ordination.scores(2)
-
-        vegan_species = np.loadtxt(get_data_path(
-            'example3_species_scaling2_from_vegan'))
-        npt.assert_almost_equal(scores.species, vegan_species, decimal=5)
-
-    def test_scaling2_site(self):
-        scores = self.ordination.scores(2)
-
-        vegan_site = np.loadtxt(get_data_path(
-            'example3_site_scaling2_from_vegan'))
-        npt.assert_almost_equal(scores.site, vegan_site, decimal=4)
-
-
-class TestPCoAResults(object):
-    def setup(self):
-        """Sample data set from page 111 of W.J Krzanowski. Principles
-        of multivariate analysis, 2000, Oxford University Press."""
-        matrix = np.loadtxt(get_data_path('PCoA_sample_data'))
-        dist_matrix = DistanceMatrix(matrix, map(str, range(matrix.shape[0])))
-        self.dist_matrix = dist_matrix
-
-    def test_negative_eigenvalue_warning(self):
-        """This data has some small negative eigenvalues."""
-        npt.assert_warns(RuntimeWarning, PCoA, self.dist_matrix)
-
-    def test_values(self):
-        """Adapted from cogent's `test_principal_coordinate_analysis`:
-        "I took the example in the book (see intro info), and did the
-        principal coordinates analysis, plotted the data and it looked
-        right"."""
-        with warnings.catch_warnings():
-            warnings.filterwarnings('ignore', category=RuntimeWarning)
-            ordination = PCoA(self.dist_matrix)
-        scores = ordination.scores()
-
-        exp_eigvals = np.array([0.73599103, 0.26260032, 0.14926222, 0.06990457,
-                                0.02956972, 0.01931184, 0., 0., 0., 0., 0., 0.,
-                                0., 0.])
-        exp_site = np.loadtxt(get_data_path('exp_PCoAzeros_site'))
-        exp_prop_expl = np.array([0.58105792, 0.20732046, 0.1178411,
-                                  0.05518899, 0.02334502, 0.01524651, 0., 0.,
-                                  0., 0., 0., 0., 0., 0.])
-        exp_site_ids = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
-                        '10', '11', '12', '13']
-        # Note the absolute value because column can have signs swapped
-        npt.assert_almost_equal(scores.eigvals, exp_eigvals)
-        npt.assert_almost_equal(np.abs(scores.site), exp_site)
-        npt.assert_almost_equal(scores.proportion_explained, exp_prop_expl)
-        npt.assert_equal(scores.site_ids, exp_site_ids)
-
-
-class TestPCoAResultsExtensive(object):
-    def setup(self):
-        matrix = np.loadtxt(get_data_path('PCoA_sample_data_2'))
-        self.ids = [str(i) for i in range(matrix.shape[0])]
-        dist_matrix = DistanceMatrix(matrix, self.ids)
-        self.ordination = PCoA(dist_matrix)
-
-    def test_values(self):
-        results = self.ordination.scores()
-
-        npt.assert_equal(len(results.eigvals), len(results.site[0]))
-
-        expected = np.array([[-0.028597, 0.22903853, 0.07055272,
-                              0.26163576, 0.28398669, 0.0],
-                             [0.37494056, 0.22334055, -0.20892914,
-                              0.05057395, -0.18710366, 0.0],
-                             [-0.33517593, -0.23855979, -0.3099887,
-                              0.11521787, -0.05021553, 0.0],
-                             [0.25412394, -0.4123464, 0.23343642,
-                              0.06403168, -0.00482608, 0.0],
-                             [-0.28256844, 0.18606911, 0.28875631,
-                              -0.06455635, -0.21141632, 0.0],
-                             [0.01727687, 0.012458, -0.07382761,
-                              -0.42690292, 0.1695749, 0.0]])
-        npt.assert_almost_equal(*normalize_signs(expected, results.site))
-
-        expected = np.array([0.3984635, 0.36405689, 0.28804535, 0.27479983,
-                            0.19165361, 0.0])
-        npt.assert_almost_equal(results.eigvals, expected)
-
-        expected = np.array([0.2626621381, 0.2399817314, 0.1898758748,
-                             0.1811445992, 0.1263356565, 0.0])
-        npt.assert_almost_equal(results.proportion_explained, expected)
-
-        npt.assert_equal(results.site_ids, self.ids)
-
-
-class TestPCoAEigenResults(object):
-    def setup(self):
-        dist_matrix = DistanceMatrix.read(get_data_path('PCoA_sample_data_3'))
-        self.ordination = PCoA(dist_matrix)
-
-        self.ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593',
-                    'PC.355', 'PC.607', 'PC.634']
-
-    def test_values(self):
-        results = self.ordination.scores()
-
-        npt.assert_almost_equal(len(results.eigvals), len(results.site[0]))
-
-        expected = np.loadtxt(get_data_path('exp_PCoAEigenResults_site'))
-        npt.assert_almost_equal(*normalize_signs(expected, results.site))
-
-        expected = np.array([0.51236726, 0.30071909, 0.26791207, 0.20898868,
-                             0.19169895, 0.16054235,  0.15017696,  0.12245775,
-                             0.0])
-        npt.assert_almost_equal(results.eigvals, expected)
-
-        expected = np.array([0.2675738328, 0.157044696, 0.1399118638,
-                             0.1091402725, 0.1001110485, 0.0838401162,
-                             0.0784269939, 0.0639511764, 0.0])
-        npt.assert_almost_equal(results.proportion_explained, expected)
-
-        npt.assert_equal(results.site_ids, self.ids)
-
-
-class TestPCoAPrivateMethods(object):
-    def setup(self):
-        self.matrix = np.arange(1, 7).reshape(2, 3)
-        self.matrix2 = np.arange(1, 10).reshape(3, 3)
-
-    def test_E_matrix(self):
-        E = PCoA._E_matrix(self.matrix)
-        expected_E = np.array([[-0.5,  -2.,  -4.5],
-                               [-8., -12.5, -18.]])
-        npt.assert_almost_equal(E, expected_E)
-
-    def test_F_matrix(self):
-        F = PCoA._F_matrix(self.matrix2)
-        expected_F = np.zeros((3, 3))
-        # Note that `test_make_F_matrix` in cogent is wrong
-        npt.assert_almost_equal(F, expected_F)
-
-
-class TestPCoAErrors(object):
-    def test_input(self):
-        with npt.assert_raises(TypeError):
-            PCoA([[1, 2], [3, 4]])
-
-
-class TestOrdinationResults(unittest.TestCase):
-    def setUp(self):
-        # Define in-memory CA results to serialize and deserialize.
-        eigvals = np.array([0.0961330159181, 0.0409418140138])
-        species = np.array([[0.408869425742, 0.0695518116298],
-                            [-0.1153860437, -0.299767683538],
-                            [-0.309967102571, 0.187391917117]])
-        site = np.array([[-0.848956053187, 0.882764759014],
-                         [-0.220458650578, -1.34482000302],
-                         [1.66697179591, 0.470324389808]])
-        biplot = None
-        site_constraints = None
-        prop_explained = None
-        species_ids = ['Species1', 'Species2', 'Species3']
-        site_ids = ['Site1', 'Site2', 'Site3']
-
-        self.ordination_results = OrdinationResults(
-            eigvals=eigvals, species=species, site=site, biplot=biplot,
-            site_constraints=site_constraints,
-            proportion_explained=prop_explained, species_ids=species_ids,
-            site_ids=site_ids)
-
-        # DataFrame for testing plot method. Has a categorical column with a
-        # mix of numbers and strings. Has a numeric column with a mix of ints,
-        # floats, and strings that can be converted to floats. Has a numeric
-        # column with missing data (np.nan).
-        self.df = pd.DataFrame([['foo', '42', 10],
-                                [22, 0, 8],
-                                [22, -4.2, np.nan],
-                                ['foo', '42.19', 11]],
-                               index=['A', 'B', 'C', 'D'],
-                               columns=['categorical', 'numeric', 'nancolumn'])
-
-        # Minimal ordination results for easier testing of plotting method.
-        # Paired with df above.
-        eigvals = np.array([0.50, 0.25, 0.25])
-        site = np.array([[0.1, 0.2, 0.3],
-                         [0.2, 0.3, 0.4],
-                         [0.3, 0.4, 0.5],
-                         [0.4, 0.5, 0.6]])
-        self.min_ord_results = OrdinationResults(eigvals=eigvals, site=site,
-                                                 site_ids=['A', 'B', 'C', 'D'])
-
-    def test_str(self):
-        exp = ("Ordination results:\n"
-               "\tEigvals: 2\n"
-               "\tProportion explained: N/A\n"
-               "\tSpecies: 3x2\n"
-               "\tSite: 3x2\n"
-               "\tBiplot: N/A\n"
-               "\tSite constraints: N/A\n"
-               "\tSpecies IDs: 'Species1', 'Species2', 'Species3'\n"
-               "\tSite IDs: 'Site1', 'Site2', 'Site3'")
-        obs = str(self.ordination_results)
-        self.assertEqual(obs, exp)
-
-        # all optional attributes missing
-        exp = ("Ordination results:\n"
-               "\tEigvals: 1\n"
-               "\tProportion explained: N/A\n"
-               "\tSpecies: N/A\n"
-               "\tSite: N/A\n"
-               "\tBiplot: N/A\n"
-               "\tSite constraints: N/A\n"
-               "\tSpecies IDs: N/A\n"
-               "\tSite IDs: N/A")
-        obs = str(OrdinationResults(np.array([4.2])))
-        self.assertEqual(obs, exp)
-
-    def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title,
-                                  exp_legend_exists, exp_xlabel, exp_ylabel,
-                                  exp_zlabel):
-        # check type
-        assert_is_instance(fig, mpl.figure.Figure)
-
-        # check number of subplots
-        axes = fig.get_axes()
-        npt.assert_equal(len(axes), exp_num_subplots)
-
-        # check title
-        ax = axes[0]
-        npt.assert_equal(ax.get_title(), exp_title)
-
-        # shouldn't have tick labels
-        for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() +
-                           ax.get_zticklabels()):
-            npt.assert_equal(tick_label.get_text(), '')
-
-        # check if legend is present
-        legend = ax.get_legend()
-        if exp_legend_exists:
-            assert_true(legend is not None)
-        else:
-            assert_true(legend is None)
-
-        # check axis labels
-        npt.assert_equal(ax.get_xlabel(), exp_xlabel)
-        npt.assert_equal(ax.get_ylabel(), exp_ylabel)
-        npt.assert_equal(ax.get_zlabel(), exp_zlabel)
-
-    def test_plot_no_metadata(self):
-        fig = self.min_ord_results.plot()
-        self.check_basic_figure_sanity(fig, 1, '', False, '0', '1', '2')
-
-    def test_plot_with_numeric_metadata_and_plot_options(self):
-        fig = self.min_ord_results.plot(
-            self.df, 'numeric', axes=(1, 0, 2),
-            axis_labels=['PC 2', 'PC 1', 'PC 3'], title='a title', cmap='Reds')
-        self.check_basic_figure_sanity(
-            fig, 2, 'a title', False, 'PC 2', 'PC 1', 'PC 3')
-
-    def test_plot_with_categorical_metadata_and_plot_options(self):
-        fig = self.min_ord_results.plot(
-            self.df, 'categorical', axes=[2, 0, 1], title='a title',
-            cmap='Accent')
-        self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1')
-
-    def test_plot_with_invalid_axis_labels(self):
-        with six.assertRaisesRegex(self, ValueError, 'axis_labels.*4'):
-            self.min_ord_results.plot(axes=[2, 0, 1],
-                                      axis_labels=('a', 'b', 'c', 'd'))
-
-    def test_validate_plot_axes_valid_input(self):
-        # shouldn't raise an error on valid input. nothing is returned, so
-        # nothing to check here
-        self.min_ord_results._validate_plot_axes(self.min_ord_results.site.T,
-                                                 (1, 2, 0))
-
-    def test_validate_plot_axes_invalid_input(self):
-        # not enough dimensions
-        with six.assertRaisesRegex(self, ValueError, '2 dimension\(s\)'):
-            self.min_ord_results._validate_plot_axes(
-                np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))
-
-        coord_matrix = self.min_ord_results.site.T
-
-        # wrong number of axes
-        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 0'):
-            self.min_ord_results._validate_plot_axes(coord_matrix, [])
-        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 4'):
-            self.min_ord_results._validate_plot_axes(coord_matrix,
-                                                     (0, 1, 2, 3))
-
-        # duplicate axes
-        with six.assertRaisesRegex(self, ValueError, 'must be unique'):
-            self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))
-
-        # out of range axes
-        with six.assertRaisesRegex(self, ValueError, 'axes\[1\].*3'):
-            self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
-        with six.assertRaisesRegex(self, ValueError, 'axes\[2\].*3'):
-            self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))
-
-    def test_get_plot_point_colors_invalid_input(self):
-        # column provided without df
-        with npt.assert_raises(ValueError):
-            self.min_ord_results._get_plot_point_colors(None, 'numeric',
-                                                        ['B', 'C'], 'jet')
-
-        # df provided without column
-        with npt.assert_raises(ValueError):
-            self.min_ord_results._get_plot_point_colors(self.df, None,
-                                                        ['B', 'C'], 'jet')
-
-        # column not in df
-        with six.assertRaisesRegex(self, ValueError, 'missingcol'):
-            self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
-                                                        ['B', 'C'], 'jet')
-
-        # id not in df
-        with six.assertRaisesRegex(self, ValueError, 'numeric'):
-            self.min_ord_results._get_plot_point_colors(
-                self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')
-
-        # missing data in df
-        with six.assertRaisesRegex(self, ValueError, 'nancolumn'):
-            self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
-                                                        ['B', 'C', 'A'], 'jet')
-
-    def test_get_plot_point_colors_no_df_or_column(self):
-        obs = self.min_ord_results._get_plot_point_colors(None, None,
-                                                          ['B', 'C'], 'jet')
-        npt.assert_equal(obs, (None, None))
-
-    def test_get_plot_point_colors_numeric_column(self):
-        # subset of the ids in df
-        exp = [0.0, -4.2, 42.0]
-        obs = self.min_ord_results._get_plot_point_colors(
-            self.df, 'numeric', ['B', 'C', 'A'], 'jet')
-        npt.assert_almost_equal(obs[0], exp)
-        assert_true(obs[1] is None)
-
-        # all ids in df
-        exp = [0.0, 42.0, 42.19, -4.2]
-        obs = self.min_ord_results._get_plot_point_colors(
-            self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet')
-        npt.assert_almost_equal(obs[0], exp)
-        assert_true(obs[1] is None)
-
-    def test_get_plot_point_colors_categorical_column(self):
-        # subset of the ids in df
-        exp_colors = [[0., 0., 0.5, 1.], [0., 0., 0.5, 1.], [0.5, 0., 0., 1.]]
-        exp_color_dict = {
-            'foo': [0.5, 0., 0., 1.],
-            22: [0., 0., 0.5, 1.]
-        }
-        obs = self.min_ord_results._get_plot_point_colors(
-            self.df, 'categorical', ['B', 'C', 'A'], 'jet')
-        npt.assert_almost_equal(obs[0], exp_colors)
-        npt.assert_equal(obs[1], exp_color_dict)
-
-        # all ids in df
-        exp_colors = [[0., 0., 0.5, 1.], [0.5, 0., 0., 1.], [0.5, 0., 0., 1.],
-                      [0., 0., 0.5, 1.]]
-        obs = self.min_ord_results._get_plot_point_colors(
-            self.df, 'categorical', ['B', 'A', 'D', 'C'], 'jet')
-        npt.assert_almost_equal(obs[0], exp_colors)
-        # should get same color dict as before
-        npt.assert_equal(obs[1], exp_color_dict)
-
-    def test_plot_categorical_legend(self):
-        fig = plt.figure()
-        ax = fig.add_subplot(111, projection='3d')
-
-        # we shouldn't have a legend yet
-        assert_true(ax.get_legend() is None)
-
-        self.min_ord_results._plot_categorical_legend(
-            ax, {'foo': 'red', 'bar': 'green'})
-
-        # make sure we have a legend now
-        legend = ax.get_legend()
-        assert_true(legend is not None)
-
-        # do some light sanity checking to make sure our input labels and
-        # colors are present. we're not using nose.tools.assert_items_equal
-        # because it isn't available in Python 3.
-        labels = [t.get_text() for t in legend.get_texts()]
-        npt.assert_equal(sorted(labels), ['bar', 'foo'])
-
-        colors = [l.get_color() for l in legend.get_lines()]
-        npt.assert_equal(sorted(colors), ['green', 'red'])
-
-    def test_repr_png(self):
-        obs = self.min_ord_results._repr_png_()
-        assert_is_instance(obs, binary_type)
-        assert_true(len(obs) > 0)
-
-    def test_repr_svg(self):
-        obs = self.min_ord_results._repr_svg_()
-        # print_figure(format='svg') can return text or bytes depending on the
-        # version of IPython
-        assert_true(isinstance(obs, text_type) or isinstance(obs, binary_type))
-        assert_true(len(obs) > 0)
-
-    def test_png(self):
-        assert_is_instance(self.min_ord_results.png, Image)
-
-    def test_svg(self):
-        assert_is_instance(self.min_ord_results.svg, SVG)
-
-
-if __name__ == '__main__':
-    import nose
-    nose.runmodule()
diff --git a/skbio/stats/ordination/tests/test_principal_coordinate_analysis.py b/skbio/stats/ordination/tests/test_principal_coordinate_analysis.py
new file mode 100644
index 0000000..585edf2
--- /dev/null
+++ b/skbio/stats/ordination/tests/test_principal_coordinate_analysis.py
@@ -0,0 +1,132 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import pandas as pd
+import numpy as np
+import numpy.testing as npt
+from unittest import TestCase, main
+
+from skbio import DistanceMatrix, OrdinationResults
+from skbio.stats.distance import DissimilarityMatrixError
+from skbio.stats.ordination import pcoa
+from skbio.util import get_data_path, assert_ordination_results_equal
+
+
+class TestPCoA(TestCase):
+    def setUp(self):
+        # Sample data set from page 111 of W.J Krzanowski. Principles
+        # of multivariate analysis, 2000, Oxford University Press.
+        self.dm = DistanceMatrix(
+            np.loadtxt(get_data_path('PCoA_sample_data')))
+
+    def test_simple(self):
+        eigvals = [0.51236726, 0.30071909, 0.26791207, 0.20898868,
+                   0.19169895, 0.16054235,  0.15017696,  0.12245775,
+                   0.0]
+        proportion_explained = [0.2675738328, 0.157044696, 0.1399118638,
+                                0.1091402725, 0.1001110485,
+                                0.0838401162, 0.0784269939,
+                                0.0639511764, 0.0]
+        sample_ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354',
+                      'PC.593', 'PC.355', 'PC.607', 'PC.634']
+        axis_labels = ['PC%d' % i for i in range(1, 10)]
+
+        expected_results = OrdinationResults(
+            short_method_name='PCoA',
+            long_method_name='Principal Coordinate Analysis',
+            eigvals=pd.Series(eigvals, index=axis_labels),
+            samples=pd.DataFrame(
+                np.loadtxt(get_data_path('exp_PCoAEigenResults_site')),
+                index=sample_ids, columns=axis_labels),
+            proportion_explained=pd.Series(proportion_explained,
+                                           index=axis_labels))
+
+        dm = DistanceMatrix.read(get_data_path('PCoA_sample_data_3'))
+        results = pcoa(dm)
+
+        assert_ordination_results_equal(results, expected_results,
+                                        ignore_directionality=True)
+
+    def test_extensive(self):
+        eigvals = [0.3984635, 0.36405689, 0.28804535, 0.27479983,
+                   0.19165361, 0.0]
+        proportion_explained = [0.2626621381, 0.2399817314,
+                                0.1898758748, 0.1811445992,
+                                0.1263356565, 0.0]
+        sample_ids = [str(i) for i in range(6)]
+        axis_labels = ['PC%d' % i for i in range(1, 7)]
+        samples = [[-0.028597, 0.22903853, 0.07055272, 0.26163576,
+                    0.28398669, 0.0],
+                   [0.37494056, 0.22334055, -0.20892914, 0.05057395,
+                    -0.18710366, 0.0],
+                   [-0.33517593, -0.23855979, -0.3099887, 0.11521787,
+                    -0.05021553, 0.0],
+                   [0.25412394, -0.4123464, 0.23343642, 0.06403168,
+                    -0.00482608, 0.0],
+                   [-0.28256844, 0.18606911, 0.28875631, -0.06455635,
+                    -0.21141632, 0.0],
+                   [0.01727687, 0.012458, -0.07382761, -0.42690292,
+                    0.1695749, 0.0]]
+
+        expected_results = OrdinationResults(
+            short_method_name='PCoA',
+            long_method_name='Principal Coordinate Analysis',
+            eigvals=pd.Series(eigvals, index=axis_labels),
+            samples=pd.DataFrame(samples, index=sample_ids,
+                                 columns=axis_labels),
+            proportion_explained=pd.Series(proportion_explained,
+                                           index=axis_labels))
+
+        data = np.loadtxt(get_data_path('PCoA_sample_data_2'))
+        # test passing a numpy.ndarray and a DistanceMatrix to pcoa
+        # gives same results
+        for dm in (data, DistanceMatrix(data)):
+            results = pcoa(dm)
+            assert_ordination_results_equal(results, expected_results,
+                                            ignore_directionality=True)
+
+    def test_book_example_dataset(self):
+        # Adapted from PyCogent's `test_principal_coordinate_analysis`:
+        #   "I took the example in the book (see intro info), and did
+        #   the principal coordinates analysis, plotted the data and it
+        #   looked right".
+        eigvals = [0.73599103, 0.26260032, 0.14926222, 0.06990457,
+                   0.02956972, 0.01931184, 0., 0., 0., 0., 0., 0., 0.,
+                   0.]
+        proportion_explained = [0.58105792, 0.20732046, 0.1178411,
+                                0.05518899, 0.02334502, 0.01524651, 0.,
+                                0., 0., 0., 0., 0., 0., 0.]
+        sample_ids = [str(i) for i in range(14)]
+        axis_labels = ['PC%d' % i for i in range(1, 15)]
+
+        expected_results = OrdinationResults(
+            short_method_name='PCoA',
+            long_method_name='Principal Coordinate Analysis',
+            eigvals=pd.Series(eigvals, index=axis_labels),
+            samples=pd.DataFrame(
+                np.loadtxt(get_data_path('exp_PCoAzeros_site')),
+                index=sample_ids, columns=axis_labels),
+            proportion_explained=pd.Series(proportion_explained,
+                                           index=axis_labels))
+
+        results = npt.assert_warns(RuntimeWarning, pcoa, self.dm)
+
+        # Note the absolute value because column can have signs swapped
+        results.samples = np.abs(results.samples)
+        assert_ordination_results_equal(results, expected_results,
+                                        ignore_directionality=True)
+
+    def test_invalid_input(self):
+        with npt.assert_raises(DissimilarityMatrixError):
+            pcoa([[1, 2], [3, 4]])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/skbio/stats/ordination/tests/test_redundancy_analysis.py b/skbio/stats/ordination/tests/test_redundancy_analysis.py
new file mode 100644
index 0000000..36a767a
--- /dev/null
+++ b/skbio/stats/ordination/tests/test_redundancy_analysis.py
@@ -0,0 +1,171 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+from unittest import TestCase, main
+
+from skbio import OrdinationResults
+from skbio.stats.ordination import rda
+from skbio.util import get_data_path, assert_ordination_results_equal
+
+
+class TestRDAErrors(TestCase):
+    def setUp(self):
+        pass
+
+    def test_shape(self):
+        for n, p, n_, m in [(3, 4, 2, 1), (3, 4, 3, 10)]:
+            Y = pd.DataFrame(np.random.randn(n, p))
+            X = pd.DataFrame(np.random.randn(n_, m))
+            # Generator-style (yield) tests are silently skipped under
+            # unittest, so assert inside the loop instead.
+            with npt.assert_raises(ValueError):
+                rda(Y, X, None, None)
+
+
+class TestRDAResults(TestCase):
+    # STATUS: L&L only shows results with scaling 1, and they agree
+    # with vegan's (modulo multiplying by a constant). I can also
+    # compute scaling 2, agreeing with vegan, but there are no written
+    # results in L&L.
+    def setUp(self):
+        """Data from table 11.3 in Legendre & Legendre 1998."""
+        self.sample_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
+                           'Site5', 'Site6', 'Site7', 'Site8', 'Site9']
+        self.feature_ids = ['Species0', 'Species1', 'Species2', 'Species3',
+                            'Species4', 'Species5']
+        # list() so the ids are materialized under Python 3 (map() is lazy)
+        self.env_ids = list(map(str, range(4)))
+        self.pc_ids = ['RDA1', 'RDA2', 'RDA3', 'RDA4', 'RDA5', 'RDA6', 'RDA7']
+
+        self.Y = pd.DataFrame(
+            np.loadtxt(get_data_path('example2_Y')),
+            index=self.sample_ids, columns=self.feature_ids)
+
+        self.X = pd.DataFrame(
+            np.loadtxt(get_data_path('example2_X')),
+            index=self.sample_ids, columns=self.env_ids)
+
+    def test_scaling1(self):
+
+        scores = rda(self.Y, self.X, scaling=1)
+
+        # Load data as computed with vegan 2.0-8
+        vegan_features = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example2_species_scaling1_from_vegan')),
+            index=self.feature_ids,
+            columns=self.pc_ids)
+
+        vegan_samples = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example2_site_scaling1_from_vegan')),
+            index=self.sample_ids,
+            columns=self.pc_ids)
+
+        sample_constraints = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example2_sample_constraints_scaling1')),
+            index=self.sample_ids,
+            columns=self.pc_ids)
+
+        biplot_scores = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example2_biplot_scaling1')))
+
+        # These are wrong. See issue #1002
+        proportion_explained = pd.Series([0.44275783, 0.25614586,
+                                          0.15280354, 0.10497021,
+                                          0.02873375, 0.00987052,
+                                          0.00471828],
+                                         index=self.pc_ids)
+        # These are wrong. See issue #1002
+        eigvals = pd.Series([25.897954, 14.982578, 8.937841, 6.139956,
+                             1.680705, 0.577350, 0.275984],
+                            index=self.pc_ids)
+
+        exp = OrdinationResults(
+            'RDA', 'Redundancy Analysis',
+            samples=vegan_samples,
+            features=vegan_features,
+            sample_constraints=sample_constraints,
+            biplot_scores=biplot_scores,
+            proportion_explained=proportion_explained,
+            eigvals=eigvals)
+
+        assert_ordination_results_equal(scores, exp,
+                                        ignore_biplot_scores_labels=True,
+                                        decimal=6)
+
+    def test_scaling2(self):
+
+        scores = rda(self.Y, self.X, scaling=2)
+
+        # Load data as computed with vegan 2.0-8
+        vegan_features = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example2_species_scaling2_from_vegan')),
+            index=self.feature_ids,
+            columns=self.pc_ids)
+
+        vegan_samples = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example2_site_scaling2_from_vegan')),
+            index=self.sample_ids,
+            columns=self.pc_ids)
+
+        sample_constraints = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example2_sample_constraints_scaling2')),
+            index=self.sample_ids,
+            columns=self.pc_ids)
+
+        biplot_scores = pd.DataFrame(
+            np.loadtxt(get_data_path(
+                'example2_biplot_scaling2')))
+
+        # These are wrong. See issue #1002
+        proportion_explained = pd.Series([0.44275783, 0.25614586,
+                                          0.15280354, 0.10497021,
+                                          0.02873375, 0.00987052,
+                                          0.00471828],
+                                         index=self.pc_ids)
+        # These are wrong. See issue #1002
+        eigvals = pd.Series([25.897954, 14.982578, 8.937841, 6.139956,
+                             1.680705, 0.577350, 0.275984],
+                            index=self.pc_ids)
+
+        exp = OrdinationResults(
+            'RDA', 'Redundancy Analysis',
+            samples=vegan_samples,
+            features=vegan_features,
+            sample_constraints=sample_constraints,
+            biplot_scores=biplot_scores,
+            proportion_explained=proportion_explained,
+            eigvals=eigvals)
+
+        assert_ordination_results_equal(scores, exp,
+                                        ignore_biplot_scores_labels=True,
+                                        decimal=6)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/stats/ordination/tests/test_util.py b/skbio/stats/ordination/tests/test_util.py
new file mode 100644
index 0000000..785b861
--- /dev/null
+++ b/skbio/stats/ordination/tests/test_util.py
@@ -0,0 +1,69 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+
+import numpy as np
+import numpy.testing as npt
+
+from unittest import TestCase, main
+
+from skbio.stats.ordination import corr, mean_and_std, e_matrix, f_matrix
+
+
+class TestUtils(TestCase):
+    def setUp(self):
+        self.x = np.array([[1, 2, 3], [4, 5, 6]])
+        self.y = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+
+        self.matrix = np.arange(1, 7).reshape(2, 3)
+        self.matrix2 = np.arange(1, 10).reshape(3, 3)
+
+        self.small_mat = np.array([[7, 5, 5], [4, 4, 9], [7, 5, 3]])
+
+    def test_mean_and_std(self):
+        obs = mean_and_std(self.x)
+        npt.assert_almost_equal((3.5, 1.707825127), obs)
+
+        obs = mean_and_std(self.x, with_std=False)
+        self.assertEqual((3.5, None), obs)
+
+        obs = mean_and_std(self.x, ddof=2)
+        npt.assert_almost_equal((3.5, 2.091650066), obs)
+
+    def test_mean_and_std_no_mean_no_std(self):
+        with npt.assert_raises(ValueError):
+            mean_and_std(self.x, with_mean=False, with_std=False)
+
+    def test_corr(self):
+        obs = corr(self.small_mat)
+        npt.assert_almost_equal(np.array([[1, 1, -0.94491118],
+                                          [1, 1, -0.94491118],
+                                          [-0.94491118, -0.94491118, 1]]),
+                                obs)
+
+    def test_corr_shape_mismatch(self):
+        with npt.assert_raises(ValueError):
+            corr(self.x, self.y)
+
+    def test_e_matrix(self):
+        E = e_matrix(self.matrix)
+        expected_E = np.array([[-0.5,  -2.,  -4.5],
+                               [-8., -12.5, -18.]])
+        npt.assert_almost_equal(E, expected_E)
+
+    def test_f_matrix(self):
+        F = f_matrix(self.matrix2)
+        expected_F = np.zeros((3, 3))
+        # Note that `test_make_F_matrix` in cogent is wrong
+        npt.assert_almost_equal(F, expected_F)
+
+
+if __name__ == '__main__':
+    main()
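
The e_matrix/f_matrix expectations in the new test_util.py above follow the
usual PCoA double-centering: E holds -0.5 * d**2 elementwise, and F is E with
its row means and column means subtracted and the grand mean added back. A
minimal sketch under that assumption (the helper names and implementations
below are illustrative, not the library's code) reproduces the values asserted
in test_e_matrix and test_f_matrix:

    import numpy as np


    def e_matrix_sketch(mat):
        # Elementwise -0.5 * d**2; on arange(1, 7).reshape(2, 3) this yields
        # [[-0.5, -2.0, -4.5], [-8.0, -12.5, -18.0]], i.e. expected_E above.
        mat = np.asarray(mat, dtype=float)
        return -0.5 * mat ** 2


    def f_matrix_sketch(mat):
        # Gower centering: subtract row and column means, add the grand mean.
        # An additive matrix such as arange(1, 10).reshape(3, 3) centers to
        # all zeros, i.e. expected_F above.
        mat = np.asarray(mat, dtype=float)
        return (mat - mat.mean(axis=1, keepdims=True)
                - mat.mean(axis=0, keepdims=True) + mat.mean())
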
diff --git a/skbio/stats/power.py b/skbio/stats/power.py
index 5151800..3567d66 100644
--- a/skbio/stats/power.py
+++ b/skbio/stats/power.py
@@ -50,7 +50,6 @@ Functions
     subsample_paired_power
     confidence_bound
     paired_subsamples
-    bootstrap_power_curve
 
 Examples
 --------
@@ -100,29 +99,32 @@ estimate for the critical value of 0.01, and a critical value of 0.001.
 ...                                       min_counts=3,
 ...                                       counts_interval=1,
 ...                                       draw_mode="matched",
-...                                       alpha_pwr=0.1)
+...                                       alpha_pwr=0.1,
+...                                       num_iter=25)
 >>> pwr_010, counts_010 = subsample_power(test=f,
 ...                                       samples=samples,
 ...                                       max_counts=10,
 ...                                       min_counts=3,
 ...                                       counts_interval=1,
 ...                                       draw_mode="matched",
-...                                       alpha_pwr=0.01)
+...                                       alpha_pwr=0.01,
+...                                       num_iter=25)
 >>> pwr_001, counts_001 = subsample_power(test=f,
 ...                                       samples=samples,
 ...                                       max_counts=10,
 ...                                       min_counts=3,
 ...                                       counts_interval=1,
 ...                                       draw_mode="matched",
-...                                       alpha_pwr=0.001)
+...                                       alpha_pwr=0.001,
+...                                       num_iter=25)
 >>> counts_100
 array([3, 4, 5, 6, 7, 8, 9])
 >>> pwr_100.mean(0)
-array([ 0.4716,  0.8226,  0.9424,  0.986 ,  0.9988,  1.    ,  1.    ])
+array([ 0.484,  0.844,  0.932,  0.984,  1.   ,  1.   ,  1.   ])
 >>> pwr_010.mean(0)
-array([ 0.0492,  0.2368,  0.5462,  0.823 ,  0.9474,  0.9828,  0.9982])
+array([ 0.044,  0.224,  0.572,  0.836,  0.928,  0.996,  1.   ])
 >>> pwr_001.mean(0)
-array([ 0.0028,  0.0174,  0.1262,  0.342 ,  0.5928,  0.8256,  0.9594])
+array([ 0.   ,  0.016,  0.108,  0.332,  0.572,  0.848,  0.956])
 
 Based on this power estimate, as we increase our confidence that we have not
 committed a type I error and identified a false positive, the number of samples
@@ -149,7 +151,7 @@ import numpy as np
 import scipy.stats
 import six
 
-from skbio.util._decorator import experimental, deprecated
+from skbio.util._decorator import experimental
 
 
 @experimental(as_of="0.4.0")
@@ -261,11 +263,11 @@ def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05, ratio=None,
 
     >>> from scipy.stats import chisquare, nanmean
     >>> test = lambda x: chisquare(np.array([x[i].sum() for i in
-    ...     xrange(len(x))]))[1]
+    ...     range(len(x))]))[1]
 
     Let's make sure that our two distributions are different.
 
-    >>> round(test([pre_rate, pos_rate]), 3)
+    >>> print(round(test([pre_rate, pos_rate]), 3))
     0.003
 
     Since there are an even number of samples, and we don't have enough
@@ -319,7 +321,7 @@ def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05, ratio=None,
     >>> from scipy.stats import kruskal
     >>> def metabolite_test(x):
     ...     return kruskal(x[0], x[1])[1]
-    >>> round(metabolite_test([met_pos, met_neg]), 3)
+    >>> print(round(metabolite_test([met_pos, met_neg]), 3))
     0.005
 
     When we go to perform the statistical test on all the data, you might
@@ -513,14 +515,14 @@ def subsample_paired_power(test, meta, cat, control_cats, order=None,
     ...                                   cat='TREATMENT',
     ...                                   control_cats=control_cats,
     ...                                   counts_interval=5,
-    ...                                   num_iter=100,
+    ...                                   num_iter=25,
     ...                                   num_runs=5)
     >>> cnt
     array([  5.,  10.,  15.,  20.])
     >>> pwr.mean(0)
-    array([ 0.196,  0.356,  0.642,  0.87 ])
+    array([ 0.24 ,  0.528,  0.68 ,  0.88 ])
     >>> pwr.std(0).round(3)
-    array([ 0.019,  0.021,  0.044,  0.026])
+    array([ 0.088,  0.127,  0.168,  0.08 ])
 
     Estimating off the power curve, it looks like 20 cells per group may
     provide adequate power for this experiment, although the large variance
@@ -613,110 +615,6 @@ def confidence_bound(vec, alpha=0.05, df=None, axis=None):
 
     return bound
 
-bootstrap_power_curve_deprecation_reason = (
-    "Please use skbio.stats.power.subsample_power or "
-    "skbio.stats.power.subsample_paired_power followed by "
-    "confidence_bound.")
-
-
- at deprecated(as_of="0.2.3-dev", until="0.4.1",
-            reason=bootstrap_power_curve_deprecation_reason)
-def bootstrap_power_curve(test, samples, sample_counts, ratio=None,
-                          alpha=0.05, mode='ind', num_iter=500, num_runs=10):
-    r"""Repeatedly calculates the power curve for a specified alpha level
-
-    Parameters
-    ----------
-    test : function
-        The statistical test which accepts an array_like of sample ids
-        (list of lists or arrays) and returns a p-value.
-    samples : array_like
-        samples can be a list of lists or an array where each sublist or row in
-        the array corresponds to a sampled group.
-    sample_counts : 1-D array_like
-        A vector of the number of samples which should be sampled in each curve
-    ratio : 1-D array_like, optional
-        The fraction of the sample counts which should be assigned to each
-        group. This must be None, or the same length as samples. If ratio is
-        None, the same number of observations are drawn from each sample.
-    alpha : float, optional
-        The critical value for calculating power. The default is 0.05.
-    mode : {"ind", "matched"}, optional
-        "matched" samples should be used when observations in
-        samples have corresponding observations in other groups. For instance,
-        this may be useful when working with regression data where
-        :math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
-        y_{n}`.
-    num_iter : positive int, optional
-        The number of p-values to generate for each point on the curve.
-    num_runs : positive int, optional
-        The number of times to calculate each curve.
-
-    Returns
-    -------
-    power_mean : 1-D array
-        The mean power calculated across the runs for each sample count.
-    power_bound : 1-D array
-        The confidence bound around the mean power.
-
-    Examples
-    --------
-    Suppose we have 100 samples randomly drawn from two normal distributions,
-    the first with mean 0 and standard deviation 1, and the second with mean 1
-    and standard deviation 1.5.
-
-    >>> import numpy as np
-    >>> np.random.seed(20)
-    >>> samples_1 = np.random.randn(100)
-    >>> samples_2 = 1.5 * np.random.randn(100) + 1
-
-    We want to test the statistical power of an independent two sample t-test
-    comparing the two populations. We can define an anonymous function, `f`,
-    to wrap the scipy function for independent t tests,
-    `scipy.stats.ttest_ind`. The test function will take a list of value
-    vectors and return a p value.
-
-    >>> from scipy.stats import ttest_ind
-    >>> f = lambda x: ttest_ind(x[0], x[1])[1]
-
-    Now, we can determine the statistical power, or the probability of
-    correctly rejecting the null hypothesis when it is false (that is, of
-    avoiding a type II error), by varying the number of subsamples.
-
-    >>> from skbio.stats.power import bootstrap_power_curve
-    >>> sample_counts = np.arange(5, 80, 5)
-    >>> power_mean, power_bound = bootstrap_power_curve(f,
-    ...                                                 [samples_1, samples_2],
-    ...                                                 sample_counts)
-    >>> sample_counts[power_mean - power_bound.round(3) > .80].min()
-    20
-
-    Based on this analysis, it looks like we need at least 20 observations
-    from each distribution to avoid committing a type II error more than 20%
-    of the time.
-
-    """
-
-    # Expands the alpha value into a vector with one value per run
-    alpha = np.ones((num_runs)) * alpha
-
-    # Bootstraps the power curve
-    power = _calculate_power_curve(test=test,
-                                   samples=samples,
-                                   sample_counts=sample_counts,
-                                   ratio=ratio,
-                                   num_iter=num_iter,
-                                   alpha=alpha,
-                                   mode=mode)
-    # Calculates two summary statistics
-    power_mean = power.mean(0)
-    power_bound = confidence_bound(power, alpha=alpha[0], axis=0)
-
-    # Calculates summary statistics
-    return power_mean, power_bound
-
 
 @experimental(as_of="0.4.0")
 def paired_subsamples(meta, cat, control_cats, order=None, strict_match=True):
@@ -1110,8 +1008,11 @@ def _identify_sample_groups(meta, cat, control_cats, order, strict_match):
 
     # Groups the data by the control groups
     ctrl_groups = meta.groupby(control_cats).groups
-    # Identifies the samples that satisfy the control pairs
-    for (g, ids) in viewitems(ctrl_groups):
+    # Identifies the samples that satisfy the control pairs. Keys are iterated
+    # in sorted order so that results don't change with different dictionary
+    # ordering (especially apparent in Python 3).
+    for g in sorted(ctrl_groups, key=lambda k: str(k)):
+        ids = ctrl_groups[g]
         # If strict_match, Skips over data that has nans
         if not _check_nans(g, switch=True) and strict_match:
             continue
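
The deprecation notice removed above spells out the migration path for
bootstrap_power_curve: estimate power with subsample_power (or
subsample_paired_power) and summarize it with confidence_bound. A rough sketch
of that pattern, reusing the two-sample t-test setup from the deleted docstring
example; the specific count/iteration values are illustrative, and the sketch
assumes (as the .mean(0) calls in the module docstring suggest) that the
returned power array has one row per run and one column per sample count:

    import numpy as np
    from scipy.stats import ttest_ind

    from skbio.stats.power import confidence_bound, subsample_power

    np.random.seed(20)
    samples = [np.random.randn(100), 1.5 * np.random.randn(100) + 1]

    # p-value of an independent two-sample t-test between the two groups
    f = lambda x: ttest_ind(x[0], x[1])[1]

    # power curve: rows are runs, columns correspond to the returned counts
    power, counts = subsample_power(test=f,
                                    samples=samples,
                                    min_counts=5,
                                    max_counts=80,
                                    counts_interval=5,
                                    num_iter=100)
    power_mean = power.mean(0)
    power_bound = confidence_bound(power, alpha=0.05, axis=0)

    # smallest sample count whose lower bound on power clears 0.80
    print(counts[power_mean - power_bound > 0.80].min())
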
diff --git a/skbio/stats/spatial.py b/skbio/stats/spatial.py
deleted file mode 100644
index 9cfcb2a..0000000
--- a/skbio/stats/spatial.py
+++ /dev/null
@@ -1,201 +0,0 @@
-r"""
-Spatial Statistics (:mod:`skbio.stats.spatial`)
-===============================================
-
-.. currentmodule:: skbio.stats.spatial
-
-This module provides functions for spatial analysis.
-
-Functions
----------
-
-.. autosummary::
-   :toctree: generated/
-
-   procrustes
-
-"""
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-
-import numpy as np
-
-from skbio.util._decorator import deprecated
-
-
- at deprecated(as_of="0.4.0", until="0.4.1",
-            reason="You should now use scipy.spatial.procrustes.")
-def procrustes(data1, data2):
-    r"""Procrustes analysis, a similarity test for two data sets
-
-    Each input matrix is a set of points or vectors (the rows of the matrix).
-    The dimension of the space is the number of columns of each matrix. Given
-    two identically sized matrices, procrustes standardizes both such that:
-
-    - trace(AA') = 1  (A' is the transpose, and the product is a standard
-      matrix product).
-    - Both sets of points are centered around the origin.
-
-    Procrustes ([1]_, [2]_) then applies the optimal transform to the second
-    matrix (including scaling/dilation, rotations, and reflections) to minimize
-    M^2 = sum(square(mtx1 - mtx2)), or the sum of the squares of the pointwise
-    differences between the two input datasets.
-
-    If two data sets have different dimensionality (different number of
-    columns), simply add columns of zeros to the smaller of the two.
-
-    This function was not designed to handle datasets with different numbers of
-    datapoints (rows).
-
-    Parameters
-    ----------
-    data1 : array_like
-        Matrix whose n rows represent points in k (columns) space. data1 is
-        the reference data; after it is standardized, the data from data2
-        will be transformed to fit the pattern in data1 (must have >1 unique
-        points).
-
-    data2 : array_like
-        n rows of data in k space to be fit to data1. Must be the same
-        shape (numrows, numcols) as data1 (must have >1 unique points).
-
-
-    Returns
-    -------
-    mtx1 : array_like
-        a standardized version of data1
-    mtx2 : array_like
-        the orientation of data2 that best fits data1. Centered, but not
-        necessarily trace(mtx2*mtx2') = 1
-    disparity : array_like
-        M^2 defined above
-
-
-    Notes
-    -----
-
-    - The disparity should not depend on the order of the input matrices, but
-      the output matrices will, as only the first output matrix is guaranteed
-      to be scaled such that ``trace(AA') = 1``.
-
-    - Duplicate data points are generally fine; duplicating a data point will
-      increase its effect on the procrustes fit.
-
-    - The disparity scales as the number of points per input matrix.
-
-    References
-    ----------
-
-    .. [1] Krzanowski, W. J. (2000). "Principles of Multivariate analysis".
-    .. [2] Gower, J. C. (1975). "Generalized procrustes analysis".
-
-    Examples
-    --------
-
-    >>> import numpy as np
-    >>> from skbio.stats.spatial import procrustes
-    >>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
-    >>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
-    >>> mtx1, mtx2, disparity = procrustes(a, b)
-    >>> print(round(disparity))
-    0.0
-
-    """
-    num_rows, num_cols = np.shape(data1)
-    if (num_rows, num_cols) != np.shape(data2):
-        raise ValueError("input matrices must be of same shape")
-    if num_rows == 0 or num_cols == 0:
-        raise ValueError("input matrices must be >0 rows, >0 cols")
-
-    # standardize each matrix
-    mtx1 = _center(data1)
-    mtx2 = _center(data2)
-
-    if (not np.any(mtx1)) or (not np.any(mtx2)):
-        raise ValueError("input matrices must contain >1 unique points")
-
-    mtx1 = _normalize(mtx1)
-    mtx2 = _normalize(mtx2)
-
-    # transform mtx2 to minimize disparity (sum( (mtx1[i,j] - mtx2[i,j])^2) )
-    mtx2 = _match_points(mtx1, mtx2)
-
-    disparity = _get_disparity(mtx1, mtx2)
-
-    return mtx1, mtx2, disparity
-
-
-def _center(mtx):
-    """Translate all data (rows of the matrix) to center on the origin
-
-    Parameters
-    ----------
-    mtx : array_like
-        Matrix to translate the data for.
-
-    Returns
-    -------
-    result : array_like ('d') array
-        Shifted version of the input data.  The new matrix is such that the
-        center of mass of the row vectors is centered at the origin.
-
-    """
-    result = np.array(mtx, 'd')
-    result -= np.mean(result, 0)
-    # subtract each column's mean from each element in that column
-    return result
-
-
-def _normalize(mtx):
-    """change scaling of data (in rows) such that trace(mtx*mtx') = 1
-
-    Parameters
-    ----------
-    mtx : array_like
-        Matrix to scale the data for.
-
-    Notes
-    -----
-    mtx' denotes the transpose of mtx
-
-    """
-    mtx = np.asarray(mtx, dtype=float)
-    return mtx / np.linalg.norm(mtx)
-
-
-def _match_points(mtx1, mtx2):
-    """Returns a transformed mtx2 that matches mtx1.
-
-    Returns
-    -------
-
-    A new matrix which is a transform of mtx2.  Scales and rotates a copy of
-    mtx2. See procrustes docs for details.
-
-    """
-    u, s, vh = np.linalg.svd(np.dot(np.transpose(mtx1), mtx2))
-    q = np.dot(np.transpose(vh), np.transpose(u))
-    new_mtx2 = np.dot(mtx2, q)
-    new_mtx2 *= np.sum(s)
-
-    return new_mtx2
-
-
-def _get_disparity(mtx1, mtx2):
-    """Measures the dissimilarity between two data sets
-
-    Returns
-    -------
-
-    M^2 = sum(square(mtx1 - mtx2)), the pointwise sum of squared differences
-
-    """
-    return(np.sum(np.square(mtx1 - mtx2)))
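
The module deleted above was deprecated in favor of scipy.spatial.procrustes,
which exposes the same (mtx1, mtx2, disparity) interface. A minimal migration
of the deleted doctest, assuming a SciPy release that ships
scipy.spatial.procrustes as the deprecation notice implies:

    import numpy as np
    from scipy.spatial import procrustes

    # the "L" and its shifted, mirrored, rescaled copy from the old doctest
    a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
    b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')

    # mtx1/mtx2 are the standardized inputs; disparity is sum((mtx1 - mtx2)**2)
    mtx1, mtx2, disparity = procrustes(a, b)
    print(round(disparity, 6))  # effectively 0: b is a similarity transform of a
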
diff --git a/skbio/stats/tests/test_composition.py b/skbio/stats/tests/test_composition.py
index 62fa3fc..00e5ac1 100644
--- a/skbio/stats/tests/test_composition.py
+++ b/skbio/stats/tests/test_composition.py
@@ -11,38 +11,62 @@ from __future__ import absolute_import, division, print_function
 from unittest import TestCase, main
 import numpy as np
 import numpy.testing as npt
+import pandas.util.testing as pdt
+
+from numpy.random import normal
+import pandas as pd
+import scipy
+import copy
+from skbio.util import assert_data_frame_almost_equal
 from skbio.stats.composition import (closure, multiplicative_replacement,
-                                     perturb, perturb_inv, power,
-                                     clr, centralize)
+                                     perturb, perturb_inv, power, inner,
+                                     clr, clr_inv, ilr, ilr_inv,
+                                     centralize, _holm_bonferroni, ancom)
 
 
 class CompositionTests(TestCase):
 
     def setUp(self):
-        self.data1 = np.array([[2, 2, 6],
-                               [4, 4, 2]])
-        self.data2 = np.array([2, 2, 6])
-
-        self.data3 = np.array([[1, 2, 3, 0, 5],
-                               [1, 0, 0, 4, 5],
-                               [1, 2, 3, 4, 5]])
-        self.data4 = np.array([1, 2, 3, 0, 5])
-        self.data5 = [[2, 2, 6], [4, 4, 2]]
-        self.data6 = [[1, 2, 3, 0, 5],
-                      [1, 0, 0, 4, 5],
-                      [1, 2, 3, 4, 5]]
+        # Compositional data
+        self.cdata1 = np.array([[2, 2, 6],
+                                [4, 4, 2]])
+        self.cdata2 = np.array([2, 2, 6])
+
+        self.cdata3 = np.array([[1, 2, 3, 0, 5],
+                                [1, 0, 0, 4, 5],
+                                [1, 2, 3, 4, 5]])
+        self.cdata4 = np.array([1, 2, 3, 0, 5])
+        self.cdata5 = [[2, 2, 6], [4, 4, 2]]
+        self.cdata6 = [[1, 2, 3, 0, 5],
+                       [1, 0, 0, 4, 5],
+                       [1, 2, 3, 4, 5]]
+        self.cdata7 = [np.exp(1), 1, 1]
+        self.cdata8 = [np.exp(1), 1, 1, 1]
+
+        # Simplicial orthonormal basis obtained from Gram-Schmidt
+        self.ortho1 = [[0.44858053, 0.10905743, 0.22118102, 0.22118102],
+                       [0.3379924, 0.3379924, 0.0993132, 0.22470201],
+                       [0.3016453, 0.3016453, 0.3016453, 0.09506409]]
+
+        # Real data
+        self.rdata1 = [[0.70710678, -0.70710678, 0., 0.],
+                       [0.40824829, 0.40824829, -0.81649658, 0.],
+                       [0.28867513, 0.28867513, 0.28867513, -0.8660254]]
+
         # Bad datasets
+        # negative count
         self.bad1 = np.array([1, 2, -1])
+        # 3-D array (too many dimensions)
         self.bad2 = np.array([[[1, 2, 3, 0, 5]]])
 
     def test_closure(self):
 
-        npt.assert_allclose(closure(self.data1),
+        npt.assert_allclose(closure(self.cdata1),
                             np.array([[.2, .2, .6],
                                       [.4, .4, .2]]))
-        npt.assert_allclose(closure(self.data2),
+        npt.assert_allclose(closure(self.cdata2),
                             np.array([.2, .2, .6]))
-        npt.assert_allclose(closure(self.data5),
+        npt.assert_allclose(closure(self.cdata5),
                             np.array([[.2, .2, .6],
                                       [.4, .4, .2]]))
         with self.assertRaises(ValueError):
@@ -52,55 +76,55 @@ class CompositionTests(TestCase):
             closure(self.bad2)
 
         # make sure that inplace modification is not occurring
-        closure(self.data2)
-        npt.assert_allclose(self.data2, np.array([2, 2, 6]))
+        closure(self.cdata2)
+        npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
 
     def test_perturb(self):
-        pmat = perturb(closure(self.data1),
+        pmat = perturb(closure(self.cdata1),
                        closure(np.array([1, 1, 1])))
         npt.assert_allclose(pmat,
                             np.array([[.2, .2, .6],
                                       [.4, .4, .2]]))
 
-        pmat = perturb(closure(self.data1),
+        pmat = perturb(closure(self.cdata1),
                        closure(np.array([10, 10, 20])))
         npt.assert_allclose(pmat,
                             np.array([[.125, .125, .75],
                                       [1./3, 1./3, 1./3]]))
 
-        pmat = perturb(closure(self.data1),
+        pmat = perturb(closure(self.cdata1),
                        closure(np.array([10, 10, 20])))
         npt.assert_allclose(pmat,
                             np.array([[.125, .125, .75],
                                       [1./3, 1./3, 1./3]]))
 
-        pmat = perturb(closure(self.data2),
+        pmat = perturb(closure(self.cdata2),
                        closure([1, 2, 1]))
         npt.assert_allclose(pmat, np.array([1./6, 2./6, 3./6]))
 
-        pmat = perturb(closure(self.data5),
+        pmat = perturb(closure(self.cdata5),
                        closure(np.array([1, 1, 1])))
         npt.assert_allclose(pmat,
                             np.array([[.2, .2, .6],
                                       [.4, .4, .2]]))
 
         with self.assertRaises(ValueError):
-            perturb(closure(self.data5), self.bad1)
+            perturb(closure(self.cdata5), self.bad1)
 
         # make sure that inplace modification is not occurring
-        perturb(self.data2, [1, 2, 3])
-        npt.assert_allclose(self.data2, np.array([2, 2, 6]))
+        perturb(self.cdata2, [1, 2, 3])
+        npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
 
     def test_power(self):
-        pmat = power(closure(self.data1), 2)
+        pmat = power(closure(self.cdata1), 2)
         npt.assert_allclose(pmat,
                             np.array([[.04/.44, .04/.44, .36/.44],
                                       [.16/.36, .16/.36, .04/.36]]))
 
-        pmat = power(closure(self.data2), 2)
+        pmat = power(closure(self.cdata2), 2)
         npt.assert_allclose(pmat, np.array([.04, .04, .36])/.44)
 
-        pmat = power(closure(self.data5), 2)
+        pmat = power(closure(self.cdata5), 2)
         npt.assert_allclose(pmat,
                             np.array([[.04/.44, .04/.44, .36/.44],
                                       [.16/.36, .16/.36, .04/.36]]))
@@ -109,34 +133,55 @@ class CompositionTests(TestCase):
             power(self.bad1, 2)
 
         # make sure that inplace modification is not occurring
-        power(self.data2, 4)
-        npt.assert_allclose(self.data2, np.array([2, 2, 6]))
+        power(self.cdata2, 4)
+        npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
 
     def test_perturb_inv(self):
-        pmat = perturb_inv(closure(self.data1),
+        pmat = perturb_inv(closure(self.cdata1),
                            closure([.1, .1, .1]))
-        imat = perturb(closure(self.data1),
+        imat = perturb(closure(self.cdata1),
                        closure([10, 10, 10]))
         npt.assert_allclose(pmat, imat)
-        pmat = perturb_inv(closure(self.data1),
+        pmat = perturb_inv(closure(self.cdata1),
                            closure([1, 1, 1]))
         npt.assert_allclose(pmat,
                             closure([[.2, .2, .6],
                                      [.4, .4, .2]]))
-        pmat = perturb_inv(closure(self.data5),
+        pmat = perturb_inv(closure(self.cdata5),
                            closure([.1, .1, .1]))
-        imat = perturb(closure(self.data1), closure([10, 10, 10]))
+        imat = perturb(closure(self.cdata1), closure([10, 10, 10]))
         npt.assert_allclose(pmat, imat)
 
         with self.assertRaises(ValueError):
-            perturb_inv(closure(self.data1), self.bad1)
+            perturb_inv(closure(self.cdata1), self.bad1)
+
+        # make sure that inplace modification is not occurring
+        perturb_inv(self.cdata2, [1, 2, 3])
+        npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
+
+    def test_inner(self):
+        a = inner(self.cdata5, self.cdata5)
+        npt.assert_allclose(a, np.array([[0.80463264, -0.50766667],
+                                         [-0.50766667, 0.32030201]]))
+
+        b = inner(self.cdata7, self.cdata7)
+        npt.assert_allclose(b, 0.66666666666666663)
+
+        # Make sure that orthogonality holds
+        npt.assert_allclose(inner(self.ortho1, self.ortho1), np.identity(3),
+                            rtol=1e-04, atol=1e-06)
+
+        with self.assertRaises(ValueError):
+            inner(self.cdata1, self.cdata8)
 
         # make sure that inplace modification is not occurring
-        perturb_inv(self.data2, [1, 2, 3])
-        npt.assert_allclose(self.data2, np.array([2, 2, 6]))
+        inner(self.cdata1, self.cdata1)
+        npt.assert_allclose(self.cdata1,
+                            np.array([[2, 2, 6],
+                                      [4, 4, 2]]))
 
     def test_multiplicative_replacement(self):
-        amat = multiplicative_replacement(closure(self.data3))
+        amat = multiplicative_replacement(closure(self.cdata3))
         npt.assert_allclose(amat,
                             np.array([[0.087273, 0.174545, 0.261818,
                                        0.04, 0.436364],
@@ -145,13 +190,13 @@ class CompositionTests(TestCase):
                                        0.266667, 0.333333]]),
                             rtol=1e-5, atol=1e-5)
 
-        amat = multiplicative_replacement(closure(self.data4))
+        amat = multiplicative_replacement(closure(self.cdata4))
         npt.assert_allclose(amat,
                             np.array([0.087273, 0.174545, 0.261818,
                                       0.04, 0.436364]),
                             rtol=1e-5, atol=1e-5)
 
-        amat = multiplicative_replacement(closure(self.data6))
+        amat = multiplicative_replacement(closure(self.cdata6))
         npt.assert_allclose(amat,
                             np.array([[0.087273, 0.174545, 0.261818,
                                        0.04, 0.436364],
@@ -166,23 +211,23 @@ class CompositionTests(TestCase):
             multiplicative_replacement(self.bad2)
 
         # make sure that inplace modification is not occurring
-        multiplicative_replacement(self.data4)
-        npt.assert_allclose(self.data4, np.array([1, 2, 3, 0, 5]))
+        multiplicative_replacement(self.cdata4)
+        npt.assert_allclose(self.cdata4, np.array([1, 2, 3, 0, 5]))
 
     def test_clr(self):
-        cmat = clr(closure(self.data1))
+        cmat = clr(closure(self.cdata1))
         A = np.array([.2, .2, .6])
         B = np.array([.4, .4, .2])
 
         npt.assert_allclose(cmat,
                             [np.log(A / np.exp(np.log(A).mean())),
                              np.log(B / np.exp(np.log(B).mean()))])
-        cmat = clr(closure(self.data2))
+        cmat = clr(closure(self.cdata2))
         A = np.array([.2, .2, .6])
         npt.assert_allclose(cmat,
                             np.log(A / np.exp(np.log(A).mean())))
 
-        cmat = clr(closure(self.data5))
+        cmat = clr(closure(self.cdata5))
         A = np.array([.2, .2, .6])
         B = np.array([.4, .4, .2])
 
@@ -195,15 +240,28 @@ class CompositionTests(TestCase):
             clr(self.bad2)
 
         # make sure that inplace modification is not occurring
-        clr(self.data2)
-        npt.assert_allclose(self.data2, np.array([2, 2, 6]))
+        clr(self.cdata2)
+        npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
+
+    def test_clr_inv(self):
+        npt.assert_allclose(clr_inv(self.rdata1), self.ortho1)
+        npt.assert_allclose(clr(clr_inv(self.rdata1)), self.rdata1)
+
+        # make sure that inplace modification is not occurring
+        clr_inv(self.rdata1)
+        npt.assert_allclose(self.rdata1,
+                            np.array([[0.70710678, -0.70710678, 0., 0.],
+                                      [0.40824829, 0.40824829,
+                                       -0.81649658, 0.],
+                                      [0.28867513, 0.28867513,
+                                       0.28867513, -0.8660254]]))
 
     def test_centralize(self):
-        cmat = centralize(closure(self.data1))
+        cmat = centralize(closure(self.cdata1))
         npt.assert_allclose(cmat,
                             np.array([[0.22474487, 0.22474487, 0.55051026],
                                       [0.41523958, 0.41523958, 0.16952085]]))
-        cmat = centralize(closure(self.data5))
+        cmat = centralize(closure(self.cdata5))
         npt.assert_allclose(cmat,
                             np.array([[0.22474487, 0.22474487, 0.55051026],
                                       [0.41523958, 0.41523958, 0.16952085]]))
@@ -213,10 +271,510 @@ class CompositionTests(TestCase):
         with self.assertRaises(ValueError):
             centralize(self.bad2)
 
-        centralize(self.data1)
-        npt.assert_allclose(self.data1,
+        # make sure that inplace modification is not occurring
+        centralize(self.cdata1)
+        npt.assert_allclose(self.cdata1,
+                            np.array([[2, 2, 6],
+                                      [4, 4, 2]]))
+
+    def test_ilr(self):
+        mat = closure(self.cdata7)
+        npt.assert_array_almost_equal(ilr(mat),
+                                      np.array([0.70710678, 0.40824829]))
+
+        # Should give same result as inner
+        npt.assert_allclose(ilr(self.ortho1), np.identity(3),
+                            rtol=1e-04, atol=1e-06)
+
+        with self.assertRaises(ValueError):
+            ilr(self.cdata1, basis=self.cdata1)
+
+        # make sure that inplace modification is not occurring
+        ilr(self.cdata1)
+        npt.assert_allclose(self.cdata1,
+                            np.array([[2, 2, 6],
+                                      [4, 4, 2]]))
+
+    def test_ilr_inv(self):
+        mat = closure(self.cdata7)
+        npt.assert_array_almost_equal(ilr_inv(ilr(mat)), mat)
+
+        npt.assert_allclose(ilr_inv(np.identity(3)), self.ortho1,
+                            rtol=1e-04, atol=1e-06)
+
+        with self.assertRaises(ValueError):
+            ilr_inv(self.cdata1, basis=self.cdata1)
+
+        # make sure that inplace modification is not occurring
+        ilr_inv(self.cdata1)
+        npt.assert_allclose(self.cdata1,
                             np.array([[2, 2, 6],
                                       [4, 4, 2]]))
 
+
+class AncomTests(TestCase):
+    def setUp(self):
+        # Basic count data with 2 groupings
+        self.table1 = pd.DataFrame([
+            [10, 10, 10, 20, 20, 20],
+            [11, 12, 11, 21, 21, 21],
+            [10, 11, 10, 10, 11, 10],
+            [10, 11, 10, 10, 10, 9],
+            [10, 11, 10, 10, 10, 10],
+            [10, 11, 10, 10, 10, 11],
+            [10, 13, 10, 10, 10, 12]]).T
+        self.cats1 = pd.Series([0, 0, 0, 1, 1, 1])
+
+        # Real valued data with 2 groupings
+        D, L = 40, 80
+        np.random.seed(0)
+        self.table2 = np.vstack((np.concatenate((normal(10, 1, D),
+                                                 normal(200, 1, D))),
+                                 np.concatenate((normal(20, 1, D),
+                                                 normal(100000, 1, D))),
+                                 normal(10, 1, L),
+                                 normal(10, 1, L),
+                                 np.concatenate((normal(20, 1, D),
+                                                 normal(100000, 1, D))),
+                                 normal(10, 1, L),
+                                 normal(10, 1, L),
+                                 normal(10, 1, L),
+                                 normal(10, 1, L)))
+        self.table2 = np.absolute(self.table2)
+        self.table2 = pd.DataFrame(self.table2.astype(np.int).T)
+        self.cats2 = pd.Series([0]*D + [1]*D)
+
+        # Real valued data with 2 groupings and no significant difference
+        self.table3 = pd.DataFrame([
+            [10, 10.5, 10, 10, 10.5, 10.3],
+            [11, 11.5, 11, 11, 11.5, 11.3],
+            [10, 10.5, 10, 10, 10.5, 10.2],
+            [10, 10.5, 10, 10, 10.5, 10.3],
+            [10, 10.5, 10, 10, 10.5, 10.1],
+            [10, 10.5, 10, 10, 10.5, 10.6],
+            [10, 10.5, 10, 10, 10.5, 10.4]]).T
+        self.cats3 = pd.Series([0, 0, 0, 1, 1, 1])
+
+        # Real valued data with 3 groupings
+        D, L = 40, 120
+        np.random.seed(0)
+        self.table4 = np.vstack((np.concatenate((normal(10, 1, D),
+                                                 normal(200, 1, D),
+                                                 normal(400, 1, D))),
+                                 np.concatenate((normal(20, 1, D),
+                                                 normal(100000, 1, D),
+                                                 normal(2000, 1, D))),
+                                 normal(10, 1, L),
+                                 normal(10, 1, L),
+                                 np.concatenate((normal(20, 1, D),
+                                                 normal(100000, 1, D),
+                                                 normal(2000, 1, D))),
+                                 normal(10, 1, L),
+                                 normal(10, 1, L),
+                                 normal(10, 1, L),
+                                 normal(10, 1, L)))
+        self.table4 = np.absolute(self.table4)
+        self.table4 = pd.DataFrame(self.table4.astype(np.int).T)
+        self.cats4 = pd.Series([0]*D + [1]*D + [2]*D)
+
+        # Noncontiguous case
+        self.table5 = pd.DataFrame([
+            [11, 12, 21, 11, 21, 21],
+            [10, 11, 10, 10, 11, 10],
+            [10, 11, 10, 10, 10, 9],
+            [10, 11, 10, 10, 10, 10],
+            [10, 11, 10, 10, 10, 11],
+            [10, 10, 20, 9,  20, 20],
+            [10, 13, 10, 10, 10, 12]]).T
+        self.cats5 = pd.Series([0, 0, 1, 0, 1, 1])
+
+        # Unbalanced group sizes case
+        self.table6 = pd.DataFrame([
+            [11, 12, 9, 11, 21, 21],
+            [10, 11, 10, 10, 11, 10],
+            [10, 11, 10, 10, 10, 9],
+            [10, 11, 10, 10, 10, 10],
+            [10, 11, 10, 10, 10, 11],
+            [10, 10, 10, 9,  20, 20],
+            [10, 13, 10, 10, 10, 12]]).T
+        self.cats6 = pd.Series([0, 0, 0, 0, 1, 1])
+
+        # Categories are letters
+        self.table7 = pd.DataFrame([
+            [11, 12, 9, 11, 21, 21],
+            [10, 11, 10, 10, 11, 10],
+            [10, 11, 10, 10, 10, 9],
+            [10, 11, 10, 10, 10, 10],
+            [10, 11, 10, 10, 10, 11],
+            [10, 10, 10, 9,  20, 20],
+            [10, 13, 10, 10, 10, 12]]).T
+        self.cats7 = pd.Series(['a', 'a', 'a', 'a', 'b', 'b'])
+
+        # Swap samples
+        self.table8 = pd.DataFrame([
+            [10, 10, 10, 20, 20, 20],
+            [11, 12, 11, 21, 21, 21],
+            [10, 11, 10, 10, 11, 10],
+            [10, 11, 10, 10, 10, 9],
+            [10, 11, 10, 10, 10, 10],
+            [10, 11, 10, 10, 10, 11],
+            [10, 13, 10, 10, 10, 12]]).T
+        self.table8.index = ['a', 'b', 'c',
+                             'd', 'e', 'f']
+        self.cats8 = pd.Series([0, 0, 1, 0, 1, 1],
+                               index=['a', 'b', 'd',
+                                      'c', 'e', 'f'])
+
+        # Real valued data with 3 groupings
+        D, L = 40, 120
+        np.random.seed(0)
+        self.table9 = np.vstack((np.concatenate((normal(10, 1, D),
+                                                 normal(200, 1, D),
+                                                 normal(400, 1, D))),
+                                 np.concatenate((normal(200000, 1, D),
+                                                 normal(10, 1, D),
+                                                 normal(2000, 1, D))),
+                                 normal(10, 10, L),
+                                 normal(10, 10, L),
+                                 np.concatenate((normal(2000, 1, D),
+                                                 normal(100000, 1, D),
+                                                 normal(2000, 1, D))),
+                                 normal(10000, 1000, L),
+                                 normal(10, 10, L),
+                                 normal(10, 10, L),
+                                 normal(10, 10, L),
+                                 normal(10000, 1000, L),
+                                 normal(10, 10, L),
+                                 normal(10, 10, L),
+                                 normal(10, 10, L),
+                                 np.concatenate((normal(2000, 1, D),
+                                                 normal(100000, 1, D),
+                                                 normal(2000, 1, D))),
+                                 normal(10000, 1000, L),
+                                 normal(10, 10, L),
+                                 normal(10, 10, L),
+                                 normal(10, 10, L)))
+        self.table9 = np.absolute(self.table9)+1
+        self.table9 = pd.DataFrame(self.table9.astype(np.int).T)
+        self.cats9 = pd.Series([0]*D + [1]*D + [2]*D)
+
+        # Real valued data with 2 groupings
+        D, L = 40, 80
+        np.random.seed(0)
+        self.table10 = np.vstack((np.concatenate((normal(10, 1, D),
+                                                  normal(200, 1, D))),
+                                  np.concatenate((normal(10, 1, D),
+                                                  normal(200, 1, D))),
+                                  np.concatenate((normal(20, 10, D),
+                                                  normal(100, 10, D))),
+                                  normal(10, 1, L),
+                                  np.concatenate((normal(200, 100, D),
+                                                  normal(100000, 100, D))),
+                                  np.concatenate((normal(200000, 100, D),
+                                                  normal(300, 100, D))),
+                                  np.concatenate((normal(200000, 100, D),
+                                                  normal(300, 100, D))),
+                                  np.concatenate((normal(20, 20, D),
+                                                  normal(40, 10, D))),
+                                  np.concatenate((normal(20, 20, D),
+                                                  normal(40, 10, D))),
+                                  np.concatenate((normal(20, 20, D),
+                                                  normal(40, 10, D))),
+                                  normal(100, 10, L),
+                                  normal(100, 10, L),
+                                  normal(1000, 10, L),
+                                  normal(1000, 10, L),
+                                  normal(10, 10, L),
+                                  normal(10, 10, L),
+                                  normal(10, 10, L),
+                                  normal(10, 10, L)))
+        self.table10 = np.absolute(self.table10) + 1
+        self.table10 = pd.DataFrame(self.table10.astype(np.int).T)
+        self.cats10 = pd.Series([0]*D + [1]*D)
+
+        # zero count
+        self.bad1 = pd.DataFrame(np.array([
+            [10, 10, 10, 20, 20, 0],
+            [11, 11, 11, 21, 21, 21],
+            [10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, 10]]).T)
+        # negative count
+        self.bad2 = pd.DataFrame(np.array([
+            [10, 10, 10, 20, 20, 1],
+            [11, 11, 11, 21, 21, 21],
+            [10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, -1],
+            [10, 10, 10, 10, 10, 10]]).T)
+
+        # missing count
+        self.bad3 = pd.DataFrame(np.array([
+            [10, 10, 10, 20, 20, 1],
+            [11, 11, 11, 21, 21, 21],
+            [10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, np.nan],
+            [10, 10, 10, 10, 10, 10]]).T)
+        self.badcats1 = pd.Series([0, 0, 0, 1, np.nan, 1])
+        self.badcats2 = pd.Series([0, 0, 0, 0, 0, 0])
+        self.badcats3 = pd.Series([0, 0, 1, 1])
+        self.badcats4 = pd.Series(range(len(self.table1)))
+        self.badcats5 = pd.Series([1]*len(self.table1))
+
+    def test_ancom_basic_counts(self):
+        test_table = pd.DataFrame(self.table1)
+        original_table = copy.deepcopy(test_table)
+        test_cats = pd.Series(self.cats1)
+        original_cats = copy.deepcopy(test_cats)
+        result = ancom(test_table,
+                       test_cats,
+                       multiple_comparisons_correction=None)
+        # Test to make sure that the input table hasn't been altered
+        assert_data_frame_almost_equal(original_table, test_table)
+        # Test to make sure that the input grouping hasn't been altered
+        pdt.assert_series_equal(original_cats, test_cats)
+        exp = pd.DataFrame({'W': np.array([5, 5, 2, 2, 2, 2, 2]),
+                            'reject': np.array([True, True, False, False,
+                                                False, False, False],
+                                               dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_basic_proportions(self):
+        # Converts from counts to proportions
+        test_table = pd.DataFrame(closure(self.table1))
+        original_table = copy.deepcopy(test_table)
+        test_cats = pd.Series(self.cats1)
+        original_cats = copy.deepcopy(test_cats)
+        result = ancom(test_table,
+                       test_cats,
+                       multiple_comparisons_correction=None)
+        # Test to make sure that the input table hasn't been altered
+        assert_data_frame_almost_equal(original_table, test_table)
+        # Test to make sure that the input grouping hasn't been altered
+        pdt.assert_series_equal(original_cats, test_cats)
+        exp = pd.DataFrame({'W': np.array([5, 5, 2, 2, 2, 2, 2]),
+                            'reject': np.array([True, True, False, False,
+                                                False, False, False],
+                                               dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_multiple_groups(self):
+        test_table = pd.DataFrame(self.table4)
+        original_table = copy.deepcopy(test_table)
+        test_cats = pd.Series(self.cats4)
+        original_cats = copy.deepcopy(test_cats)
+        result = ancom(test_table, test_cats)
+        # Test to make sure that the input table hasn't been altered
+        assert_data_frame_almost_equal(original_table, test_table)
+        # Test to make sure that the input grouping hasn't been altered
+        pdt.assert_series_equal(original_cats, test_cats)
+        exp = pd.DataFrame({'W': np.array([8, 7, 3, 3, 7, 3, 3, 3, 3]),
+                            'reject': np.array([True, True, False, False,
+                                                True, False, False, False,
+                                                False], dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_noncontiguous(self):
+        result = ancom(self.table5,
+                       self.cats5,
+                       multiple_comparisons_correction=None)
+        exp = pd.DataFrame({'W': np.array([6, 2, 2, 2, 2, 6, 2]),
+                            'reject': np.array([True, False, False, False,
+                                                False, True, False],
+                                               dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_unbalanced(self):
+        result = ancom(self.table6,
+                       self.cats6,
+                       multiple_comparisons_correction=None)
+        exp = pd.DataFrame({'W': np.array([5, 3, 3, 2, 2, 5, 2]),
+                            'reject': np.array([True, False, False, False,
+                                                False, True, False],
+                                               dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_letter_categories(self):
+        result = ancom(self.table7,
+                       self.cats7,
+                       multiple_comparisons_correction=None)
+        exp = pd.DataFrame({'W': np.array([5, 3, 3, 2, 2, 5, 2]),
+                            'reject': np.array([True, False, False, False,
+                                                False, True, False],
+                                               dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_multiple_comparisons(self):
+        result = ancom(self.table1,
+                       self.cats1,
+                       multiple_comparisons_correction='holm-bonferroni',
+                       significance_test=scipy.stats.mannwhitneyu)
+        exp = pd.DataFrame({'W': np.array([0]*7),
+                            'reject': np.array([False]*7, dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_alternative_test(self):
+        result = ancom(self.table1,
+                       self.cats1,
+                       multiple_comparisons_correction=None,
+                       significance_test=scipy.stats.mannwhitneyu)
+        exp = pd.DataFrame({'W': np.array([6, 6, 2, 2, 2, 2, 2]),
+                            'reject': np.array([True,  True, False, False,
+                                                False, False, False],
+                                               dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_normal_data(self):
+        result = ancom(self.table2,
+                       self.cats2,
+                       multiple_comparisons_correction=None,
+                       significance_test=scipy.stats.mannwhitneyu)
+        exp = pd.DataFrame({'W': np.array([8, 8, 3, 3,
+                                           8, 3, 3, 3, 3]),
+                            'reject': np.array([True, True, False, False,
+                                                True, False, False,
+                                                False, False],
+                                               dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_basic_counts_swapped(self):
+        result = ancom(self.table8, self.cats8)
+        exp = pd.DataFrame({'W': np.array([5, 5, 2, 2, 2, 2, 2]),
+                            'reject': np.array([True, True, False, False,
+                                                False, False, False],
+                                               dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_no_signal(self):
+        result = ancom(self.table3,
+                       self.cats3,
+                       multiple_comparisons_correction=None)
+        exp = pd.DataFrame({'W': np.array([0]*7),
+                            'reject': np.array([False]*7, dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_tau(self):
+        exp1 = pd.DataFrame({'W': np.array([8, 7, 3, 3, 7, 3, 3, 3, 3]),
+                            'reject': np.array([True, False, False, False,
+                                                False, False, False, False,
+                                                False], dtype=bool)})
+        exp2 = pd.DataFrame({'W': np.array([17, 17, 5, 6, 16, 5, 7, 5,
+                                            4, 5, 8, 4, 5, 16, 5, 11, 4, 6]),
+                            'reject': np.array([True, True, False, False,
+                                                True, False, False, False,
+                                                False, False, False, False,
+                                                False, True, False, False,
+                                                False, False],  dtype=bool)})
+        exp3 = pd.DataFrame({'W': np.array([16, 16, 17, 10, 17, 16, 16,
+                                            15, 15, 15, 13, 10, 10, 10,
+                                            9, 9, 9, 9]),
+                            'reject': np.array([True, True, True, False,
+                                                True, True, True, True,
+                                                True, True, True, False,
+                                                False, False, False, False,
+                                                False, False],  dtype=bool)})
+
+        result1 = ancom(self.table4, self.cats4, tau=0.25)
+        result2 = ancom(self.table9, self.cats9, tau=0.02)
+        result3 = ancom(self.table10, self.cats10, tau=0.02)
+
+        assert_data_frame_almost_equal(result1, exp1)
+        assert_data_frame_almost_equal(result2, exp2)
+        assert_data_frame_almost_equal(result3, exp3)
+
+    def test_ancom_theta(self):
+        result = ancom(self.table1, self.cats1, theta=0.3)
+        exp = pd.DataFrame({'W': np.array([5, 5, 2, 2, 2, 2, 2]),
+                            'reject': np.array([True, True, False, False,
+                                                False, False, False],
+                                               dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_alpha(self):
+        result = ancom(self.table1, self.cats1, alpha=0.5)
+        exp = pd.DataFrame({'W': np.array([6, 6, 4, 5, 5, 4, 2]),
+                            'reject': np.array([True, True, False, True,
+                                                True, False, False],
+                                               dtype=bool)})
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_fail_type(self):
+        with self.assertRaises(TypeError):
+            ancom(self.table1.values, self.cats1)
+        with self.assertRaises(TypeError):
+            ancom(self.table1, self.cats1.values)
+
+    def test_ancom_fail_zeros(self):
+        with self.assertRaises(ValueError):
+            ancom(self.bad1, self.cats2, multiple_comparisons_correction=None)
+
+    def test_ancom_fail_negative(self):
+        with self.assertRaises(ValueError):
+            ancom(self.bad2, self.cats2, multiple_comparisons_correction=None)
+
+    def test_ancom_fail_not_implemented_multiple_comparisons_correction(self):
+        with self.assertRaises(ValueError):
+            ancom(self.table2, self.cats2,
+                  multiple_comparisons_correction='fdr')
+
+    def test_ancom_fail_missing(self):
+        with self.assertRaises(ValueError):
+            ancom(self.bad3, self.cats1)
+
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.badcats1)
+
+    def test_ancom_fail_groups(self):
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.badcats2)
+
+    def test_ancom_fail_size_mismatch(self):
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.badcats3)
+
+    def test_ancom_fail_group_unique(self):
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.badcats4)
+
+    def test_ancom_fail_1_group(self):
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.badcats5)
+
+    def test_ancom_fail_tau(self):
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.cats1, tau=-1)
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.cats1, tau=1.1)
+
+    def test_ancom_fail_theta(self):
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.cats1, theta=-1)
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.cats1, theta=1.1)
+
+    def test_ancom_fail_alpha(self):
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.cats1, alpha=-1)
+        with self.assertRaises(ValueError):
+            ancom(self.table1, self.cats1, alpha=1.1)
+
+    def test_ancom_fail_multiple_groups(self):
+        with self.assertRaises(TypeError):
+            ancom(self.table4, self.cats4,
+                  significance_test=scipy.stats.ttest_ind)
+
+    def test_holm_bonferroni(self):
+        p = [0.005, 0.011, 0.02, 0.04, 0.13]
+        corrected_p = p * np.arange(1, 6)[::-1]
+        guessed_p = _holm_bonferroni(p)
+        for a, b in zip(corrected_p, guessed_p):
+            self.assertAlmostEqual(a, b)
+
+
 if __name__ == "__main__":
     main()
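
The expected values in test_holm_bonferroni above amount to a step-down
correction in which the i-th smallest of m p-values is scaled by (m - i + 1).
A short sketch under that reading; the helper name and the final monotonicity
step are assumptions (for the already-sorted, already-monotone p-values used
in the test, the cumulative maximum changes nothing):

    import numpy as np


    def holm_bonferroni_sketch(p):
        # Scale the i-th smallest p-value by (m - i + 1), force the adjusted
        # values to be non-decreasing, and return them in the input order.
        p = np.asarray(p, dtype=float)
        m = len(p)
        order = np.argsort(p)
        scaled = p[order] * (m - np.arange(m))
        adjusted = np.maximum.accumulate(scaled)
        out = np.empty_like(adjusted)
        out[order] = adjusted
        return out

    # matches corrected_p in the test: 0.025, 0.044, 0.06, 0.08, 0.13
    print(holm_bonferroni_sketch([0.005, 0.011, 0.02, 0.04, 0.13]))
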
diff --git a/skbio/stats/tests/test_gradient.py b/skbio/stats/tests/test_gradient.py
index 1515e90..96fb233 100644
--- a/skbio/stats/tests/test_gradient.py
+++ b/skbio/stats/tests/test_gradient.py
@@ -409,10 +409,10 @@ class GroupResultsTests(BaseTests):
             obs_out_f.close()
             obs_raw_f.close()
 
-            with open(get_data_path(out_fp), 'U') as f:
+            with open(get_data_path(out_fp)) as f:
                 exp_out = f.read()
 
-            with open(get_data_path(raw_fp), 'U') as f:
+            with open(get_data_path(raw_fp)) as f:
                 exp_raw = f.read()
 
             self.assertEqual(obs_out, exp_out)
@@ -433,10 +433,10 @@ class CategoryResultsTests(BaseTests):
             obs_out_f.close()
             obs_raw_f.close()
 
-            with open(get_data_path(out_fp), 'U') as f:
+            with open(get_data_path(out_fp)) as f:
                 exp_out = f.read()
 
-            with open(get_data_path(raw_fp), 'U') as f:
+            with open(get_data_path(raw_fp)) as f:
                 exp_raw = f.read()
 
             self.assertEqual(obs_out, exp_out)
@@ -457,10 +457,10 @@ class GradientANOVAResultsTests(BaseTests):
             obs_out_f.close()
             obs_raw_f.close()
 
-            with open(get_data_path(out_fp), 'U') as f:
+            with open(get_data_path(out_fp)) as f:
                 exp_out = f.read()
 
-            with open(get_data_path(raw_fp), 'U') as f:
+            with open(get_data_path(raw_fp)) as f:
                 exp_raw = f.read()
 
             self.assertEqual(obs_out, exp_out)
diff --git a/skbio/stats/tests/test_power.py b/skbio/stats/tests/test_power.py
index dcad706..79576b4 100644
--- a/skbio/stats/tests/test_power.py
+++ b/skbio/stats/tests/test_power.py
@@ -26,7 +26,6 @@ from skbio.stats.power import (subsample_power,
                                _identify_sample_groups,
                                _draw_paired_samples,
                                _get_min_size,
-                               bootstrap_power_curve,
                                paired_subsamples
                                )
 
@@ -403,23 +402,6 @@ class PowerAnalysisTest(TestCase):
         # Checks the samples returned sanely
         npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
 
-    def test_bootstrap_power_curve(self):
-        # Sets the known values
-        known_mean = np.array([0.500, 0.82, 0.965, 0.995, 1.000, 1.000,
-                               1.000, 1.000,  1.000])
-        known_bound = np.array([0.03, 0.02, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00,
-                                0.00])
-
-        # Generates the test values
-        test_mean, test_bound = bootstrap_power_curve(self.f,
-                                                      self.pop,
-                                                      self.num_samps,
-                                                      num_iter=100)
-
-        # Checks the function returned sanely
-        npt.assert_allclose(test_mean, known_mean, rtol=0.05, atol=0.05)
-        npt.assert_allclose(test_bound, known_bound, rtol=0.1, atol=0.01)
-
     def test_paired_subsamples_default(self):
         # Sets the known np.array set
         known_array = [{'MM', 'SR', 'TS', 'GW', 'PP', 'WM'},
diff --git a/skbio/stats/tests/test_spatial.py b/skbio/stats/tests/test_spatial.py
deleted file mode 100644
index 034bdd7..0000000
--- a/skbio/stats/tests/test_spatial.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-
-from unittest import TestCase, main
-
-import numpy as np
-
-from skbio.stats.spatial import (procrustes, _get_disparity, _center,
-                                 _normalize)
-
-
-class ProcrustesTests(TestCase):
-
-    """test the procrustes module, using floating point numpy arrays
-    """
-
-    def setUp(self):
-        """creates inputs"""
-        # an L
-        self.data1 = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
-
-        # a larger, shifted, mirrored L
-        self.data2 = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
-
-        # an L shifted up 1, right 1, and with point 4 shifted an extra .5
-        # to the right
-        # pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2)
-        self.data3 = np.array([[2, 4], [2, 3], [2, 2], [3, 2.5]], 'd')
-
-        # data4, data5 are standardized (trace(A*A') = 1).
-        # procrustes should return an identical copy if they are used
-        # as the first matrix argument.
-        shiftangle = np.pi / 8
-        self.data4 = np.array([[1, 0], [0, 1], [-1, 0],
-                              [0, -1]], 'd') / np.sqrt(4)
-        self.data5 = np.array([[np.cos(shiftangle), np.sin(shiftangle)],
-                              [np.cos(np.pi / 2 - shiftangle),
-                               np.sin(np.pi / 2 - shiftangle)],
-                              [-np.cos(shiftangle),
-                               -np.sin(shiftangle)],
-                              [-np.cos(np.pi / 2 - shiftangle),
-                               -np.sin(np.pi / 2 - shiftangle)]],
-                              'd') / np.sqrt(4)
-
-    def test_procrustes(self):
-        """tests procrustes' ability to match two matrices.
-
-        the second matrix is a rotated, shifted, scaled, and mirrored version
-        of the first, in two dimensions only
-        """
-        # can shift, mirror, and scale an 'L'?
-        a, b, disparity = procrustes(self.data1, self.data2)
-        np.testing.assert_allclose(b, a)
-        np.testing.assert_almost_equal(disparity, 0.)
-
-        # if first mtx is standardized, leaves first mtx unchanged?
-        m4, m5, disp45 = procrustes(self.data4, self.data5)
-        np.testing.assert_equal(m4, self.data4)
-
-        # at worst, data3 is an 'L' with one point off by .5
-        m1, m3, disp13 = procrustes(self.data1, self.data3)
-        self.assertTrue(disp13 < 0.5 ** 2)
-
-    def test_procrustes2(self):
-        """procrustes disparity should not depend on order of matrices"""
-        m1, m3, disp13 = procrustes(self.data1, self.data3)
-        m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
-        np.testing.assert_almost_equal(disp13, disp31)
-
-        # try with 3d, 8 pts per
-        rand1 = np.array([[2.61955202,  0.30522265,  0.55515826],
-                         [0.41124708, -0.03966978, -0.31854548],
-                         [0.91910318,  1.39451809, -0.15295084],
-                         [2.00452023,  0.50150048,  0.29485268],
-                         [0.09453595,  0.67528885,  0.03283872],
-                         [0.07015232,  2.18892599, -1.67266852],
-                         [0.65029688,  1.60551637,  0.80013549],
-                         [-0.6607528,  0.53644208,  0.17033891]])
-
-        rand3 = np.array([[0.0809969,  0.09731461, -0.173442],
-                         [-1.84888465, -0.92589646, -1.29335743],
-                         [0.67031855, -1.35957463,  0.41938621],
-                         [0.73967209, -0.20230757,  0.52418027],
-                         [0.17752796,  0.09065607,  0.29827466],
-                         [0.47999368, -0.88455717, -0.57547934],
-                         [-0.11486344, -0.12608506, -0.3395779],
-                         [-0.86106154, -0.28687488,  0.9644429]])
-        res1, res3, disp13 = procrustes(rand1, rand3)
-        res3_2, res1_2, disp31 = procrustes(rand3, rand1)
-        np.testing.assert_almost_equal(disp13, disp31)
-
-    def test_procrustes_shape_mismatch(self):
-        with self.assertRaises(ValueError):
-            procrustes(np.array([[1, 2], [3, 4]]),
-                       np.array([[5, 6, 7], [8, 9, 10]]))
-
-    def test_procrustes_empty_rows_or_cols(self):
-        empty = np.array([[]])
-        with self.assertRaises(ValueError):
-            procrustes(empty, empty)
-
-    def test_procrustes_no_variation(self):
-        with self.assertRaises(ValueError):
-            procrustes(np.array([[42, 42], [42, 42]]),
-                       np.array([[45, 45], [45, 45]]))
-
-    def test_get_disparity(self):
-        """tests get_disparity"""
-        disp = _get_disparity(self.data1, self.data3)
-        disp2 = _get_disparity(self.data3, self.data1)
-        np.testing.assert_equal(disp, disp2)
-        np.testing.assert_equal(disp, (3. * 2. + (1. + 1.5 ** 2)))
-
-        d1 = np.append(self.data1, self.data1, 0)
-        d3 = np.append(self.data3, self.data3, 0)
-
-        disp3 = _get_disparity(d1, d3)
-        disp4 = _get_disparity(d3, d1)
-        np.testing.assert_equal(disp3, disp4)
-        # 2x points in same configuration should give 2x disparity
-        np.testing.assert_equal(disp3, 2. * disp)
-
-    def test_center(self):
-        centered_mtx = _center(self.data1)
-        column_means = centered_mtx.mean(0)
-        for col_mean in column_means:
-            np.testing.assert_equal(col_mean, 0.)
-
-    def test_normalize(self):
-        norm_mtx = _normalize(self.data1)
-        np.testing.assert_equal(np.trace(np.dot(norm_mtx,
-                                                np.transpose(norm_mtx))), 1.)
-
-    # match_points isn't yet tested, as it's almost a private function
-    # and test_procrustes() tests it implicitly.
-
-
-if __name__ == '__main__':
-    main()
diff --git a/skbio/tests/test_base.py b/skbio/tests/test_base.py
index eb28cc6..66562fb 100644
--- a/skbio/tests/test_base.py
+++ b/skbio/tests/test_base.py
@@ -7,10 +7,24 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
+import six
+from six import binary_type, text_type
 
 import unittest
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+from IPython.core.display import Image, SVG
+from nose.tools import assert_is_instance, assert_true
 
-from skbio._base import SkbioObject
+from skbio import OrdinationResults
+from skbio._base import (SkbioObject, MetadataMixin, PositionalMetadataMixin,
+                         ElasticLines)
+from skbio.util._decorator import overrides
+from skbio.util._testing import (ReallyEqualMixin, MetadataMixinTests,
+                                 PositionalMetadataMixinTests)
 
 
 class TestSkbioObject(unittest.TestCase):
@@ -22,5 +36,366 @@ class TestSkbioObject(unittest.TestCase):
             Foo()
 
 
+class TestMetadataMixin(unittest.TestCase, ReallyEqualMixin,
+                        MetadataMixinTests):
+    def setUp(self):
+        class ExampleMetadataMixin(MetadataMixin):
+            def __init__(self, metadata=None):
+                MetadataMixin._init_(self, metadata=metadata)
+
+            def __eq__(self, other):
+                return MetadataMixin._eq_(self, other)
+
+            def __ne__(self, other):
+                return MetadataMixin._ne_(self, other)
+
+            def __copy__(self):
+                copy = self.__class__(metadata=None)
+                copy._metadata = MetadataMixin._copy_(self)
+                return copy
+
+            def __deepcopy__(self, memo):
+                copy = self.__class__(metadata=None)
+                copy._metadata = MetadataMixin._deepcopy_(self, memo)
+                return copy
+
+        self._metadata_constructor_ = ExampleMetadataMixin
+
+
+class TestPositionalMetadataMixin(unittest.TestCase, ReallyEqualMixin,
+                                  PositionalMetadataMixinTests):
+    def setUp(self):
+        class ExamplePositionalMetadataMixin(PositionalMetadataMixin):
+            @overrides(PositionalMetadataMixin)
+            def _positional_metadata_axis_len_(self):
+                return self._axis_len
+
+            def __init__(self, axis_len, positional_metadata=None):
+                self._axis_len = axis_len
+
+                PositionalMetadataMixin._init_(
+                    self, positional_metadata=positional_metadata)
+
+            def __eq__(self, other):
+                return PositionalMetadataMixin._eq_(self, other)
+
+            def __ne__(self, other):
+                return PositionalMetadataMixin._ne_(self, other)
+
+            def __copy__(self):
+                copy = self.__class__(self._axis_len, positional_metadata=None)
+                copy._positional_metadata = \
+                    PositionalMetadataMixin._copy_(self)
+                return copy
+
+            def __deepcopy__(self, memo):
+                copy = self.__class__(self._axis_len, positional_metadata=None)
+                copy._positional_metadata = \
+                    PositionalMetadataMixin._deepcopy_(self, memo)
+                return copy
+
+        self._positional_metadata_constructor_ = ExamplePositionalMetadataMixin
+
+
+class TestOrdinationResults(unittest.TestCase):
+    def setUp(self):
+        # Define in-memory CA results to serialize and deserialize.
+        eigvals = pd.Series([0.0961330159181, 0.0409418140138], ['CA1', 'CA2'])
+        features = np.array([[0.408869425742, 0.0695518116298],
+                             [-0.1153860437, -0.299767683538],
+                             [-0.309967102571, 0.187391917117]])
+        samples = np.array([[-0.848956053187, 0.882764759014],
+                            [-0.220458650578, -1.34482000302],
+                            [1.66697179591, 0.470324389808]])
+        features_ids = ['Species1', 'Species2', 'Species3']
+        sample_ids = ['Site1', 'Site2', 'Site3']
+
+        samples_df = pd.DataFrame(samples, index=sample_ids,
+                                  columns=['CA1', 'CA2'])
+        features_df = pd.DataFrame(features, index=features_ids,
+                                   columns=['CA1', 'CA2'])
+
+        self.ordination_results = OrdinationResults(
+            'CA', 'Correspondence Analysis', eigvals=eigvals,
+            samples=samples_df, features=features_df)
+
+        # DataFrame for testing plot method. Has a categorical column with a
+        # mix of numbers and strings. Has a numeric column with a mix of ints,
+        # floats, and strings that can be converted to floats. Has a numeric
+        # column with missing data (np.nan).
+        self.df = pd.DataFrame([['foo', '42', 10],
+                                [22, 0, 8],
+                                [22, -4.2, np.nan],
+                                ['foo', '42.19', 11]],
+                               index=['A', 'B', 'C', 'D'],
+                               columns=['categorical', 'numeric', 'nancolumn'])
+
+        # Minimal ordination results for easier testing of plotting method.
+        # Paired with df above.
+        eigvals = np.array([0.50, 0.25, 0.25])
+        samples = np.array([[0.1, 0.2, 0.3],
+                            [0.2, 0.3, 0.4],
+                            [0.3, 0.4, 0.5],
+                            [0.4, 0.5, 0.6]])
+        samples_df = pd.DataFrame(samples, ['A', 'B', 'C', 'D'],
+                                  ['PC1', 'PC2', 'PC3'])
+
+        self.min_ord_results = OrdinationResults(
+            'PCoA', 'Principal Coordinate Analysis', eigvals, samples_df)
+
+    def test_str(self):
+        exp = ("Ordination results:\n"
+               "\tMethod: Correspondance Analysis (CA)\n"
+               "\tEigvals: 2\n"
+               "\tProportion explained: N/A\n"
+               "\tFeatures: 3x2\n"
+               "\tSamples: 3x2\n"
+               "\tBiplot Scores: N/A\n"
+               "\tSample constraints: N/A\n"
+               "\tFeature IDs: 'Species1', 'Species2', 'Species3'\n"
+               "\tSample IDs: 'Site1', 'Site2', 'Site3'")
+        obs = str(self.ordination_results)
+        self.assertEqual(obs, exp)
+
+        # all optional attributes missing
+        exp = ("Ordination results:\n"
+               "\tMethod: Principal Coordinate Analysis (PCoA)\n"
+               "\tEigvals: 1\n"
+               "\tProportion explained: N/A\n"
+               "\tFeatures: N/A\n"
+               "\tSamples: 2x1\n"
+               "\tBiplot Scores: N/A\n"
+               "\tSample constraints: N/A\n"
+               "\tFeature IDs: N/A\n"
+               "\tSample IDs: 0, 1")
+        samples_df = pd.DataFrame(np.array([[1], [2]]))
+        obs = str(OrdinationResults('PCoA', 'Principal Coordinate Analysis',
+                                    pd.Series(np.array([4.2])), samples_df))
+        self.assertEqual(obs.split('\n'), exp.split('\n'))
+
+    def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title,
+                                  exp_legend_exists, exp_xlabel, exp_ylabel,
+                                  exp_zlabel):
+        # check type
+        assert_is_instance(fig, mpl.figure.Figure)
+
+        # check number of subplots
+        axes = fig.get_axes()
+        npt.assert_equal(len(axes), exp_num_subplots)
+
+        # check title
+        ax = axes[0]
+        npt.assert_equal(ax.get_title(), exp_title)
+
+        # shouldn't have tick labels
+        for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() +
+                           ax.get_zticklabels()):
+            npt.assert_equal(tick_label.get_text(), '')
+
+        # check if legend is present
+        legend = ax.get_legend()
+        if exp_legend_exists:
+            assert_true(legend is not None)
+        else:
+            assert_true(legend is None)
+
+        # check axis labels
+        npt.assert_equal(ax.get_xlabel(), exp_xlabel)
+        npt.assert_equal(ax.get_ylabel(), exp_ylabel)
+        npt.assert_equal(ax.get_zlabel(), exp_zlabel)
+
+    def test_plot_no_metadata(self):
+        fig = self.min_ord_results.plot()
+        self.check_basic_figure_sanity(fig, 1, '', False, '0', '1', '2')
+
+    def test_plot_with_numeric_metadata_and_plot_options(self):
+        fig = self.min_ord_results.plot(
+            self.df, 'numeric', axes=(1, 0, 2),
+            axis_labels=['PC 2', 'PC 1', 'PC 3'], title='a title', cmap='Reds')
+        self.check_basic_figure_sanity(
+            fig, 2, 'a title', False, 'PC 2', 'PC 1', 'PC 3')
+
+    def test_plot_with_categorical_metadata_and_plot_options(self):
+        fig = self.min_ord_results.plot(
+            self.df, 'categorical', axes=[2, 0, 1], title='a title',
+            cmap='Accent')
+        self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1')
+
+    def test_plot_with_invalid_axis_labels(self):
+        with six.assertRaisesRegex(self, ValueError, 'axis_labels.*4'):
+            self.min_ord_results.plot(axes=[2, 0, 1],
+                                      axis_labels=('a', 'b', 'c', 'd'))
+
+    def test_validate_plot_axes_valid_input(self):
+        # shouldn't raise an error on valid input. nothing is returned, so
+        # nothing to check here
+        samples = self.min_ord_results.samples.values.T
+        self.min_ord_results._validate_plot_axes(samples, (1, 2, 0))
+
+    def test_validate_plot_axes_invalid_input(self):
+        # not enough dimensions
+        with six.assertRaisesRegex(self, ValueError, '2 dimension\(s\)'):
+            self.min_ord_results._validate_plot_axes(
+                np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))
+
+        coord_matrix = self.min_ord_results.samples.values.T
+
+        # wrong number of axes
+        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 0'):
+            self.min_ord_results._validate_plot_axes(coord_matrix, [])
+        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 4'):
+            self.min_ord_results._validate_plot_axes(coord_matrix,
+                                                     (0, 1, 2, 3))
+
+        # duplicate axes
+        with six.assertRaisesRegex(self, ValueError, 'must be unique'):
+            self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))
+
+        # out of range axes
+        with six.assertRaisesRegex(self, ValueError, 'axes\[1\].*3'):
+            self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
+        with six.assertRaisesRegex(self, ValueError, 'axes\[2\].*3'):
+            self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))
+
+    def test_get_plot_point_colors_invalid_input(self):
+        # column provided without df
+        with npt.assert_raises(ValueError):
+            self.min_ord_results._get_plot_point_colors(None, 'numeric',
+                                                        ['B', 'C'], 'jet')
+
+        # df provided without column
+        with npt.assert_raises(ValueError):
+            self.min_ord_results._get_plot_point_colors(self.df, None,
+                                                        ['B', 'C'], 'jet')
+
+        # column not in df
+        with six.assertRaisesRegex(self, ValueError, 'missingcol'):
+            self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
+                                                        ['B', 'C'], 'jet')
+
+        # id not in df
+        with six.assertRaisesRegex(self, ValueError, 'numeric'):
+            self.min_ord_results._get_plot_point_colors(
+                self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')
+
+        # missing data in df
+        with six.assertRaisesRegex(self, ValueError, 'nancolumn'):
+            self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
+                                                        ['B', 'C', 'A'], 'jet')
+
+    def test_get_plot_point_colors_no_df_or_column(self):
+        obs = self.min_ord_results._get_plot_point_colors(None, None,
+                                                          ['B', 'C'], 'jet')
+        npt.assert_equal(obs, (None, None))
+
+    def test_get_plot_point_colors_numeric_column(self):
+        # subset of the ids in df
+        exp = [0.0, -4.2, 42.0]
+        obs = self.min_ord_results._get_plot_point_colors(
+            self.df, 'numeric', ['B', 'C', 'A'], 'jet')
+        npt.assert_almost_equal(obs[0], exp)
+        assert_true(obs[1] is None)
+
+        # all ids in df
+        exp = [0.0, 42.0, 42.19, -4.2]
+        obs = self.min_ord_results._get_plot_point_colors(
+            self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet')
+        npt.assert_almost_equal(obs[0], exp)
+        assert_true(obs[1] is None)
+
+    def test_get_plot_point_colors_categorical_column(self):
+        # subset of the ids in df
+        exp_colors = [[0., 0., 0.5, 1.], [0., 0., 0.5, 1.], [0.5, 0., 0., 1.]]
+        exp_color_dict = {
+            'foo': [0.5, 0., 0., 1.],
+            22: [0., 0., 0.5, 1.]
+        }
+        obs = self.min_ord_results._get_plot_point_colors(
+            self.df, 'categorical', ['B', 'C', 'A'], 'jet')
+        npt.assert_almost_equal(obs[0], exp_colors)
+        npt.assert_equal(obs[1], exp_color_dict)
+
+        # all ids in df
+        exp_colors = [[0., 0., 0.5, 1.], [0.5, 0., 0., 1.], [0.5, 0., 0., 1.],
+                      [0., 0., 0.5, 1.]]
+        obs = self.min_ord_results._get_plot_point_colors(
+            self.df, 'categorical', ['B', 'A', 'D', 'C'], 'jet')
+        npt.assert_almost_equal(obs[0], exp_colors)
+        # should get same color dict as before
+        npt.assert_equal(obs[1], exp_color_dict)
+
+    def test_plot_categorical_legend(self):
+        fig = plt.figure()
+        ax = fig.add_subplot(111, projection='3d')
+
+        # we shouldn't have a legend yet
+        assert_true(ax.get_legend() is None)
+
+        self.min_ord_results._plot_categorical_legend(
+            ax, {'foo': 'red', 'bar': 'green'})
+
+        # make sure we have a legend now
+        legend = ax.get_legend()
+        assert_true(legend is not None)
+
+        # do some light sanity checking to make sure our input labels and
+        # colors are present. we're not using nose.tools.assert_items_equal
+        # because it isn't available in Python 3.
+        labels = [t.get_text() for t in legend.get_texts()]
+        npt.assert_equal(sorted(labels), ['bar', 'foo'])
+
+        colors = [l.get_color() for l in legend.get_lines()]
+        npt.assert_equal(sorted(colors), ['green', 'red'])
+
+    def test_repr_png(self):
+        obs = self.min_ord_results._repr_png_()
+        assert_is_instance(obs, binary_type)
+        assert_true(len(obs) > 0)
+
+    def test_repr_svg(self):
+        obs = self.min_ord_results._repr_svg_()
+        # print_figure(format='svg') can return text or bytes depending on the
+        # version of IPython
+        assert_true(isinstance(obs, text_type) or isinstance(obs, binary_type))
+        assert_true(len(obs) > 0)
+
+    def test_png(self):
+        assert_is_instance(self.min_ord_results.png, Image)
+
+    def test_svg(self):
+        assert_is_instance(self.min_ord_results.svg, SVG)
+
+
+class TestElasticLines(unittest.TestCase):
+    def setUp(self):
+        self.el = ElasticLines()
+
+    def test_empty(self):
+        self.assertEqual(self.el.to_str(), '')
+
+    def test_add_line(self):
+        self.el.add_line('foo')
+        self.assertEqual(self.el.to_str(), 'foo')
+
+    def test_add_lines(self):
+        self.el = ElasticLines()
+        self.el.add_lines(['alice', 'bob', 'carol'])
+        self.assertEqual(self.el.to_str(), 'alice\nbob\ncarol')
+
+    def test_add_separator(self):
+        self.el.add_separator()
+        self.assertEqual(self.el.to_str(), '')
+
+        self.el.add_line('foo')
+        self.assertEqual(self.el.to_str(), '---\nfoo')
+
+        self.el.add_separator()
+        self.el.add_lines(['bar', 'bazzzz'])
+        self.el.add_separator()
+
+        self.assertEqual(self.el.to_str(),
+                         '------\nfoo\n------\nbar\nbazzzz\n------')
+
+
 if __name__ == '__main__':
     unittest.main()
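
The new tests above exercise the OrdinationResults plotting API end to end. A
rough usage sketch, based only on the fixtures shown in this diff (the
metadata frame and its 'group' column are illustrative, not part of
scikit-bio):

import numpy as np
import pandas as pd
from skbio import OrdinationResults

# Minimal PCoA-style result, mirroring min_ord_results in the tests above.
samples_df = pd.DataFrame(np.array([[0.1, 0.2, 0.3],
                                    [0.2, 0.3, 0.4],
                                    [0.3, 0.4, 0.5],
                                    [0.4, 0.5, 0.6]]),
                          ['A', 'B', 'C', 'D'], ['PC1', 'PC2', 'PC3'])
ord_results = OrdinationResults('PCoA', 'Principal Coordinate Analysis',
                                np.array([0.50, 0.25, 0.25]), samples_df)

# Hypothetical per-sample metadata; 'group' is an illustrative column name.
metadata = pd.DataFrame({'group': ['foo', 'foo', 'bar', 'bar']},
                        index=['A', 'B', 'C', 'D'])

# Color points by a categorical column, as in the categorical plot test.
fig = ord_results.plot(metadata, 'group', axes=(0, 1, 2),
                       title='PCoA colored by group', cmap='Accent')
fig.savefig('ordination.png')
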
diff --git a/skbio/tree/__init__.py b/skbio/tree/__init__.py
index 875600c..ee6a3c6 100644
--- a/skbio/tree/__init__.py
+++ b/skbio/tree/__init__.py
@@ -5,10 +5,10 @@ Tree representations (:mod:`skbio.tree`)
 .. currentmodule:: skbio.tree
 
 This module provides functionality for working with trees, including
-phylogenetic trees and hierarchies, and prefix trees (i.e., tries).
-Functionality is provided for constructing trees, for traversing in multiple
-ways, comparisons, fetching subtrees, and more. This module supports trees that
-are multifurcating and nodes that have single descendants.
+phylogenetic trees and hierarchies. Functionality is provided for constructing
+trees, traversing them in multiple ways, making comparisons, fetching
+subtrees, and more. This module supports trees that are multifurcating and
+nodes that have single descendants.
 
 Classes
 -------
@@ -17,7 +17,6 @@ Classes
    :toctree: generated/
 
     TreeNode
-    CompressedTrie
 
 Phylogenetic Reconstruction
 ---------------------------
@@ -33,7 +32,6 @@ Utility Functions
 .. autosummary::
    :toctree: generated/
 
-    fasta_to_pairlist
     majority_rule
 
 Exceptions
@@ -68,7 +66,7 @@ tips A and B.
 
 Now let's construct a simple tree and dump an ASCII representation:
 
->>> tree = TreeNode.read(StringIO(u"((A, B)C, D)root;"))
+>>> tree = TreeNode.read(StringIO("((A, B)C, D)root;"))
 >>> print(tree.is_root()) # is this the root of the tree?
 True
 >>> print(tree.is_tip()) # is this node a tip?
@@ -133,24 +131,24 @@ distance is the fraction of common clades present in the two trees, where a
 distance of 0 means the trees contain identical clades, and a distance of 1
 indicates the trees do not share any common clades:
 
->>> tree1 = TreeNode.read(StringIO(u"((A, B)C, (D, E)F, (G, H)I)root;"))
->>> tree2 = TreeNode.read(StringIO(u"((G, H)C, (D, E)F, (B, A)I)root;"))
->>> tree3 = TreeNode.read(StringIO(u"((D, B)C, (A, E)F, (G, H)I)root;"))
+>>> tree1 = TreeNode.read(StringIO("((A, B)C, (D, E)F, (G, H)I)root;"))
+>>> tree2 = TreeNode.read(StringIO("((G, H)C, (D, E)F, (B, A)I)root;"))
+>>> tree3 = TreeNode.read(StringIO("((D, B)C, (A, E)F, (G, H)I)root;"))
 >>> print(tree1.compare_subsets(tree1))  # identity case
 0.0
 >>> print(tree1.compare_subsets(tree2))  # same tree but different clade order
 0.0
 >>> print(tree1.compare_subsets(tree3))  # only 1 of 3 common subsets
-0.666666666667
+0.6666666666666667
 
 We can additionally take into account branch length when computing distances
 between trees. First, we're going to construct two new trees with branch
 lengths specified; note the difference in the Newick strings:
 
 >>> tree1 = \
-...     TreeNode.read(StringIO(u"((A:0.1, B:0.2)C:0.3, D:0.4, E:0.5)root;"))
+...     TreeNode.read(StringIO("((A:0.1, B:0.2)C:0.3, D:0.4, E:0.5)root;"))
 >>> tree2 = \
-...     TreeNode.read(StringIO(u"((A:0.4, B:0.8)C:0.3, D:0.1, E:0.5)root;"))
+...     TreeNode.read(StringIO("((A:0.4, B:0.8)C:0.3, D:0.1, E:0.5)root;"))
 
 In these two trees, we've annotated the length from each node to its parent,
 so for instance:
@@ -173,69 +171,6 @@ pairwise tip-to-tip distances between trees:
 >>> print(tree1.compare_tip_distances(tree2))
 0.120492524415
 
-Prefix trees (i.e., tries) examples
------------------------------------
-
-Construct a Trie from a (key, value) list
-
->>> from skbio.tree import CompressedTrie
->>> pair_list = [("ab",  "0"),
-...              ("abababa", "1"),
-...              ("abab", "2"),
-...              ("baba", "3"),
-...              ("ababaa", "4"),
-...              ("a", "5"),
-...              ("abababa", "6"),
-...              ("bab", "7"),
-...              ("babba", "8")]
->>> t = CompressedTrie(pair_list)
-
-Get the number of keys stored in the trie
-
->>> len(t)
-9
-
-Get the number of nodes in the trie
-
->>> t.size
-10
-
-Get the trie's prefix map
-
->>> t.prefix_map
-{'1': ['6', '2', '0', '5'], '8': ['7'], '3': [], '4': []}
-
-Find the value attached to a given key
-
->>> t.find("ababaa")
-['4']
-
-Add a new (key, value) pair to the Trie
-
->>> t.insert("bac", "9")
->>> t.find("bac")
-['9']
->>> t.prefix_map
-{'1': ['6', '2', '0', '5'], '9': [], '3': [], '4': [], '8': ['7']}
-
-Create a new trie with a list of sequences
-
->>> from skbio.tree import fasta_to_pairlist
-
->>> seqs = [("s0", "ACA"),
-...         ("s1", "ACAGTC"),
-...         ("s2", "ACTA"),
-...         ("s3", "CAGT"),
-...         ("s4", "CATGAA"),
-...         ("s5", "A"),
-...         ("s6", "CATGTA"),
-...         ("s7", "CACCA")]
-
->>> t = CompressedTrie(fasta_to_pairlist(seqs))
-
->>> t.prefix_map
-{'s3': [], 's2': [], 's1': ['s0', 's5'], 's7': [], 's6': [], 's4': []}
-
 """
 
 # ----------------------------------------------------------------------------
@@ -251,14 +186,12 @@ from __future__ import absolute_import, division, print_function
 from skbio.util import TestRunner
 
 from ._tree import TreeNode
-from ._trie import CompressedTrie, fasta_to_pairlist
 from ._nj import nj
 from ._majority_rule import majority_rule
 from ._exception import (TreeError, NoLengthError, DuplicateNodeError,
                          MissingNodeError, NoParentError)
 
-__all__ = ['TreeNode', 'CompressedTrie', 'fasta_to_pairlist', 'nj',
-           'majority_rule', 'TreeError', 'NoLengthError', 'DuplicateNodeError',
-           'MissingNodeError', 'NoParentError']
+__all__ = ['TreeNode', 'nj', 'majority_rule', 'TreeError', 'NoLengthError',
+           'DuplicateNodeError', 'MissingNodeError', 'NoParentError']
 
 test = TestRunner(__file__).test
diff --git a/skbio/tree/_majority_rule.py b/skbio/tree/_majority_rule.py
index 84942c8..4bebb8f 100644
--- a/skbio/tree/_majority_rule.py
+++ b/skbio/tree/_majority_rule.py
@@ -231,69 +231,40 @@ def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
     >>> from skbio.tree import TreeNode
     >>> from io import StringIO
     >>> trees = [
-    ... TreeNode.read(StringIO(u"(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));")),
-    ... TreeNode.read(StringIO(u"(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));")),
-    ... TreeNode.read(StringIO(u"(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));")),
-    ... TreeNode.read(StringIO(u"(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));")),
-    ... TreeNode.read(StringIO(u"(A,(B,(E,(G,((F,I),(((J,H),D),C))))));")),
-    ... TreeNode.read(StringIO(u"(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));")),
-    ... TreeNode.read(StringIO(u"(A,(B,(E,((F,I),(G,(((J,H),D),C))))));")),
-    ... TreeNode.read(StringIO(u"(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));")),
-    ... TreeNode.read(StringIO(u"(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));"))]
+    ... TreeNode.read(StringIO("(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));")),
+    ... TreeNode.read(StringIO("(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));")),
+    ... TreeNode.read(StringIO("(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));")),
+    ... TreeNode.read(StringIO("(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));")),
+    ... TreeNode.read(StringIO("(A,(B,(E,(G,((F,I),(((J,H),D),C))))));")),
+    ... TreeNode.read(StringIO("(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));")),
+    ... TreeNode.read(StringIO("(A,(B,(E,((F,I),(G,(((J,H),D),C))))));")),
+    ... TreeNode.read(StringIO("(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));")),
+    ... TreeNode.read(StringIO("(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));"))]
     >>> consensus = majority_rule(trees, cutoff=0.5)[0]
-    >>> print(consensus.ascii_art())
-                                  /-E
-                                 |
-                                 |          /-G
-                        /--------|         |
-                       |         |         |          /-F
-                       |         |         |---------|
-                       |          \--------|          \-I
-                       |                   |
-                       |                   |          /-C
-              /--------|                   |         |
-             |         |                    \--------|          /-D
-             |         |                             |         |
-             |         |                              \--------|--J
-    ---------|         |                                       |
-             |         |                                        \-H
-             |         |
-             |          \-B
-             |
-              \-A
-    >>> for node in consensus.non_tips():
+    >>> for node in sorted(consensus.non_tips(),
+    ...                    key=lambda k: k.count(tips=True)):
     ...     support_value = node.support
-    ...     names = ' '.join([n.name for n in node.tips()])
+    ...     names = ' '.join(sorted(n.name for n in node.tips()))
     ...     print("Tips: %s, support: %s" % (names, support_value))
     Tips: F I, support: 9.0
-    Tips: D J H, support: 6.0
-    Tips: C D J H, support: 6.0
-    Tips: G F I C D J H, support: 6.0
-    Tips: E G F I C D J H, support: 9.0
-    Tips: E G F I C D J H B, support: 9.0
+    Tips: D H J, support: 6.0
+    Tips: C D H J, support: 6.0
+    Tips: C D F G H I J, support: 6.0
+    Tips: C D E F G H I J, support: 9.0
+    Tips: B C D E F G H I J, support: 9.0
 
    In the next example, multiple trees will be returned, which can happen if
     clades are not well supported across the trees. In addition, this can arise
     if not all tips are present across all trees.
 
     >>> trees = [
-    ...     TreeNode.read(StringIO(u"((a,b),(c,d),(e,f));")),
-    ...     TreeNode.read(StringIO(u"(a,(c,d),b,(e,f));")),
-    ...     TreeNode.read(StringIO(u"((c,d),(e,f),b);")),
-    ...     TreeNode.read(StringIO(u"(a,(c,d),(e,f));"))]
+    ...     TreeNode.read(StringIO("((a,b),(c,d),(e,f));")),
+    ...     TreeNode.read(StringIO("(a,(c,d),b,(e,f));")),
+    ...     TreeNode.read(StringIO("((c,d),(e,f),b);")),
+    ...     TreeNode.read(StringIO("(a,(c,d),(e,f));"))]
     >>> consensus_trees = majority_rule(trees)
-    >>> print(len(consensus_trees))
+    >>> len(consensus_trees)
     4
-    >>> for tree in consensus_trees:
-    ...     print(tree.ascii_art())
-    --b
-    --a
-              /-f
-    ---------|
-              \-e
-              /-d
-    ---------|
-              \-c
 
     """
     if weights is None:
@@ -301,7 +272,7 @@ def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
     else:
         weights = np.asarray(weights)
         if len(weights) != len(trees):
-            raise ValueError("Number of weights and trees differ!")
+            raise ValueError("Number of weights and trees differ.")
 
     cutoff_threshold = cutoff * weights.sum()
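
The cutoff arithmetic here is straightforward to reproduce by hand. A small
sketch, assuming one unit of weight per tree when no weights are supplied
(that branch is not shown in this hunk):

import numpy as np

# Nine unweighted input trees, as in the first doctest above.
weights = np.ones(9)
cutoff = 0.5
cutoff_threshold = cutoff * weights.sum()   # 4.5

# Only clades whose accumulated (weighted) support clears this threshold are
# kept, which is consistent with the consensus above reporting supports of
# 6.0 and 9.0 only.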
 
diff --git a/skbio/tree/_tree.py b/skbio/tree/_tree.py
index 13f2e41..7256c5f 100644
--- a/skbio/tree/_tree.py
+++ b/skbio/tree/_tree.py
@@ -8,7 +8,8 @@
 
 from __future__ import absolute_import, division, print_function
 
-from operator import or_
+import warnings
+from operator import or_, itemgetter
 from copy import deepcopy
 from itertools import combinations
 from functools import reduce
@@ -23,7 +24,8 @@ from skbio._base import SkbioObject
 from skbio.stats.distance import DistanceMatrix
 from ._exception import (NoLengthError, DuplicateNodeError, NoParentError,
                          MissingNodeError, TreeError)
-from skbio.util._decorator import experimental
+from skbio.util import RepresentationWarning
+from skbio.util._decorator import experimental, classonlymethod
 
 
 def distance_from_r(m1, m2):
@@ -114,7 +116,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c, d)root;"])
+        >>> tree = TreeNode.read(["((a,b)c, d)root;"])
         >>> repr(tree)
         '<TreeNode, name: root, internal node count: 1, tips count: 3>'
 
@@ -145,7 +147,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c);"])
+        >>> tree = TreeNode.read(["((a,b)c);"])
         >>> str(tree)
         '((a,b)c);\n'
 
@@ -262,7 +264,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"(a,b)c;"])
+        >>> tree = TreeNode.read(["(a,b)c;"])
         >>> print(tree.pop(0))
         a;
         <BLANKLINE>
@@ -301,7 +303,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"(a,b)c;"])
+        >>> tree = TreeNode.read(["(a,b)c;"])
         >>> tree.remove(tree.children[0])
         True
 
@@ -332,7 +334,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"(a,b)c;"])
+        >>> tree = TreeNode.read(["(a,b)c;"])
         >>> tree.remove_deleted(lambda x: x.name == 'b')
         >>> print(tree)
         (a)c;
@@ -366,7 +368,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
         >>> to_delete = tree.find('b')
         >>> tree.remove_deleted(lambda x: x == to_delete)
         >>> print(tree)
@@ -426,7 +428,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> t = TreeNode.read([u'((H:1,G:1):2,(R:0.5,M:0.7):3);'])
+        >>> t = TreeNode.read(['((H:1,G:1):2,(R:0.5,M:0.7):3);'])
         >>> sheared = t.shear(['G', 'M'])
         >>> print(sheared)
         (G:3.0,M:3.7);
@@ -438,7 +440,7 @@ class TreeNode(SkbioObject):
         ids = set(names)
 
         if not ids.issubset(all_tips):
-            raise ValueError("ids are not a subset of the tree!")
+            raise ValueError("ids are not a subset of the tree.")
 
         while len(list(tcopy.tips())) != len(ids):
             for n in list(tcopy.tips()):
@@ -470,7 +472,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
         >>> tree_copy = tree.copy()
         >>> tree_nodes = set([id(n) for n in tree.traverse()])
         >>> tree_copy_nodes = set([id(n) for n in tree_copy.traverse()])
@@ -542,7 +544,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
+        >>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"])
         >>> new_tree = tree.find('d').unrooted_deepcopy()
         >>> print(new_tree)
         (b,c,(a,((f,g)h)e)d)root;
@@ -589,7 +591,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
+        >>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"])
         >>> new_tree = tree.find('d').unrooted_copy()
         >>> print(new_tree)
         (b,c,(a,((f,g)h)e)d)root;
@@ -638,7 +640,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
+        >>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"])
         >>> print(tree.count())
         9
         >>> print(tree.count(tips=True))
@@ -650,6 +652,46 @@ class TreeNode(SkbioObject):
         else:
             return len(list(self.traverse(include_self=True)))
 
+    @experimental(as_of="0.4.1")
+    def observed_node_counts(self, tip_counts):
+        """Returns counts of node observations from counts of tip observations
+
+        Parameters
+        ----------
+        tip_counts : dict of ints
+            Counts of observations of tips. Keys correspond to tip names in
+            ``self``, and counts are unsigned ints.
+
+        Returns
+        -------
+        dict
+            Counts of observations of nodes. Keys are nodes (tips and their
+            ancestors) in ``self``, and counts are unsigned ints.
+
+        Raises
+        ------
+        ValueError
+            If a count less than one is observed.
+        MissingNodeError
+            If a count is provided for a tip not in the tree, or for an
+            internal node.
+
+        """
+        result = defaultdict(int)
+        for tip_name, count in tip_counts.items():
+            if count < 1:
+                raise ValueError("All tip counts must be greater than zero.")
+            else:
+                t = self.find(tip_name)
+                if not t.is_tip():
+                    raise MissingNodeError(
+                        "Counts can only be for tips in the tree. %s is an "
+                        "internal node." % t.name)
+                result[t] += count
+                for internal_node in t.ancestors():
+                    result[internal_node] += count
+        return result
+
     @experimental(as_of="0.4.0")
     def subtree(self, tip_list=None):
         r"""Make a copy of the subtree"""
@@ -674,9 +716,9 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
+        >>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"])
         >>> sorted(tree.subset())
-        [u'a', u'b', u'c', u'f', u'g']
+        ['a', 'b', 'c', 'f', 'g']
         """
         return frozenset({i.name for i in self.tips()})
 
@@ -700,12 +742,11 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"(((a,b)c,(d,e)f)h)root;"])
-        >>> for s in sorted(tree.subsets()):
-        ...     print(sorted(s))
-        [u'a', u'b']
-        [u'd', u'e']
-        [u'a', u'b', u'd', u'e']
+        >>> tree = TreeNode.read(["(((a,b)c,(d,e)f)h)root;"])
+        >>> subsets = tree.subsets()
+        >>> len(subsets)
+        3
+
         """
         sets = []
         for i in self.postorder(include_self=False):
@@ -748,7 +789,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"(((a,b)c,(d,e)f)g,h)i;"])
+        >>> tree = TreeNode.read(["(((a,b)c,(d,e)f)g,h)i;"])
         >>> print(tree.root_at('c'))
         (a,b,((d,e)f,(h)g)c)root;
         <BLANKLINE>
@@ -791,7 +832,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"(((d:1,e:1,(g:1)f:1)c:1)b:1,h:1)a:1;"])
+        >>> tree = TreeNode.read(["(((d:1,e:1,(g:1)f:1)c:1)b:1,h:1)a:1;"])
         >>> print(tree.root_at_midpoint())
         ((d:1.0,e:1.0,(g:1.0)f:1.0)c:0.5,((h:1.0)b:1.0):0.5)root;
         <BLANKLINE>
@@ -858,7 +899,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c);"])
+        >>> tree = TreeNode.read(["((a,b)c);"])
         >>> print(tree.is_tip())
         False
         >>> print(tree.find('a').is_tip())
@@ -884,7 +925,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c);"])
+        >>> tree = TreeNode.read(["((a,b)c);"])
         >>> print(tree.is_root())
         True
         >>> print(tree.find('a').is_root())
@@ -910,7 +951,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c);"])
+        >>> tree = TreeNode.read(["((a,b)c);"])
         >>> print(tree.has_children())
         True
         >>> print(tree.find('a').has_children())
@@ -959,7 +1000,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c);"])
+        >>> tree = TreeNode.read(["((a,b)c);"])
         >>> for node in tree.traverse():
         ...     print(node.name)
         None
@@ -1005,7 +1046,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c);"])
+        >>> tree = TreeNode.read(["((a,b)c);"])
         >>> for node in tree.preorder():
         ...     print(node.name)
         None
@@ -1052,7 +1093,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c);"])
+        >>> tree = TreeNode.read(["((a,b)c);"])
         >>> for node in tree.postorder():
         ...     print(node.name)
         a
@@ -1120,7 +1161,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c);"])
+        >>> tree = TreeNode.read(["((a,b)c);"])
         >>> for node in tree.pre_and_postorder():
         ...     print(node.name)
         None
@@ -1195,7 +1236,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
         >>> for node in tree.levelorder():
         ...     print(node.name)
         None
@@ -1244,7 +1285,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
         >>> for node in tree.tips():
         ...     print(node.name)
         a
@@ -1288,7 +1329,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
         >>> for node in tree.non_tips():
         ...     print(node.name)
         c
@@ -1371,7 +1412,7 @@ class TreeNode(SkbioObject):
                 if node.is_tip():
                     if name in tip_cache:
                         raise DuplicateNodeError("Tip with name '%s' already "
-                                                 "exists!" % name)
+                                                 "exists." % name)
 
                     tip_cache[name] = node
                 else:
@@ -1412,7 +1453,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio.tree import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)d,(f,g)c);"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)d,(f,g)c);"])
         >>> for node in tree.find_all('c'):
         ...     print(node.name, node.children[0].name, node.children[1].name)
         c a b
@@ -1481,7 +1522,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
         >>> print(tree.find('c').name)
         c
         """
@@ -1537,7 +1578,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
         >>> print(tree.find_by_id(2).name)
         d
 
@@ -1584,10 +1625,10 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
         >>> func = lambda x: x.parent == tree.find('c')
         >>> [n.name for n in tree.find_by_func(func)]
-        [u'a', u'b']
+        ['a', 'b']
         """
         for node in self.traverse(include_self=True):
             if func(node):
@@ -1608,9 +1649,9 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
         >>> [node.name for node in tree.find('a').ancestors()]
-        [u'c', u'root']
+        ['c', 'root']
 
         """
         result = []
@@ -1633,7 +1674,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
         >>> tip_a = tree.find('a')
         >>> root = tip_a.root()
         >>> root == tree
@@ -1663,10 +1704,10 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e,f)g)root;"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e,f)g)root;"])
         >>> tip_e = tree.find('e')
         >>> [n.name for n in tip_e.siblings()]
-        [u'd', u'f']
+        ['d', 'f']
 
         """
         if self.is_root():
@@ -1696,10 +1737,10 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
         >>> node_c = tree.find('c')
         >>> [n.name for n in node_c.neighbors()]
-        [u'a', u'b', u'root']
+        ['a', 'b', 'root']
 
         """
         nodes = [n for n in self.children + [self.parent] if n is not None]
@@ -1730,7 +1771,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
         >>> nodes = [tree.find('a'), tree.find('b')]
         >>> lca = tree.lowest_common_ancestor(nodes)
         >>> print(lca.name)
@@ -1747,7 +1788,7 @@ class TreeNode(SkbioObject):
         tips = [self.find(name) for name in tipnames]
 
         if len(tips) == 0:
-            raise ValueError("No tips found!")
+            raise ValueError("No tips found.")
 
         nodes_to_scrub = []
 
@@ -1781,7 +1822,7 @@ class TreeNode(SkbioObject):
 
     lca = lowest_common_ancestor  # for convenience
 
-    @classmethod
+    @classonlymethod
     @experimental(as_of="0.4.0")
     def from_taxonomy(cls, lineage_map):
         """Construct a tree from a taxonomy
@@ -1800,16 +1841,17 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio.tree import TreeNode
-        >>> lineages = {'1': ['Bacteria', 'Firmicutes', 'Clostridia'],
-        ...             '2': ['Bacteria', 'Firmicutes', 'Bacilli'],
-        ...             '3': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
-        ...             '4': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
-        ...             '5': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
-        ...             '6': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
-        ...             '7': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
-        ...             '8': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
-        ...             '9': ['Bacteria', 'Bacteroidetes', 'Cytophagia']}
-        >>> tree = TreeNode.from_taxonomy(lineages.items())
+        >>> lineages = [
+        ...     ('1', ['Bacteria', 'Firmicutes', 'Clostridia']),
+        ...     ('2', ['Bacteria', 'Firmicutes', 'Bacilli']),
+        ...     ('3', ['Bacteria', 'Bacteroidetes', 'Sphingobacteria']),
+        ...     ('4', ['Archaea', 'Euryarchaeota', 'Thermoplasmata']),
+        ...     ('5', ['Archaea', 'Euryarchaeota', 'Thermoplasmata']),
+        ...     ('6', ['Archaea', 'Euryarchaeota', 'Halobacteria']),
+        ...     ('7', ['Archaea', 'Euryarchaeota', 'Halobacteria']),
+        ...     ('8', ['Bacteria', 'Bacteroidetes', 'Sphingobacteria']),
+        ...     ('9', ['Bacteria', 'Bacteroidetes', 'Cytophagia'])]
+        >>> tree = TreeNode.from_taxonomy(lineages)
         >>> print(tree.ascii_art())
                                       /Clostridia-1
                             /Firmicutes
@@ -1821,13 +1863,13 @@ class TreeNode(SkbioObject):
                  |                   |
         ---------|                    \Cytophagia-9
                  |
-                 |                              /-5
+                 |                              /-4
                  |                    /Thermoplasmata
-                 |                   |          \-4
+                 |                   |          \-5
                   \Archaea- /Euryarchaeota
-                                     |          /-7
+                                     |          /-6
                                       \Halobacteria
-                                                \-6
+                                                \-7
 
         """
         root = cls(name=None)
@@ -1874,7 +1916,7 @@ class TreeNode(SkbioObject):
             node = node.children[0]
         return distance
 
-    @classmethod
+    @classonlymethod
     @experimental(as_of="0.4.0")
     def from_linkage_matrix(cls, linkage_matrix, id_list):
         """Return tree from SciPy linkage matrix.
@@ -2010,7 +2052,7 @@ class TreeNode(SkbioObject):
                     seen.add(node.id)
 
     @experimental(as_of="0.4.0")
-    def to_array(self, attrs=None):
+    def to_array(self, attrs=None, nan_length_value=None):
         """Return an array representation of self
 
         Parameters
@@ -2019,12 +2061,17 @@ class TreeNode(SkbioObject):
             The attributes and types to return. The expected form is
             [(attribute_name, type)]. If `None`, then `name`, `length`, and
             `id` are returned.
+        nan_length_value : float, optional
+            If provided, replaces any `nan` in the branch length vector
+            (i.e., ``result['length']``) with this value. `nan` branch lengths
+            can arise from an edge not having a length (common for the root
+            node parent edge), which can make summing problematic.
 
         Returns
         -------
         dict of array
             {id_index: {id: TreeNode},
-             child_index: [(node_id, left_child_id, right_child_id)],
+             child_index: ((node_id, left_child_id, right_child_id)),
              attr_1: array(...),
              ...
              attr_N: array(...)}
@@ -2032,20 +2079,21 @@ class TreeNode(SkbioObject):
         Notes
         -----
         Attribute arrays are in index order such that TreeNode.id can be used
-        as a lookup into the the array
-
-        If `length` is an attribute, this will also record the length off the
-        root which is `nan`. Take care when summing.
+        as a lookup into the array.
 
         Examples
         --------
+        >>> from pprint import pprint
         >>> from skbio import TreeNode
-        >>> t = TreeNode.read([u'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7);'])
+        >>> t = TreeNode.read(['(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7);'])
         >>> res = t.to_array()
-        >>> res.keys()
-        ['child_index', 'length', 'name', 'id_index', 'id']
+        >>> sorted(res.keys())
+        ['child_index', 'id', 'id_index', 'length', 'name']
         >>> res['child_index']
-        [(4, 0, 2), (5, 3, 3), (6, 4, 5), (7, 6, 6)]
+        array([[4, 0, 2],
+               [5, 3, 3],
+               [6, 4, 5],
+               [7, 6, 6]])
         >>> for k, v in res['id_index'].items():
         ...     print(k, v)
         ...
@@ -2068,7 +2116,7 @@ class TreeNode(SkbioObject):
         >>> res['id']
         array([0, 1, 2, 3, 4, 5, 6, 7])
         >>> res['name']
-        array([u'a', u'b', u'c', u'd', u'x', u'y', u'z', None], dtype=object)
+        array(['a', 'b', 'c', 'd', 'x', 'y', 'z', None], dtype=object)
 
         """
         if attrs is None:
@@ -2089,6 +2137,9 @@ class TreeNode(SkbioObject):
 
         results = {'id_index': id_index, 'child_index': child_index}
         results.update({attr: arr for (attr, dtype), arr in zip(attrs, tmp)})
+        if nan_length_value is not None:
+            length_v = results['length']
+            length_v[np.isnan(length_v)] = nan_length_value
         return results
 
     def _ascii_art(self, char1='-', show_internal=True, compact=False):
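
The new nan_length_value argument is mainly a convenience for summing branch
lengths. A minimal sketch using the same tree as the doctest above:

from skbio import TreeNode

t = TreeNode.read(['(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7);'])

# The root has no parent edge, so its entry in res['length'] is nan and a
# naive sum would be nan as well; nan_length_value=0.0 makes it well defined.
res = t.to_array(nan_length_value=0.0)
print(res['length'].sum())  # 28.0
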
@@ -2148,7 +2199,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
+        >>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
         >>> print(tree.ascii_art())
                             /-a
                   /c-------|
@@ -2192,7 +2243,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
+        >>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
         >>> root = tree
         >>> tree.find('a').accumulate_to_ancestor(root)
         4.0
@@ -2204,7 +2255,7 @@ class TreeNode(SkbioObject):
                 raise NoParentError("Provided ancestor is not in the path")
 
             if curr.length is None:
-                raise NoLengthError("No length on node %s found!" %
+                raise NoLengthError("No length on node %s found." %
                                     curr.name or "unnamed")
 
             accum += curr.length
@@ -2245,7 +2296,7 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
+        >>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
         >>> tip_a = tree.find('a')
         >>> tip_d = tree.find('d')
         >>> tip_a.distance(tip_d)
@@ -2269,21 +2320,25 @@ class TreeNode(SkbioObject):
         nodes on large trees efficiently. The code has been modified to track
         the specific tips the distance is between
         """
+        maxkey = itemgetter(0)
+
         for n in self.postorder():
             if n.is_tip():
-                n.MaxDistTips = [[0.0, n], [0.0, n]]
+                n.MaxDistTips = ((0.0, n), (0.0, n))
             else:
                 if len(n.children) == 1:
                     raise TreeError("No support for single descedent nodes")
                 else:
-                    tip_info = [(max(c.MaxDistTips), c) for c in n.children]
+                    tip_info = [(max(c.MaxDistTips, key=maxkey), c)
+                                for c in n.children]
+
                     dists = [i[0][0] for i in tip_info]
                     best_idx = np.argsort(dists)[-2:]
-                    tip_a, child_a = tip_info[best_idx[0]]
-                    tip_b, child_b = tip_info[best_idx[1]]
-                    tip_a[0] += child_a.length or 0.0
-                    tip_b[0] += child_b.length or 0.0
-                n.MaxDistTips = [tip_a, tip_b]
+                    (tip_a_d, tip_a), child_a = tip_info[best_idx[0]]
+                    (tip_b_d, tip_b), child_b = tip_info[best_idx[1]]
+                    tip_a_d += child_a.length or 0.0
+                    tip_b_d += child_b.length or 0.0
+                n.MaxDistTips = ((tip_a_d, tip_a), (tip_b_d, tip_b))
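
Editor's note: the switch to immutable tuples plus `key=itemgetter(0)` is what fixes the tie bug tested later in this patch (#1077). When two distances tie, plain tuple comparison falls through to the second element and tries to order TreeNode objects. A minimal sketch of the idea, using a stand-in class rather than TreeNode:

    from operator import itemgetter

    class Node(object):
        """Stand-in for TreeNode; illustration only."""

    pairs = [(3.0, Node()), (3.0, Node())]  # tied distances

    # max(pairs) would compare the Node objects once the floats tie, which
    # raises TypeError on Python 3 because plain objects are unorderable.
    best_dist, best_node = max(pairs, key=itemgetter(0))  # distances only
    print(best_dist)  # 3.0
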
 
     def _get_max_distance_singledesc(self):
         """returns the max distance between any pair of tips
@@ -2320,12 +2375,12 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
+        >>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
         >>> dist, tips = tree.get_max_distance()
         >>> dist
         16.0
         >>> [n.name for n in tips]
-        [u'b', u'e']
+        ['b', 'e']
         """
         if not hasattr(self, 'MaxDistTips'):
             # _set_max_distance will throw a TreeError if a node with a single
@@ -2368,23 +2423,26 @@ class TreeNode(SkbioObject):
         ------
         ValueError
             If any of the specified `endpoints` are not tips
-        NoLengthError
-            If a node without length is encountered
 
         See Also
         --------
         distance
         compare_tip_distances
 
+        Notes
+        -----
+        If a node does not have an associated length, 0.0 will be used and a
+        ``RepresentationWarning`` will be raised.
+
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
+        >>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
         >>> mat = tree.tip_tip_distances()
         >>> print(mat)
         4x4 distance matrix
         IDs:
-        u'a', u'b', u'd', u'e'
+        'a', 'b', 'd', 'e'
         Data:
         [[  0.   3.  14.  15.]
          [  3.   0.  15.  16.]
@@ -2435,11 +2493,14 @@ class TreeNode(SkbioObject):
             # can possibly use np.zeros
             starts, stops = [], []  # to calc ._start and ._stop for curr node
             for child in node.children:
-                if child.length is None:
-                    raise NoLengthError("Node with name '%s' doesn't have a "
-                                        "length." % child.name)
-
-                distances[child.__start:child.__stop] += child.length
+                length = child.length
+                if length is None:
+                    warnings.warn(
+                        "`TreeNode.tip_tip_distances`: Node with name %r does "
+                        "not have an associated length, so a length of 0.0 "
+                        "will be used." % child.name, RepresentationWarning)
+                    length = 0.0
+                distances[child.__start:child.__stop] += length
 
                 starts.append(child.__start)
                 stops.append(child.__stop)
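
Editor's note: with this change a missing branch length no longer raises NoLengthError from tip_tip_distances; it is treated as 0.0 and a RepresentationWarning is emitted instead. A minimal sketch of how a caller might silence the warning, assuming the 0.4.1 API shown in this patch:

    import warnings

    from skbio import TreeNode
    from skbio.util import RepresentationWarning

    tree = TreeNode.read(["((a,b)c,(d,e)f);"])  # no branch lengths at all

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RepresentationWarning)
        dm = tree.tip_tip_distances()  # missing lengths treated as 0.0

    print(dm.shape)  # (4, 4)
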
@@ -2490,8 +2551,8 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree1 = TreeNode.read([u"((a,b),(c,d));"])
-        >>> tree2 = TreeNode.read([u"(((a,b),c),d);"])
+        >>> tree1 = TreeNode.read(["((a,b),(c,d));"])
+        >>> tree2 = TreeNode.read(["(((a,b),c),d);"])
         >>> tree1.compare_rfd(tree2)
         2.0
 
@@ -2551,8 +2612,8 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tree1 = TreeNode.read([u"((a,b),(c,d));"])
-        >>> tree2 = TreeNode.read([u"(((a,b),c),d);"])
+        >>> tree1 = TreeNode.read(["((a,b),(c,d));"])
+        >>> tree2 = TreeNode.read(["(((a,b),c),d);"])
         >>> tree1.compare_subsets(tree2)
         0.5
 
@@ -2625,8 +2686,8 @@ class TreeNode(SkbioObject):
         --------
         >>> from skbio import TreeNode
         >>> # note, only three common taxa between the trees
-        >>> tree1 = TreeNode.read([u"((a:1,b:1):2,(c:0.5,X:0.7):3);"])
-        >>> tree2 = TreeNode.read([u"(((a:1,b:1,Y:1):2,c:3):1,Z:4);"])
+        >>> tree1 = TreeNode.read(["((a:1,b:1):2,(c:0.5,X:0.7):3);"])
+        >>> tree2 = TreeNode.read(["(((a:1,b:1,Y:1):2,c:3):1,Z:4);"])
         >>> dist = tree1.compare_tip_distances(tree2)
         >>> print("%.9f" % dist)
         0.000133446
@@ -2665,10 +2726,12 @@ class TreeNode(SkbioObject):
         -------
         dict
             A mapping {node_id: TreeNode}
-        list of tuple of (int, int, int)
-            The first index in each tuple is the corresponding node_id. The
-            second index is the left most leaf index. The third index is the
-            right most leaf index
+        np.array of ints
+            This array describes the IDs of every internal node and the ID
+            range of its immediate descendents. The first column in the array
+            corresponds to the node_id. The second column is the leftmost
+            descendent's ID. The third column is the rightmost descendent's
+            ID.
         """
         self.assign_ids()
 
@@ -2693,6 +2756,8 @@ class TreeNode(SkbioObject):
             child_index.append((self.id,
                                 self.children[0].id,
                                 self.children[-1].id))
+        child_index = np.asarray(child_index)
+        child_index = np.atleast_2d(child_index)
 
         return id_index, child_index
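
Editor's note: `child_index` is now a 2-D integer array whose rows are (node_id, leftmost immediate descendent id, rightmost immediate descendent id). A small sketch of reading it, reusing the values from the to_array doctest earlier in this patch:

    import numpy as np

    # Rows as in the to_array doctest: (node_id, leftmost id, rightmost id).
    child_index = np.array([[4, 0, 2],
                            [5, 3, 3],
                            [6, 4, 5],
                            [7, 6, 6]])
    lengths = np.array([1., 2., 3., 5., 4., 6., 7., np.nan])  # indexed by id

    # Sum the branch lengths of each internal node's immediate descendents.
    for node_id, left, right in child_index:
        print("%d %.1f" % (node_id, lengths[left:right + 1].sum()))
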
 
@@ -2748,18 +2813,18 @@ class TreeNode(SkbioObject):
         Examples
         --------
         >>> from skbio import TreeNode
-        >>> tr = TreeNode.read([u"(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,"
+        >>> tr = TreeNode.read(["(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,"
         ...                     "(H:.4,I:.5)J:1.3)K;"])
         >>> tdbl = tr.descending_branch_length()
         >>> sdbl = tr.descending_branch_length(['A','E'])
-        >>> print(tdbl, sdbl)
+        >>> print(round(tdbl, 1), round(sdbl, 1))
         8.9 2.2
         """
         self.assign_ids()
         if tip_subset is not None:
             all_tips = self.subset()
             if not set(tip_subset).issubset(all_tips):
-                raise ValueError('tip_subset contains ids that arent tip '
+                raise ValueError('tip_subset contains ids that aren\'t tip '
                                  'names.')
 
             lca = self.lowest_common_ancestor(tip_subset)
@@ -2810,21 +2875,21 @@ class TreeNode(SkbioObject):
         Cache the tip names of the tree on its internal nodes
 
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b,(c,d)e)f,(g,h)i)root;"])
+        >>> tree = TreeNode.read(["((a,b,(c,d)e)f,(g,h)i)root;"])
         >>> f = lambda n: [n.name] if n.is_tip() else []
         >>> tree.cache_attr(f, 'tip_names')
         >>> for n in tree.traverse(include_self=True):
         ...     print("Node name: %s, cache: %r" % (n.name, n.tip_names))
-        Node name: root, cache: [u'a', u'b', u'c', u'd', u'g', u'h']
-        Node name: f, cache: [u'a', u'b', u'c', u'd']
-        Node name: a, cache: [u'a']
-        Node name: b, cache: [u'b']
-        Node name: e, cache: [u'c', u'd']
-        Node name: c, cache: [u'c']
-        Node name: d, cache: [u'd']
-        Node name: i, cache: [u'g', u'h']
-        Node name: g, cache: [u'g']
-        Node name: h, cache: [u'h']
+        Node name: root, cache: ['a', 'b', 'c', 'd', 'g', 'h']
+        Node name: f, cache: ['a', 'b', 'c', 'd']
+        Node name: a, cache: ['a']
+        Node name: b, cache: ['b']
+        Node name: e, cache: ['c', 'd']
+        Node name: c, cache: ['c']
+        Node name: d, cache: ['d']
+        Node name: i, cache: ['g', 'h']
+        Node name: g, cache: ['g']
+        Node name: h, cache: ['h']
 
         """
         if cache_type in [set, frozenset]:
@@ -2836,7 +2901,7 @@ class TreeNode(SkbioObject):
                 return a + b
 
         else:
-            raise TypeError("Only list, set and frozenset are supported!")
+            raise TypeError("Only list, set and frozenset are supported.")
 
         for node in self.postorder(include_self=True):
             node._registered_caches.add(cache_attrname)
@@ -2890,7 +2955,7 @@ class TreeNode(SkbioObject):
         times.
 
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read([u"((a,b),(c,d));"])
+        >>> tree = TreeNode.read(["((a,b),(c,d));"])
         >>> rev = lambda items: items.reverse()
         >>> shuffler = tree.shuffle(names=['a', 'b'], shuffle_f=rev, n=5)
         >>> for shuffled_tree in shuffler:
diff --git a/skbio/tree/_trie.py b/skbio/tree/_trie.py
deleted file mode 100644
index a00e9f1..0000000
--- a/skbio/tree/_trie.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-from future.utils import viewitems
-
-from skbio.util._decorator import deprecated
-
-
-class _CompressedNode(object):
-    """Represents a node in the compressed trie
-
-    Parameters
-    ----------
-    key : string
-        the key attached to the node
-    values : list of objects, optional
-        the values attached to this node
-
-    """
-
-    def __init__(self, key, values=None):
-        self.values = values or []
-        self.key = key
-        self.children = {}
-
-    def __nonzero__(self):
-        return (self.key != "" or len(self.values) > 0 or
-                len(self.children.keys()) > 0)
-
-    def __len__(self):
-        """Returns the number of values attached to the node
-
-        .. warning:: This method is recursive
-        """
-        return sum(len(n) for n in self.children.values()) + len(self.values)
-
-    @property
-    def size(self):
-        """int with the number of nodes below the node
-
-        .. warning:: This method is recursive
-        """
-        return sum(n.size for n in self.children.values()) + 1
-
-    @property
-    def prefix_map(self):
-        """Dict with the prefix map
-
-        Dictionary of {values: list of values} containing the prefix map
-            of this node
-        """
-        mapping = {}
-
-        if len(self.children) == 0:
-            # we have a leaf
-            mapping = {self.values[0]: self.values[1:]}
-        else:
-            # we are at an internal node
-            for child in self.children.values():
-                mapping.update(child.prefix_map)
-            # get largest group
-            n = -1
-            key_largest = None
-            for key, value in viewitems(mapping):
-                if len(value) > n:
-                    n = len(value)
-                    key_largest = key
-            # append this node's values
-            mapping[key_largest].extend(self.values)
-
-        return mapping
-
-    def insert(self, key, value):
-        """Inserts key with value in the node
-
-        Parameters
-        ----------
-        key : string
-            The string key attached to the value
-
-        value : object
-            Object to attach to the key
-        """
-        node_key_len = len(self.key)
-        length = min(node_key_len, len(key))
-        # Follow the key into the tree
-        split_node = False
-        index = 0
-        while index < length and not split_node:
-            split_node = key[index] != self.key[index]
-            index += 1
-
-        if split_node:
-            # Index has been incremented after split_node was set to true,
-            # decrement it to make it work
-            index -= 1
-            # We need to split up the node pointed by index
-            # Get the key for the new node
-            new_key_node = _CompressedNode(key[index:], [value])
-            # Get a new node for the old key node
-            old_key_node = _CompressedNode(self.key[index:], self.values)
-            old_key_node.children = self.children
-            self.children = {key[index]: new_key_node,
-                             self.key[index]: old_key_node}
-            self.key = self.key[:index]
-            self.values = []
-        elif index == len(self.key) and index == len(key):
-            # The new key matches node key exactly
-            self.values.append(value)
-        elif index < node_key_len:
-            # Key shorter than node key
-            lower_node = _CompressedNode(self.key[index:], self.values)
-            lower_node.children = self.children
-            self.children = {self.key[index]: lower_node}
-            self.key = key
-            self.values = [value]
-        else:
-            # New key longer than current node key
-            node = self.children.get(key[index])
-            if node:
-                # insert into next node
-                node.insert(key[index:], value)
-            else:
-                # Create new node
-                new_node = _CompressedNode(key[index:], [value])
-                self.children[key[index]] = new_node
-
-    def find(self, key):
-        """Searches for key and returns values stored for the key.
-
-        Parameters
-        ----------
-        key : string
-            The key of the value to search for
-
-        Returns
-        -------
-        object
-            The value attached to the key
-        """
-        # key exhausted
-        if len(key) == 0:
-            return self.values
-
-        # find matching part of key and node_key
-        min_length = min(len(key), len(self.key))
-        keys_diff = False
-        index = 0
-        while index < min_length and not keys_diff:
-            keys_diff = key[index] != self.key[index]
-            index += 1
-
-        if keys_diff:
-            return []
-        elif index == len(key):
-            # key and node_key match exactly
-            return self.values
-        else:
-            node = self.children.get(key[index])
-            if node:
-                # descend to next node
-                return node.find(key[index:])
-        return []
-
-trie_deprecation_p = {
-    'as_of': '0.4.0', 'until': '0.4.1', 'reason': (
-        "scikit-bio's trie functionality will be replaced with "
-        "with functionality from a dedicated package. To track "
-        "progress, see [#937]"
-        "(https://github.com/biocore/scikit-bio/issues/937).")}
-
-
-class CompressedTrie(object):
-    """ A compressed Trie for a list of (key, value) pairs
-
-    Parameters
-    ----------
-    pair_list : list of tuples, optional
-        List of (key, value) pairs to initialize the Trie
-
-    """
-
-    @deprecated(**trie_deprecation_p)
-    def __init__(self, pair_list=None):
-        self._root = _CompressedNode("")
-        if pair_list:
-            for key, value in pair_list:
-                self.insert(key, value)
-
-    @deprecated(**trie_deprecation_p)
-    def __nonzero__(self):
-        return bool(self._root)
-
-    @deprecated(**trie_deprecation_p)
-    def __len__(self):
-        return len(self._root)
-
-    @property
-    @deprecated(**trie_deprecation_p)
-    def size(self):
-        """int with the number of nodes in the Trie"""
-        return self._root.size
-
-    @property
-    @deprecated(**trie_deprecation_p)
-    def prefix_map(self):
-        """Dict with the prefix map
-
-        Dictionary of {values: list of values} containing the prefix map
-        """
-        return self._root.prefix_map
-
-    @deprecated(**trie_deprecation_p)
-    def insert(self, key, value):
-        """Inserts key with value in Trie
-
-        Parameters
-        ----------
-        key : string
-            The string key attached to the value
-
-        value : object
-            Object to attach to the key
-        """
-        self._root.insert(key, value)
-
-    @deprecated(**trie_deprecation_p)
-    def find(self, key):
-        """Searches for key and returns values stored for the key.
-
-        Parameters
-        ----------
-        key : string
-
-
-        Returns
-        -------
-        object
-            The value attached to the key
-        """
-        return self._root.find(key)
-
-
-@deprecated(**trie_deprecation_p)
-def fasta_to_pairlist(seqs):
-    """Yields (key, value) pairs, useful for populating a Trie object
-
-    Parameters
-    ----------
-    seqs : Iterable
-        tuples of the form ``(label, seq)``
-
-    Yields
-    ------
-    tuple
-        Tuple of the form ``(seq, label)``.
-
-    """
-    for label, seq in seqs:
-        yield seq, label
diff --git a/skbio/tree/tests/test_tree.py b/skbio/tree/tests/test_tree.py
index 22a922d..7891ec2 100644
--- a/skbio/tree/tests/test_tree.py
+++ b/skbio/tree/tests/test_tree.py
@@ -9,15 +9,17 @@
 from __future__ import absolute_import, division, print_function
 
 from unittest import TestCase, main
+from collections import defaultdict
 
 import numpy as np
-import numpy.testing as nptest
+import numpy.testing as npt
 from scipy.stats import pearsonr
 
 from skbio.io._fileobject import StringIO
 from skbio import DistanceMatrix, TreeNode
 from skbio.tree import (DuplicateNodeError, NoLengthError,
                         TreeError, MissingNodeError, NoParentError)
+from skbio.util import RepresentationWarning
 
 
 class TreeTests(TestCase):
@@ -50,6 +52,100 @@ class TreeTests(TestCase):
                                                    "2,(c,d)int3)int4),(e,f)int"
                                                    "5);"))
 
+    def test_observed_node_counts(self):
+        """Returns observed node counts given a vector of OTU observation counts
+        """
+        # no OTUs observed
+        otu_counts = {}
+        expected = defaultdict(int)
+        self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
+                         expected)
+        # error on zero count(s)
+        otu_counts = {'a': 0}
+        self.assertRaises(ValueError, self.simple_t.observed_node_counts,
+                          otu_counts)
+        otu_counts = {'a': 0, 'b': 0, 'c': 0, 'd': 0}
+        self.assertRaises(ValueError, self.simple_t.observed_node_counts,
+                          otu_counts)
+
+        # all OTUs observed once
+        otu_counts = {'a': 1, 'b': 1, 'c': 1, 'd': 1}
+        expected = defaultdict(int)
+        expected[self.simple_t.find('root')] = 4
+        expected[self.simple_t.find('i1')] = 2
+        expected[self.simple_t.find('i2')] = 2
+        expected[self.simple_t.find('a')] = 1
+        expected[self.simple_t.find('b')] = 1
+        expected[self.simple_t.find('c')] = 1
+        expected[self.simple_t.find('d')] = 1
+        self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
+                         expected)
+
+        # some OTUs observed twice
+        otu_counts = {'a': 2, 'b': 1, 'c': 1, 'd': 1}
+        expected = defaultdict(int)
+        expected[self.simple_t.find('root')] = 5
+        expected[self.simple_t.find('i1')] = 3
+        expected[self.simple_t.find('i2')] = 2
+        expected[self.simple_t.find('a')] = 2
+        expected[self.simple_t.find('b')] = 1
+        expected[self.simple_t.find('c')] = 1
+        expected[self.simple_t.find('d')] = 1
+        self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
+                         expected)
+
+        otu_counts = {'a': 2, 'b': 1, 'c': 1, 'd': 2}
+        expected = defaultdict(int)
+        expected[self.simple_t.find('root')] = 6
+        expected[self.simple_t.find('i1')] = 3
+        expected[self.simple_t.find('i2')] = 3
+        expected[self.simple_t.find('a')] = 2
+        expected[self.simple_t.find('b')] = 1
+        expected[self.simple_t.find('c')] = 1
+        expected[self.simple_t.find('d')] = 2
+        self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
+                         expected)
+
+        # some OTUs observed, others not observed
+        otu_counts = {'a': 2, 'b': 1}
+        expected = defaultdict(int)
+        expected[self.simple_t.find('root')] = 3
+        expected[self.simple_t.find('i1')] = 3
+        expected[self.simple_t.find('a')] = 2
+        expected[self.simple_t.find('b')] = 1
+        self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
+                         expected)
+
+        otu_counts = {'d': 1}
+        expected = defaultdict(int)
+        expected[self.simple_t.find('root')] = 1
+        expected[self.simple_t.find('i2')] = 1
+        expected[self.simple_t.find('d')] = 1
+        self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
+                         expected)
+
+        # error on non-tips
+        otu_counts = {'a': 2, 'e': 1}
+        self.assertRaises(MissingNodeError, self.simple_t.observed_node_counts,
+                          otu_counts)
+        otu_counts = {'a': 2, 'i1': 1}
+        self.assertRaises(MissingNodeError, self.simple_t.observed_node_counts,
+                          otu_counts)
+
+        # test with another tree
+        otu_counts = {}
+        expected = defaultdict(int)
+        self.assertEqual(self.complex_tree.observed_node_counts(otu_counts),
+                         expected)
+
+        otu_counts = {'e': 42, 'f': 1}
+        expected[self.complex_tree.root()] = 43
+        expected[self.complex_tree.find('int5')] = 43
+        expected[self.complex_tree.find('e')] = 42
+        expected[self.complex_tree.find('f')] = 1
+        self.assertEqual(self.complex_tree.observed_node_counts(otu_counts),
+                         expected)
+
     def test_count(self):
         """Get node counts"""
         exp = 7
@@ -466,15 +562,15 @@ class TreeTests(TestCase):
             u"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
         tips = sorted([n for n in t.tips()], key=lambda x: x.name)
 
-        nptest.assert_almost_equal(tips[0].distance(tips[0]), 0.0)
-        nptest.assert_almost_equal(tips[0].distance(tips[1]), 0.3)
-        nptest.assert_almost_equal(tips[0].distance(tips[2]), 1.3)
+        npt.assert_almost_equal(tips[0].distance(tips[0]), 0.0)
+        npt.assert_almost_equal(tips[0].distance(tips[1]), 0.3)
+        npt.assert_almost_equal(tips[0].distance(tips[2]), 1.3)
         with self.assertRaises(NoLengthError):
             tips[0].distance(tips[3])
 
-        nptest.assert_almost_equal(tips[1].distance(tips[0]), 0.3)
-        nptest.assert_almost_equal(tips[1].distance(tips[1]), 0.0)
-        nptest.assert_almost_equal(tips[1].distance(tips[2]), 1.4)
+        npt.assert_almost_equal(tips[1].distance(tips[0]), 0.3)
+        npt.assert_almost_equal(tips[1].distance(tips[1]), 0.0)
+        npt.assert_almost_equal(tips[1].distance(tips[2]), 1.4)
         with self.assertRaises(NoLengthError):
             tips[1].distance(tips[3])
 
@@ -525,7 +621,7 @@ class TreeTests(TestCase):
         tree = TreeNode.read(StringIO(
             u"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
         dist, nodes = tree.get_max_distance()
-        nptest.assert_almost_equal(dist, 1.6)
+        npt.assert_almost_equal(dist, 1.6)
         self.assertEqual(sorted([n.name for n in nodes]), ['b', 'e'])
 
     def test_set_max_distance(self):
@@ -537,6 +633,39 @@ class TreeTests(TestCase):
         self.assertEqual(tip_a[0] + tip_b[0], 1.6)
         self.assertEqual(sorted([tip_a[1].name, tip_b[1].name]), ['b', 'e'])
 
+    def test_set_max_distance_tie_bug(self):
+        """Corresponds to #1077"""
+        s = StringIO("((a:1,b:1)c:2,(d:3,e:4)f:5)root;")
+        t = TreeNode.read(s)
+
+        exp = ((3.0, t.find('a')), (9.0, t.find('e')))
+
+        # The above tree would trigger an exception in max. The central issue
+        # was that the data being passed to max were a tuple of tuples:
+        # ((left_d, left_n), (right_d, right_n))
+        # With tied distances, max would fall back to index 1 of each tuple
+        # (the nodes themselves) to break the tie, which fails.
+        t._set_max_distance()
+
+        self.assertEqual(t.MaxDistTips, exp)
+
+    def test_set_max_distance_inplace_modification_bug(self):
+        """Corresponds to #1223"""
+        s = StringIO("((a:1,b:1)c:2,(d:3,e:4)f:5)root;")
+        t = TreeNode.read(s)
+
+        exp = [((0.0, t.find('a')), (0.0, t.find('a'))),
+               ((0.0, t.find('b')), (0.0, t.find('b'))),
+               ((1.0, t.find('a')), (1.0, t.find('b'))),
+               ((0.0, t.find('d')), (0.0, t.find('d'))),
+               ((0.0, t.find('e')), (0.0, t.find('e'))),
+               ((3.0, t.find('d')), (4.0, t.find('e'))),
+               ((3.0, t.find('a')), (9.0, t.find('e')))]
+
+        t._set_max_distance()
+
+        self.assertEqual([n.MaxDistTips for n in t.postorder()], exp)
+
     def test_shear(self):
         """Shear the nodes"""
         t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
@@ -607,8 +736,22 @@ class TreeTests(TestCase):
 
     def test_tip_tip_distances_no_length(self):
         t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
-        with self.assertRaises(NoLengthError):
-            t.tip_tip_distances()
+        exp_t = TreeNode.read(StringIO(u"((a:0,b:0)c:0,(d:0,e:0)f:0);"))
+        exp_t_dm = exp_t.tip_tip_distances()
+
+        t_dm = npt.assert_warns(RepresentationWarning, t.tip_tip_distances)
+        self.assertEqual(t_dm, exp_t_dm)
+
+        for node in t.preorder():
+            self.assertIs(node.length, None)
+
+    def test_tip_tip_distances_missing_length(self):
+        t = TreeNode.read(StringIO(u"((a,b:6)c:4,(d,e:0)f);"))
+        exp_t = TreeNode.read(StringIO(u"((a:0,b:6)c:4,(d:0,e:0)f:0);"))
+        exp_t_dm = exp_t.tip_tip_distances()
+
+        t_dm = npt.assert_warns(RepresentationWarning, t.tip_tip_distances)
+        self.assertEqual(t_dm, exp_t_dm)
 
     def test_neighbors(self):
         """Get neighbors of a node"""
@@ -674,6 +817,13 @@ class TreeTests(TestCase):
         obs = [n.name for n in self.simple_t.levelorder()]
         self.assertEqual(obs, exp)
 
+    def test_index_tree_single_node(self):
+        """index_tree handles single node tree"""
+        t1 = TreeNode.read(StringIO(u'root;'))
+        id_index, child_index = t1.index_tree()
+        self.assertEqual(id_index[0], t1)
+        npt.assert_equal(child_index, np.array([[]]))
+
     def test_index_tree(self):
         """index_tree should produce correct index and node map"""
         # test for first tree: contains singleton outgroup
@@ -685,23 +835,25 @@ class TreeTests(TestCase):
         nodes_1 = [n.id for n in t1.traverse(self_before=False,
                    self_after=True)]
         self.assertEqual(nodes_1, [0, 1, 2, 3, 6, 4, 5, 7, 8])
-        self.assertEqual(child_1, [(2, 0, 1), (6, 2, 3), (7, 4, 5), (8, 6, 7)])
+        npt.assert_equal(child_1, np.array([[2, 0, 1], [6, 2, 3], [7, 4, 5],
+                                            [8, 6, 7]]))
 
         # test for second tree: strictly bifurcating
         id_2, child_2 = t2.index_tree()
         nodes_2 = [n.id for n in t2.traverse(self_before=False,
                    self_after=True)]
         self.assertEqual(nodes_2, [0, 1, 4, 2, 3, 5, 8, 6, 7, 9, 10])
-        self.assertEqual(child_2, [(4, 0, 1), (5, 2, 3), (8, 4, 5), (9, 6, 7),
-                                   (10, 8, 9)])
+        npt.assert_equal(child_2, np.array([[4, 0, 1], [5, 2, 3],
+                                            [8, 4, 5], [9, 6, 7],
+                                            [10, 8, 9]]))
 
         # test for third tree: contains trifurcation and single-child parent
         id_3, child_3 = t3.index_tree()
         nodes_3 = [n.id for n in t3.traverse(self_before=False,
                    self_after=True)]
         self.assertEqual(nodes_3, [0, 1, 2, 4, 3, 5, 8, 6, 7, 9, 10])
-        self.assertEqual(child_3, [(4, 0, 2), (5, 3, 3), (8, 4, 5), (9, 6, 7),
-                                   (10, 8, 9)])
+        npt.assert_equal(child_3, np.array([[4, 0, 2], [5, 3, 3], [8, 4, 5],
+                                            [9, 6, 7], [10, 8, 9]]))
 
     def test_root_at(self):
         """Form a new root"""
@@ -734,6 +886,18 @@ class TreeTests(TestCase):
         obs = t.root_at_midpoint()
         self.assertEqual(str(obs), nwk)
 
+    def test_root_at_midpoint_tie(self):
+        nwk = u"(((a:1,b:1)c:2,(d:3,e:4)f:5),g:1)root;"
+        t = TreeNode.read(StringIO(nwk))
+        exp = u"((d:3,e:4)f:2,((a:1,b:1)c:2,(g:1)):3)root;"
+        texp = TreeNode.read(StringIO(exp))
+
+        obs = t.root_at_midpoint()
+
+        for o, e in zip(obs.traverse(), texp.traverse()):
+            self.assertEqual(o.name, e.name)
+            self.assertEqual(o.length, e.length)
+
     def test_compare_subsets(self):
         """compare_subsets should return the fraction of shared subsets"""
         t = TreeNode.read(StringIO(u'((H,G),(R,M));'))
@@ -840,8 +1004,8 @@ class TreeTests(TestCase):
                                     ",(H:.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length()
         sdbl = tr.descending_branch_length(['A', 'E'])
-        nptest.assert_almost_equal(tdbl, 8.9)
-        nptest.assert_almost_equal(sdbl, 2.2)
+        npt.assert_almost_equal(tdbl, 8.9)
+        npt.assert_almost_equal(sdbl, 2.2)
         self.assertRaises(ValueError, tr.descending_branch_length,
                           ['A', 'DNE'])
         self.assertRaises(ValueError, tr.descending_branch_length, ['A', 'C'])
@@ -849,28 +1013,28 @@ class TreeTests(TestCase):
         tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H"
                                     ":.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length()
-        nptest.assert_almost_equal(tdbl, 8.8)
+        npt.assert_almost_equal(tdbl, 8.8)
 
         tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4"
                                     ",I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length()
-        nptest.assert_almost_equal(tdbl, 7.9)
+        npt.assert_almost_equal(tdbl, 7.9)
 
         tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4"
                                     ",I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length(['A', 'D', 'E'])
-        nptest.assert_almost_equal(tdbl, 2.1)
+        npt.assert_almost_equal(tdbl, 2.1)
 
         tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H"
                                     ":.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length(['I', 'D', 'E'])
-        nptest.assert_almost_equal(tdbl, 6.6)
+        npt.assert_almost_equal(tdbl, 6.6)
 
         # test with a situation where we have unnamed internal nodes
         tr = TreeNode.read(StringIO(u"(((A,B:1.2):.6,(D:.9,E:.6)F):2.4,(H:.4,I"
                                     ":.5)J:1.3);"))
         tdbl = tr.descending_branch_length()
-        nptest.assert_almost_equal(tdbl, 7.9)
+        npt.assert_almost_equal(tdbl, 7.9)
 
     def test_to_array(self):
         """Convert a tree to arrays"""
@@ -880,20 +1044,20 @@ class TreeTests(TestCase):
         arrayed = t.to_array()
 
         self.assertEqual(id_index, arrayed['id_index'])
-        self.assertEqual(child_index, arrayed['child_index'])
+        npt.assert_equal(child_index, arrayed['child_index'])
 
         exp = np.array([1, 2, 3, 5, 4, 6, 8, 9, 7, 10, np.nan])
         obs = arrayed['length']
-        nptest.assert_equal(obs, exp)
+        npt.assert_equal(obs, exp)
 
         exp = np.array(['a', 'b', 'c', 'd', 'x',
                         'y', 'e', 'f', 'z', 'z', None])
         obs = arrayed['name']
-        nptest.assert_equal(obs, exp)
+        npt.assert_equal(obs, exp)
 
         exp = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
         obs = arrayed['id']
-        nptest.assert_equal(obs, exp)
+        npt.assert_equal(obs, exp)
 
     def test_to_array_attrs(self):
         t = TreeNode.read(StringIO(
@@ -906,17 +1070,39 @@ class TreeTests(TestCase):
         self.assertEqual(len(arrayed), 3)
 
         self.assertEqual(id_index, arrayed['id_index'])
-        self.assertEqual(child_index, arrayed['child_index'])
+        npt.assert_equal(child_index, arrayed['child_index'])
 
         exp = np.array(['a', 'b', 'c', 'd', 'x',
                         'y', 'e', 'f', 'z', 'z', None])
         obs = arrayed['name']
-        nptest.assert_equal(obs, exp)
+        npt.assert_equal(obs, exp)
 
         # invalid attrs
         with self.assertRaises(AttributeError):
             t.to_array(attrs=[('name', object), ('brofist', int)])
 
+    def test_to_array_nan_length_value(self):
+        t = TreeNode.read(StringIO(u"((a:1, b:2)c:3)root;"))
+        indexed = t.to_array(nan_length_value=None)
+        npt.assert_equal(indexed['length'],
+                         np.array([1, 2, 3, np.nan], dtype=float))
+        indexed = t.to_array(nan_length_value=0.0)
+        npt.assert_equal(indexed['length'],
+                         np.array([1, 2, 3, 0.0], dtype=float))
+        indexed = t.to_array(nan_length_value=42.0)
+        npt.assert_equal(indexed['length'],
+                         np.array([1, 2, 3, 42.0], dtype=float))
+
+        t = TreeNode.read(StringIO(u"((a:1, b:2)c:3)root:4;"))
+        indexed = t.to_array(nan_length_value=42.0)
+        npt.assert_equal(indexed['length'],
+                         np.array([1, 2, 3, 4], dtype=float))
+
+        t = TreeNode.read(StringIO(u"((a:1, b:2)c)root;"))
+        indexed = t.to_array(nan_length_value=42.0)
+        npt.assert_equal(indexed['length'],
+                         np.array([1, 2, 42.0, 42.0], dtype=float))
+
     def test_from_taxonomy(self):
         input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
                           '2': ['a', 'b', 'c', None, None, 'x', 'y'],
diff --git a/skbio/tree/tests/test_trie.py b/skbio/tree/tests/test_trie.py
deleted file mode 100644
index 3df417d..0000000
--- a/skbio/tree/tests/test_trie.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-from future.builtins import zip
-
-from unittest import TestCase, main
-
-from skbio.tree import CompressedTrie, fasta_to_pairlist
-from skbio.tree._trie import _CompressedNode
-
-
-class CompressedNodeTests(TestCase):
-    """Tests for the _CompressedNode class"""
-
-    def setUp(self):
-        """Set up test data for use in compresses node unit tests"""
-        self.key = "aba"
-        self.values = [1, 2]
-        self.node = _CompressedNode(self.key, self.values)
-
-    def test_init(self):
-        """Node init should construct the right structure"""
-        # With no values should create a node with an empty list for values,
-        # the provided key as key, and an empty dictionary as children
-        n = _CompressedNode(self.key)
-        self.assertEqual(n.values, [])
-        self.assertEqual(n.key, self.key)
-        self.assertEqual(n.children, {})
-        # With values should create a node with the provided values list as
-        # values, the provided key as key, and an empty dictionary as children
-        n = _CompressedNode(self.key, self.values)
-        self.assertEqual(n.values, self.values)
-        self.assertEqual(n.key, self.key)
-        self.assertEqual(n.children, {})
-
-    def test_truth_value(self):
-        """Non zero should check for any data on the node"""
-        n = _CompressedNode("")
-        self.assertFalse(bool(n))
-        self.assertTrue(bool(self.node))
-
-    def test_len(self):
-        """Should return the number of values attached to the node"""
-        self.assertEqual(len(self.node), 2)
-
-    def test_size(self):
-        """Should return the number of nodes attached to the node"""
-        self.assertEqual(self.node.size, 1)
-
-    def test_prefix_map(self):
-        """Should return the prefix map of the node"""
-        exp = {1: [2]}
-        self.assertEqual(self.node.prefix_map, exp)
-
-    def test_insert(self):
-        """Correctly inserts a new key in the node"""
-        n = _CompressedNode(self.key, self.values)
-        n.insert("abb", [3])
-
-        # A new node has been create with the common prefix
-        self.assertEqual(n.key, "ab")
-        self.assertEqual(n.values, [])
-        # Tests the old node and the new one has been correctly added
-        # as children
-        exp_keys = set(["b", "a"])
-        self.assertEqual(set(n.children.keys()), exp_keys)
-        # Check that the children have the current values
-        self.assertEqual(n.children["b"].key, "b")
-        self.assertEqual(n.children["b"].values, [[3]])
-        self.assertEqual(n.children["b"].children, {})
-
-        self.assertEqual(n.children["a"].key, "a")
-        self.assertEqual(n.children["a"].values, [1, 2])
-        self.assertEqual(n.children["a"].children, {})
-
-    def test_find(self):
-        """The key could be found"""
-        # Correctly retrieves the key stored in the calling node
-        self.assertEqual(self.node.find("aba"), [1, 2])
-
-        # Correctly retrieves the key stored in a node attached to calling one
-        n = _CompressedNode(self.key, self.values)
-        n.insert("abb", [3])
-        self.assertEqual(n.find("aba"), [1, 2])
-        self.assertEqual(n.find("abb"), [[3]])
-        self.assertEqual(n.find("ab"), [])
-
-        # Correctly retrieves an empty list for a non existent key
-        self.assertEqual(n.find("cd"), [])
-
-
-class CompressedTrieTests(TestCase):
-    """Tests for the CompressedTrie class"""
-
-    def setUp(self):
-        """Set up test data for use in compressed trie unit tests"""
-        self.data = [("ab",  "0"),
-                     ("abababa", "1"),
-                     ("abab", "2"),
-                     ("baba", "3"),
-                     ("ababaa", "4"),
-                     ("a", "5"),
-                     ("abababa", "6"),
-                     ("bab", "7"),
-                     ("babba", "8")]
-        self.empty_trie = CompressedTrie()
-        self.trie = CompressedTrie(self.data)
-
-    def test_init(self):
-        """Trie init should construct the right structure"""
-        # In no pair_list is provided, it should create an empty Trie
-        t = CompressedTrie()
-        self.assertEqual(t._root.key, "")
-        self.assertEqual(t._root.values, [])
-        self.assertEqual(t._root.children, {})
-        # If a pair_list is provided, it should insert all the data
-        t = CompressedTrie(self.data)
-        self.assertEqual(t._root.key, "")
-        self.assertEqual(t._root.values, [])
-        self.assertEqual(set(t._root.children.keys()), set(["a", "b"]))
-
-    def test_non_zero(self):
-        """Non zero should check for any data on the trie"""
-        self.assertFalse(self.empty_trie)
-        self.assertTrue(self.trie)
-
-    def test_len(self):
-        """Should return the number of values attached to the trie"""
-        self.assertEqual(len(self.empty_trie), 0)
-        self.assertEqual(len(self.trie), 9)
-
-    def test_size(self):
-        """Should return the number of nodes attached to the trie"""
-        self.assertEqual(self.empty_trie.size, 1)
-        self.assertEqual(self.trie.size, 10)
-
-    def test_prefix_map(self):
-        """Should map prefix to values"""
-        exp1 = {"1": ["6", "2", "0", "5"],
-                "8": ["7"],
-                "3": [],
-                "4": []}
-        exp2 = {"1": ["6", "2", "0", "5"],
-                "8": [],
-                "3": ["7"],
-                "4": []}
-        self.assertTrue(self.trie.prefix_map in (exp1, exp2))
-
-    def test_insert(self):
-        """Correctly inserts a new key into the trie"""
-        t = CompressedTrie(self.data)
-        t.insert("babc", "9")
-        self.assertTrue("9" in t.find("babc"))
-
-        exp1 = {"1": ["6", "2", "0", "5"],
-                "9": ["7"],
-                "3": [],
-                "4": [],
-                "8": []}
-        exp2 = {"1": ["6", "2", "0", "5"],
-                "9": [],
-                "3": ["7"],
-                "4": [],
-                "8": []}
-        exp3 = {"1": ["6", "2", "0", "5"],
-                "9": [],
-                "3": [],
-                "4": [],
-                "8": ["7"]}
-        self.assertTrue(t.prefix_map in (exp1, exp2, exp3))
-
-    def test_find(self):
-        """Correctly founds the values present on the trie"""
-        for key, value in self.data:
-            self.assertTrue(value in self.trie.find(key))
-        self.assertEqual(self.trie.find("cac"), [])
-        self.assertEqual(self.trie.find("abababa"), ["1", "6"])
-
-
-class FastaToPairlistTests(TestCase):
-    """Tests for the fasta_to_pairlist function"""
-
-    def setUp(self):
-        self.seqs = [("sid_0", "AC"),
-                     ("sid_1", "ACAGTC"),
-                     ("sid_2", "ACTA"),
-                     ("sid_3", "CAGT"),
-                     ("sid_4", "CATGAA"),
-                     ("sid_5", "A"),
-                     ("sid_6", "CATGTA"),
-                     ("sid_7", "CAA"),
-                     ("sid_8", "CACCA")]
-
-    def test_fasta_to_pairlist(self):
-        """Correctly returns a list of (seq, label)"""
-        exp = [("AC", "sid_0"),
-               ("ACAGTC", "sid_1"),
-               ("ACTA", "sid_2"),
-               ("CAGT", "sid_3"),
-               ("CATGAA", "sid_4"),
-               ("A", "sid_5"),
-               ("CATGTA", "sid_6"),
-               ("CAA", "sid_7"),
-               ("CACCA", "sid_8")]
-
-        for obs, exp in zip(fasta_to_pairlist(self.seqs), exp):
-            self.assertEqual(obs, exp)
-
-if __name__ == '__main__':
-    main()
diff --git a/skbio/util/__init__.py b/skbio/util/__init__.py
index 68eb6d8..b36f55d 100644
--- a/skbio/util/__init__.py
+++ b/skbio/util/__init__.py
@@ -18,6 +18,7 @@ Common functionality to support testing in skbio.
 
    get_data_path
    TestRunner
+   assert_ordination_results_equal
    assert_data_frame_almost_equal
 
 Miscellaneous functionality
@@ -31,19 +32,10 @@ Generally useful functions that don't fit in more specific locations.
    cardinal_to_ordinal
    create_dir
    find_duplicates
-   flatten
    is_casava_v180_or_later
    remove_files
    safe_md5
 
-Exceptions
-----------
-
-.. autosummary::
-   :toctree: generated/
-
-   TestingUtilError
-
 Warnings
 --------
 
@@ -51,6 +43,7 @@ Warnings
    :toctree: generated/
 
    EfficiencyWarning
+   RepresentationWarning
 
 """
 
@@ -64,16 +57,16 @@ Warnings
 
 from __future__ import absolute_import, division, print_function
 
-from ._warning import EfficiencyWarning
-from ._exception import TestingUtilError
-from ._misc import (cardinal_to_ordinal, create_dir, find_duplicates, flatten,
+from ._warning import EfficiencyWarning, RepresentationWarning
+from ._misc import (cardinal_to_ordinal, create_dir, find_duplicates,
                     is_casava_v180_or_later, remove_files, safe_md5)
 from ._testing import (get_data_path, TestRunner,
+                       assert_ordination_results_equal,
                        assert_data_frame_almost_equal)
 
-__all__ = ['EfficiencyWarning', 'TestingUtilError', 'cardinal_to_ordinal',
-           'create_dir', 'find_duplicates', 'flatten',
-           'is_casava_v180_or_later', 'remove_files', 'safe_md5',
-           'get_data_path', 'TestRunner', 'assert_data_frame_almost_equal']
+__all__ = ['EfficiencyWarning', 'RepresentationWarning', 'cardinal_to_ordinal',
+           'create_dir', 'find_duplicates', 'is_casava_v180_or_later',
+           'remove_files', 'safe_md5', 'get_data_path', 'TestRunner',
+           'assert_ordination_results_equal', 'assert_data_frame_almost_equal']
 
 test = TestRunner(__file__).test
diff --git a/skbio/util/_decorator.py b/skbio/util/_decorator.py
index 258dedd..69a3bf2 100644
--- a/skbio/util/_decorator.py
+++ b/skbio/util/_decorator.py
@@ -197,7 +197,7 @@ class deprecated(_state_decorator):
 
     Used to indicate that a public class or function is deprecated, meaning
     that its API will be removed in a future version of scikit-bio. Decorating
-    functionality as experimental will update its doc string to indicate the
+    functionality as deprecated will update its doc string to indicate the
     first version of scikit-bio when the functionality was deprecated, the
     first version of scikit-bio when the functionality will no longer exist,
     and the reason for deprecation of the API. It will also cause calls to the
@@ -336,3 +336,35 @@ class classproperty(property):
 
     def __set__(self, obj, value):
         raise AttributeError("can't set attribute")
+
+
+class classonlymethod(classmethod):
+    """Just like `classmethod`, but it can't be called on an instance."""
+
+    def __init__(self, function):
+        super(classonlymethod, self).__init__(function)
+
+    def __get__(self, obj, cls=None):
+        if obj is not None:
+            raise TypeError("Class-only method called on an instance. Use"
+                            " '%s.%s' instead."
+                            % (cls.__name__, self.__func__.__name__))
+
+        evaldict = self.__func__.__globals__.copy()
+        evaldict['_call_'] = self.__func__
+        evaldict['_cls_'] = cls
+        fun = FunctionMakerDropFirstArg.create(
+            self.__func__, "return _call_(_cls_, %(shortsignature)s)",
+            evaldict, __wrapped__=self.__func__)
+        fun.__func__ = self.__func__  # Doctests need the original function
+        return fun
+
+
+class FunctionMakerDropFirstArg(decorator.FunctionMaker):
+    def __init__(self, *args, **kwargs):
+        super(FunctionMakerDropFirstArg, self).__init__(*args, **kwargs)
+        self.signature = self._remove_first_arg(self.signature)
+        self.shortsignature = self._remove_first_arg(self.shortsignature)
+
+    def _remove_first_arg(self, string):
+        return ",".join(string.split(',')[1:])[1:]
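
Editor's note: classonlymethod behaves like classmethod except that instance access raises TypeError, and FunctionMakerDropFirstArg rebuilds the wrapper's signature without the leading cls argument. A hedged usage sketch (Table and from_dict are hypothetical names; the decorator lives in a private module):

    from skbio.util._decorator import classonlymethod

    class Table(object):
        @classonlymethod
        def from_dict(cls, d):
            obj = cls()
            obj.data = dict(d)
            return obj

    t = Table.from_dict({'a': 1})  # OK: called on the class
    try:
        t.from_dict({'b': 2})      # called on an instance
    except TypeError as e:
        print(e)                   # "Class-only method called on an instance..."
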
diff --git a/skbio/util/_metadata_repr.py b/skbio/util/_metadata_repr.py
new file mode 100644
index 0000000..eacab93
--- /dev/null
+++ b/skbio/util/_metadata_repr.py
@@ -0,0 +1,168 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.utils import with_metaclass
+
+import six
+import itertools
+import numbers
+import textwrap
+
+from abc import ABCMeta, abstractmethod
+from skbio._base import ElasticLines
+
+
+class _MetadataReprBuilder(with_metaclass(ABCMeta, object)):
+    """Abstract base class for building a repr for an object containing
+    metadata and/or positional metadata.
+
+    Parameters
+    ----------
+    obj : Type varies depending on subclass
+        Object to build repr for.
+    width : int
+        Maximum width of the repr.
+    indent : int
+        Number of spaces to use for indented lines.
+    """
+    def __init__(self, obj, width, indent):
+        self._obj = obj
+        self._width = width
+        self._indent = ' ' * indent
+
+    @abstractmethod
+    def _process_header(self):
+        """Used by `build` Template Method to build header for the repr"""
+
+    @abstractmethod
+    def _process_data(self):
+        """Used by `build` Template Method to build data lines for the repr"""
+
+    def build(self):
+        """Template method for building the repr"""
+
+        self._lines = ElasticLines()
+
+        self._process_header()
+        self._process_metadata()
+        self._process_positional_metadata()
+        self._process_stats()
+        self._process_data()
+
+        return self._lines.to_str()
+
+    def _process_metadata(self):
+        if self._obj.has_metadata():
+            self._lines.add_line('Metadata:')
+            # Python 3 doesn't allow sorting of mixed types so we can't just
+            # use sorted() on the metadata keys. Sort first by type then sort
+            # by value within each type.
+            for key in self._sorted_keys_grouped_by_type(self._obj.metadata):
+                value = self._obj.metadata[key]
+                self._lines.add_lines(
+                    self._format_metadata_key_value(key, value))
+
+    def _sorted_keys_grouped_by_type(self, dict_):
+        """Group keys within a dict by their type and sort within type."""
+        type_sorted = sorted(dict_, key=self._type_sort_key)
+        type_and_value_sorted = []
+        for _, group in itertools.groupby(type_sorted, self._type_sort_key):
+            type_and_value_sorted.extend(sorted(group))
+        return type_and_value_sorted
+
+    def _type_sort_key(self, key):
+        return repr(type(key))
+
+    def _format_metadata_key_value(self, key, value):
+        """Format metadata key:value, wrapping across lines if necessary."""
+        key_fmt = self._format_key(key)
+
+        supported_type = True
+        if isinstance(value, (six.text_type, six.binary_type)):
+            # for stringy values, there may be u'' or b'' depending on the type
+            # of `value` and version of Python. find the starting quote
+            # character so that wrapped text will line up with that instead of
+            # the string literal prefix character. for example:
+            #
+            #     'foo': u'abc def ghi
+            #              jkl mno'
+            value_repr = repr(value)
+            extra_indent = 1
+            if not (value_repr.startswith("'") or value_repr.startswith('"')):
+                extra_indent = 2
+        # handles any number, this includes bool
+        elif value is None or isinstance(value, numbers.Number):
+            value_repr = repr(value)
+            extra_indent = 0
+        else:
+            supported_type = False
+
+        if not supported_type or len(value_repr) > 140:
+            value_repr = str(type(value))
+            # extra indent of 1 so that wrapped text lines up past the bracket:
+            #
+            #     'foo': <type
+            #             'dict'>
+            extra_indent = 1
+
+        return self._wrap_text_with_indent(value_repr, key_fmt, extra_indent)
+
+    def _process_positional_metadata(self):
+        if self._obj.has_positional_metadata():
+            self._lines.add_line('Positional metadata:')
+            for key in self._obj.positional_metadata.columns.values.tolist():
+                dtype = self._obj.positional_metadata[key].dtype
+                self._lines.add_lines(
+                    self._format_positional_metadata_column(key, dtype))
+
+    def _format_positional_metadata_column(self, key, dtype):
+        key_fmt = self._format_key(key)
+        dtype_fmt = '<dtype: %s>' % str(dtype)
+        return self._wrap_text_with_indent(dtype_fmt, key_fmt, 1)
+
+    def _format_key(self, key):
+        """Format metadata key.
+
+        Includes initial indent and trailing colon and space:
+
+            <indent>'foo':<space>
+
+        """
+        key_fmt = self._indent + repr(key)
+        supported_types = (six.text_type, six.binary_type, numbers.Number,
+                           type(None))
+        if len(key_fmt) > (self._width / 2) or not isinstance(key,
+                                                              supported_types):
+            key_fmt = self._indent + str(type(key))
+        return '%s: ' % key_fmt
+
+    def _wrap_text_with_indent(self, text, initial_text, extra_indent):
+        """Wrap text across lines with an initial indentation.
+
+        For example:
+
+            'foo': 'abc def
+                    ghi jkl
+                    mno pqr'
+
+        <indent>'foo':<space> is `initial_text`. `extra_indent` is 1. Wrapped
+        lines are indented such that they line up with the start of the
+        previous line of wrapped text.
+
+        """
+        return textwrap.wrap(
+            text, width=self._width, expand_tabs=False,
+            initial_indent=initial_text,
+            subsequent_indent=' ' * (len(initial_text) + extra_indent))
+
+    def _process_stats(self):
+        self._lines.add_line('Stats:')
+        for label, value in self._obj._repr_stats():
+            self._lines.add_line('%s%s: %s' % (self._indent, label, value))
+        self._lines.add_separator()
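
Editor's note: the wrapping logic in _wrap_text_with_indent is plain textwrap with an initial_indent of "<indent>key: " and a subsequent_indent sized so continuation lines align just past the opening quote. A small sketch of the same call, with hypothetical key/value text and a narrow width to force wrapping:

    import textwrap

    initial_text = "    'foo': "   # indent + repr(key) + ': '
    value_repr = "'abc def ghi jkl mno pqr stu vwx'"
    extra_indent = 1               # skip past the opening quote

    lines = textwrap.wrap(
        value_repr, width=25, expand_tabs=False,
        initial_indent=initial_text,
        subsequent_indent=' ' * (len(initial_text) + extra_indent))
    print('\n'.join(lines))
    # Continuation lines start one column past the opening quote of the value.
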
diff --git a/skbio/util/_misc.py b/skbio/util/_misc.py
index b79521c..e4db019 100644
--- a/skbio/util/_misc.py
+++ b/skbio/util/_misc.py
@@ -14,7 +14,18 @@ from os.path import exists, isdir
 from functools import partial
 from types import FunctionType
 import inspect
-from ._decorator import experimental, deprecated
+from ._decorator import experimental
+
+
+def resolve_key(obj, key):
+    """Resolve key given an object and key."""
+    if callable(key):
+        return key(obj)
+    elif hasattr(obj, 'metadata'):
+        return obj.metadata[key]
+    raise TypeError("Could not resolve key %r. Key must be callable or %s must"
+                    " have `metadata` attribute." % (key,
+                                                     obj.__class__.__name__))
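
Editor's note: resolve_key has two paths -- call the key if it is callable, otherwise treat it as a metadata key. A quick sketch against a sequence object (resolve_key is a private helper; DNA is used here only as an example carrier of metadata):

    from skbio import DNA
    from skbio.util._misc import resolve_key

    seq = DNA('ACGT', metadata={'id': 'seq1'})

    print(resolve_key(seq, 'id'))              # metadata lookup -> 'seq1'
    print(resolve_key(seq, lambda s: len(s)))  # callable -> 4
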
 
 
 def make_sentinel(name):
@@ -153,15 +164,15 @@ def is_casava_v180_or_later(header_line):
     Examples
     --------
     >>> from skbio.util import is_casava_v180_or_later
-    >>> print(is_casava_v180_or_later('@foo'))
+    >>> is_casava_v180_or_later(b'@foo')
     False
-    >>> id_ = '@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
-    >>> print(is_casava_v180_or_later(id_))
+    >>> id_ = b'@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
+    >>> is_casava_v180_or_later(id_)
     True
 
     """
     if not header_line.startswith(b'@'):
-        raise ValueError("Non-header line passed in!")
+        raise ValueError("Non-header line passed in.")
     fields = header_line.split(b':')
 
     return len(fields) == 10 and fields[7] in b'YN'
@@ -191,9 +202,9 @@ def safe_md5(open_file, block_size=2 ** 20):
 
     Examples
     --------
-    >>> from StringIO import StringIO
+    >>> from io import BytesIO
     >>> from skbio.util import safe_md5
-    >>> fd = StringIO("foo bar baz") # open file like object
+    >>> fd = BytesIO(b"foo bar baz") # open file like object
     >>> x = safe_md5(fd)
     >>> x.hexdigest()
     'ab07acbb1e496801937adfa772424bf7'
@@ -348,45 +359,6 @@ def find_duplicates(iterable):
             seen.add(e)
     return repeated
 
-flatten_deprecation_reason = (
-    "Solutions to this problem exist in the python standarnd library. "
-    "Please refer to the following links for good alternatives:\n"
-    "http://stackoverflow.com/a/952952/3639023\n"
-    "http://stackoverflow.com/a/406199/3639023")
-
-
- at deprecated(as_of="0.2.3-dev", until="0.4.1",
-            reason=flatten_deprecation_reason)
-def flatten(items):
-    """Removes one level of nesting from items
-
-    Parameters
-    ----------
-    items : iterable
-        list of items to flatten one level
-
-    Returns
-    -------
-    flattened_items : list
-        list of flattened items, items can be any sequence, but flatten always
-        returns a list.
-
-    Examples
-    --------
-    >>> from skbio.util import flatten
-    >>> h = [['a', 'b', 'c', 'd'], [1, 2, 3, 4, 5], ['x', 'y'], ['foo']]
-    >>> print(flatten(h))
-    ['a', 'b', 'c', 'd', 1, 2, 3, 4, 5, 'x', 'y', 'foo']
-
-    """
-    result = []
-    for i in items:
-        try:
-            result.extend(i)
-        except TypeError:
-            result.append(i)
-    return result
-
 
 def _get_create_dir_error_codes():
     return {'NO_ERROR':      0,
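
The new resolve_key helper above either calls a callable key on the object
or looks a plain key up in the object's metadata dict. A minimal sketch of
its behavior (illustrative only, not part of the patch; HasMetadata is a
hypothetical stand-in for a skbio object carrying a metadata dict):

    >>> from skbio.util._misc import resolve_key
    >>> resolve_key('abc', len)  # callable keys are applied to the object
    3
    >>> class HasMetadata(object):
    ...     metadata = {'id': 'seq1'}
    >>> resolve_key(HasMetadata(), 'id')  # other keys index into metadata
    'seq1'

Anything that is neither callable nor backed by a metadata attribute raises
TypeError.
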
diff --git a/skbio/util/_testing.py b/skbio/util/_testing.py
index a8e616e..b357595 100644
--- a/skbio/util/_testing.py
+++ b/skbio/util/_testing.py
@@ -7,18 +7,890 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
+from future.utils import PY3
 
+import copy
 import os
 import inspect
 
-import pandas.util.testing as pdt
+import six
+import pandas as pd
 from nose import core
 from nose.tools import nottest
-from future.utils import PY3
+
+import numpy as np
+import numpy.testing as npt
+import pandas.util.testing as pdt
 
 from ._decorator import experimental
 
 
+class ReallyEqualMixin(object):
+    """Use this for testing __eq__/__ne__.
+
+    Taken and modified from the following public domain code:
+      https://ludios.org/testing-your-eq-ne-cmp/
+
+    """
+
+    def assertReallyEqual(self, a, b):
+        # assertEqual first, because it will have a good message if the
+        # assertion fails.
+        self.assertEqual(a, b)
+        self.assertEqual(b, a)
+        self.assertTrue(a == b)
+        self.assertTrue(b == a)
+        self.assertFalse(a != b)
+        self.assertFalse(b != a)
+
+        # We do not support cmp/__cmp__ because they do not exist in Python 3.
+        # However, we still test this to catch potential bugs where the
+        # object's parent class defines a __cmp__.
+        if not PY3:
+            self.assertEqual(0, cmp(a, b))  # noqa
+            self.assertEqual(0, cmp(b, a))  # noqa
+
+    def assertReallyNotEqual(self, a, b):
+        # assertNotEqual first, because it will have a good message if the
+        # assertion fails.
+        self.assertNotEqual(a, b)
+        self.assertNotEqual(b, a)
+        self.assertFalse(a == b)
+        self.assertFalse(b == a)
+        self.assertTrue(a != b)
+        self.assertTrue(b != a)
+
+        # We do not support cmp/__cmp__ because they do not exist in Python 3.
+        # However, we still test this to catch potential bugs where the
+        # object's parent class defines a __cmp__.
+        if not PY3:
+            self.assertNotEqual(0, cmp(a, b))  # noqa
+            self.assertNotEqual(0, cmp(b, a))  # noqa
+
+
+class MetadataMixinTests(object):
+    def test_constructor_invalid_type(self):
+        for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
+            with six.assertRaisesRegex(self, TypeError,
+                                       'metadata must be a dict'):
+                self._metadata_constructor_(metadata=md)
+
+    def test_constructor_no_metadata(self):
+        for md in None, {}:
+            obj = self._metadata_constructor_(metadata=md)
+
+            self.assertFalse(obj.has_metadata())
+            self.assertEqual(obj.metadata, {})
+
+    def test_constructor_with_metadata(self):
+        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
+        self.assertEqual(obj.metadata, {'foo': 'bar'})
+
+        obj = self._metadata_constructor_(
+                metadata={'': '', 123: {'a': 'b', 'c': 'd'}})
+        self.assertEqual(obj.metadata, {'': '', 123: {'a': 'b', 'c': 'd'}})
+
+    def test_constructor_handles_missing_metadata_efficiently(self):
+        self.assertIsNone(self._metadata_constructor_()._metadata)
+        self.assertIsNone(self._metadata_constructor_(metadata=None)._metadata)
+
+    def test_constructor_makes_shallow_copy_of_metadata(self):
+        md = {'foo': 'bar', 42: []}
+        obj = self._metadata_constructor_(metadata=md)
+
+        self.assertEqual(obj.metadata, md)
+        self.assertIsNot(obj.metadata, md)
+
+        md['foo'] = 'baz'
+        self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
+
+        md[42].append(True)
+        self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})
+
+    def test_eq(self):
+        self.assertReallyEqual(
+                self._metadata_constructor_(metadata={'foo': 42}),
+                self._metadata_constructor_(metadata={'foo': 42}))
+
+        self.assertReallyEqual(
+                self._metadata_constructor_(metadata={'foo': 42, 123: {}}),
+                self._metadata_constructor_(metadata={'foo': 42, 123: {}}))
+
+    def test_eq_missing_metadata(self):
+        self.assertReallyEqual(self._metadata_constructor_(),
+                               self._metadata_constructor_())
+        self.assertReallyEqual(self._metadata_constructor_(),
+                               self._metadata_constructor_(metadata={}))
+        self.assertReallyEqual(self._metadata_constructor_(metadata={}),
+                               self._metadata_constructor_(metadata={}))
+
+    def test_eq_handles_missing_metadata_efficiently(self):
+        obj1 = self._metadata_constructor_()
+        obj2 = self._metadata_constructor_()
+        self.assertReallyEqual(obj1, obj2)
+
+        self.assertIsNone(obj1._metadata)
+        self.assertIsNone(obj2._metadata)
+
+    def test_ne(self):
+        # Both have metadata.
+        obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
+        obj2 = self._metadata_constructor_(metadata={'id': 'bar'})
+        self.assertReallyNotEqual(obj1, obj2)
+
+        # One has metadata.
+        obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
+        obj2 = self._metadata_constructor_()
+        self.assertReallyNotEqual(obj1, obj2)
+
+    def test_copy_metadata_none(self):
+        obj = self._metadata_constructor_()
+        obj_copy = copy.copy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        self.assertIsNone(obj._metadata)
+        self.assertIsNone(obj_copy._metadata)
+
+    def test_copy_metadata_empty(self):
+        obj = self._metadata_constructor_(metadata={})
+        obj_copy = copy.copy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        self.assertEqual(obj._metadata, {})
+        self.assertIsNone(obj_copy._metadata)
+
+    def test_copy_with_metadata(self):
+        obj = self._metadata_constructor_(metadata={'foo': [1]})
+        obj_copy = copy.copy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        self.assertIsNot(obj._metadata, obj_copy._metadata)
+        self.assertIs(obj._metadata['foo'], obj_copy._metadata['foo'])
+
+        obj_copy.metadata['foo'].append(2)
+        obj_copy.metadata['foo2'] = 42
+
+        self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
+        self.assertEqual(obj.metadata, {'foo': [1, 2]})
+
+    def test_deepcopy_metadata_none(self):
+        obj = self._metadata_constructor_()
+        obj_copy = copy.deepcopy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        self.assertIsNone(obj._metadata)
+        self.assertIsNone(obj_copy._metadata)
+
+    def test_deepcopy_metadata_empty(self):
+        obj = self._metadata_constructor_(metadata={})
+        obj_copy = copy.deepcopy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        self.assertEqual(obj._metadata, {})
+        self.assertIsNone(obj_copy._metadata)
+
+    def test_deepcopy_with_metadata(self):
+        obj = self._metadata_constructor_(metadata={'foo': [1]})
+        obj_copy = copy.deepcopy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        self.assertIsNot(obj._metadata, obj_copy._metadata)
+        self.assertIsNot(obj._metadata['foo'], obj_copy._metadata['foo'])
+
+        obj_copy.metadata['foo'].append(2)
+        obj_copy.metadata['foo2'] = 42
+
+        self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
+        self.assertEqual(obj.metadata, {'foo': [1]})
+
+    def test_deepcopy_memo_is_respected(self):
+        # Basic test to ensure deepcopy's memo is passed through to recursive
+        # deepcopy calls.
+        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
+        memo = {}
+        copy.deepcopy(obj, memo)
+        self.assertGreater(len(memo), 2)
+
+    def test_metadata_getter(self):
+        obj = self._metadata_constructor_(
+                metadata={42: 'foo', ('hello', 'world'): 43})
+
+        self.assertIsInstance(obj.metadata, dict)
+        self.assertEqual(obj.metadata, {42: 'foo', ('hello', 'world'): 43})
+
+        obj.metadata[42] = 'bar'
+        self.assertEqual(obj.metadata, {42: 'bar', ('hello', 'world'): 43})
+
+    def test_metadata_getter_no_metadata(self):
+        obj = self._metadata_constructor_()
+
+        self.assertIsNone(obj._metadata)
+        self.assertIsInstance(obj.metadata, dict)
+        self.assertEqual(obj.metadata, {})
+        self.assertIsNotNone(obj._metadata)
+
+    def test_metadata_setter(self):
+        obj = self._metadata_constructor_()
+
+        self.assertFalse(obj.has_metadata())
+
+        obj.metadata = {'hello': 'world'}
+        self.assertTrue(obj.has_metadata())
+        self.assertEqual(obj.metadata, {'hello': 'world'})
+
+        obj.metadata = {}
+        self.assertFalse(obj.has_metadata())
+        self.assertEqual(obj.metadata, {})
+
+    def test_metadata_setter_makes_shallow_copy(self):
+        obj = self._metadata_constructor_()
+
+        md = {'foo': 'bar', 42: []}
+        obj.metadata = md
+
+        self.assertEqual(obj.metadata, md)
+        self.assertIsNot(obj.metadata, md)
+
+        md['foo'] = 'baz'
+        self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
+
+        md[42].append(True)
+        self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})
+
+    def test_metadata_setter_invalid_type(self):
+        obj = self._metadata_constructor_(metadata={123: 456})
+
+        for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
+                   pd.DataFrame()):
+            with six.assertRaisesRegex(self, TypeError,
+                                       'metadata must be a dict'):
+                obj.metadata = md
+            self.assertEqual(obj.metadata, {123: 456})
+
+    def test_metadata_deleter(self):
+        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
+
+        self.assertEqual(obj.metadata, {'foo': 'bar'})
+
+        del obj.metadata
+        self.assertIsNone(obj._metadata)
+        self.assertFalse(obj.has_metadata())
+
+        # Delete again.
+        del obj.metadata
+        self.assertIsNone(obj._metadata)
+        self.assertFalse(obj.has_metadata())
+
+        obj = self._metadata_constructor_()
+
+        self.assertIsNone(obj._metadata)
+        self.assertFalse(obj.has_metadata())
+        del obj.metadata
+        self.assertIsNone(obj._metadata)
+        self.assertFalse(obj.has_metadata())
+
+    def test_has_metadata(self):
+        obj = self._metadata_constructor_()
+
+        self.assertFalse(obj.has_metadata())
+        # Handles metadata efficiently.
+        self.assertIsNone(obj._metadata)
+
+        self.assertFalse(
+                self._metadata_constructor_(metadata={}).has_metadata())
+
+        self.assertTrue(
+                self._metadata_constructor_(metadata={'': ''}).has_metadata())
+        self.assertTrue(
+                self._metadata_constructor_(
+                        metadata={'foo': 42}).has_metadata())
+
+
+class PositionalMetadataMixinTests(object):
+    def test_constructor_invalid_positional_metadata_type(self):
+        with six.assertRaisesRegex(self, TypeError,
+                                   'Invalid positional metadata. Must be '
+                                   'consumable by `pd.DataFrame` constructor. '
+                                   'Original pandas error message: '):
+            self._positional_metadata_constructor_(0, positional_metadata=2)
+
+    def test_constructor_positional_metadata_len_mismatch(self):
+        # Zero elements.
+        with six.assertRaisesRegex(self, ValueError, '\(0\).*\(4\)'):
+            self._positional_metadata_constructor_(4, positional_metadata=[])
+
+        # Not enough elements.
+        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
+            self._positional_metadata_constructor_(
+                4, positional_metadata=[2, 3, 4])
+
+        # Too many elements.
+        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
+            self._positional_metadata_constructor_(
+                4, positional_metadata=[2, 3, 4, 5, 6])
+
+        # Series not enough rows.
+        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
+            self._positional_metadata_constructor_(
+                4, positional_metadata=pd.Series(range(3)))
+
+        # Series too many rows.
+        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
+            self._positional_metadata_constructor_(
+                4, positional_metadata=pd.Series(range(5)))
+
+        # DataFrame not enough rows.
+        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
+            self._positional_metadata_constructor_(
+                4, positional_metadata=pd.DataFrame({'quality': range(3)}))
+
+        # DataFrame too many rows.
+        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
+            self._positional_metadata_constructor_(
+                4, positional_metadata=pd.DataFrame({'quality': range(5)}))
+
+    def test_constructor_no_positional_metadata(self):
+        # Length zero with missing/empty positional metadata.
+        for empty in None, {}, pd.DataFrame():
+            obj = self._positional_metadata_constructor_(
+                0, positional_metadata=empty)
+
+            self.assertFalse(obj.has_positional_metadata())
+            assert_data_frame_almost_equal(obj.positional_metadata,
+                                           pd.DataFrame(index=np.arange(0)))
+
+        # Nonzero length with missing positional metadata.
+        obj = self._positional_metadata_constructor_(
+            3, positional_metadata=None)
+
+        self.assertFalse(obj.has_positional_metadata())
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame(index=np.arange(3)))
+
+    def test_constructor_with_positional_metadata_len_zero(self):
+        for data in [], (), np.array([]):
+            obj = self._positional_metadata_constructor_(
+                0, positional_metadata={'foo': data})
+
+            self.assertTrue(obj.has_positional_metadata())
+            assert_data_frame_almost_equal(
+                obj.positional_metadata,
+                pd.DataFrame({'foo': data}, index=np.arange(0)))
+
+    def test_constructor_with_positional_metadata_len_one(self):
+        for data in [2], (2, ), np.array([2]):
+            obj = self._positional_metadata_constructor_(
+                1, positional_metadata={'foo': data})
+
+            self.assertTrue(obj.has_positional_metadata())
+            assert_data_frame_almost_equal(
+                obj.positional_metadata,
+                pd.DataFrame({'foo': data}, index=np.arange(1)))
+
+    def test_constructor_with_positional_metadata_len_greater_than_one(self):
+        for data in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
+                     (0, 42, 42, 1, 0, 8, 100, 0, 0),
+                     np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
+            obj = self._positional_metadata_constructor_(
+                9, positional_metadata={'foo': data})
+
+            self.assertTrue(obj.has_positional_metadata())
+            assert_data_frame_almost_equal(
+                obj.positional_metadata,
+                pd.DataFrame({'foo': data}, index=np.arange(9)))
+
+    def test_constructor_with_positional_metadata_multiple_columns(self):
+        obj = self._positional_metadata_constructor_(
+            5, positional_metadata={'foo': np.arange(5),
+                                    'bar': np.arange(5)[::-1]})
+
+        self.assertTrue(obj.has_positional_metadata())
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': np.arange(5),
+                          'bar': np.arange(5)[::-1]}, index=np.arange(5)))
+
+    def test_constructor_with_positional_metadata_custom_index(self):
+        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
+                          index=['a', 'b', 'c', 'd', 'e'])
+        obj = self._positional_metadata_constructor_(
+            5, positional_metadata=df)
+
+        self.assertTrue(obj.has_positional_metadata())
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': np.arange(5),
+                          'bar': np.arange(5)[::-1]}, index=np.arange(5)))
+
+    def test_constructor_handles_missing_positional_metadata_efficiently(self):
+        obj = self._positional_metadata_constructor_(4)
+        self.assertIsNone(obj._positional_metadata)
+
+        obj = self._positional_metadata_constructor_(
+            4, positional_metadata=None)
+        self.assertIsNone(obj._positional_metadata)
+
+    def test_constructor_makes_shallow_copy_of_positional_metadata(self):
+        df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
+                          index=['a', 'b', 'c'])
+        obj = self._positional_metadata_constructor_(
+            3, positional_metadata=df)
+
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
+                         index=np.arange(3)))
+        self.assertIsNot(obj.positional_metadata, df)
+
+        # Original df is not mutated.
+        orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
+                               index=['a', 'b', 'c'])
+        assert_data_frame_almost_equal(df, orig_df)
+
+        # Change values of column (using same dtype).
+        df['foo'] = [42, 42, 42]
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
+                         index=np.arange(3)))
+
+        # Change single value of underlying data.
+        df.values[0][0] = 10
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
+                         index=np.arange(3)))
+
+        # Mutate list (not a deep copy).
+        df['bar'][0].append(42)
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
+                         index=np.arange(3)))
+
+    def test_eq_basic(self):
+        obj1 = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': [1, 2, 3]})
+        obj2 = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': [1, 2, 3]})
+        self.assertReallyEqual(obj1, obj2)
+
+    def test_eq_from_different_source(self):
+        obj1 = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': np.array([1, 2, 3])})
+        obj2 = self._positional_metadata_constructor_(
+            3, positional_metadata=pd.DataFrame({'foo': [1, 2, 3]},
+                                                index=['foo', 'bar', 'baz']))
+        self.assertReallyEqual(obj1, obj2)
+
+    def test_eq_missing_positional_metadata(self):
+        for empty in None, {}, pd.DataFrame(), pd.DataFrame(index=[]):
+            obj = self._positional_metadata_constructor_(
+                0, positional_metadata=empty)
+
+            self.assertReallyEqual(
+                obj,
+                self._positional_metadata_constructor_(0))
+            self.assertReallyEqual(
+                obj,
+                self._positional_metadata_constructor_(
+                    0, positional_metadata=empty))
+
+        for empty in None, pd.DataFrame(index=['a', 'b']):
+            obj = self._positional_metadata_constructor_(
+                2, positional_metadata=empty)
+
+            self.assertReallyEqual(
+                obj,
+                self._positional_metadata_constructor_(2))
+            self.assertReallyEqual(
+                obj,
+                self._positional_metadata_constructor_(
+                    2, positional_metadata=empty))
+
+    def test_eq_handles_missing_positional_metadata_efficiently(self):
+        obj1 = self._positional_metadata_constructor_(1)
+        obj2 = self._positional_metadata_constructor_(1)
+        self.assertReallyEqual(obj1, obj2)
+
+        self.assertIsNone(obj1._positional_metadata)
+        self.assertIsNone(obj2._positional_metadata)
+
+    def test_ne_len_zero(self):
+        # Both have positional metadata.
+        obj1 = self._positional_metadata_constructor_(
+            0, positional_metadata={'foo': []})
+        obj2 = self._positional_metadata_constructor_(
+            0, positional_metadata={'foo': [], 'bar': []})
+        self.assertReallyNotEqual(obj1, obj2)
+
+        # One has positional metadata.
+        obj1 = self._positional_metadata_constructor_(
+            0, positional_metadata={'foo': []})
+        obj2 = self._positional_metadata_constructor_(0)
+        self.assertReallyNotEqual(obj1, obj2)
+
+    def test_ne_len_greater_than_zero(self):
+        # Both have positional metadata.
+        obj1 = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': [1, 2, 3]})
+        obj2 = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': [1, 2, 2]})
+        self.assertReallyNotEqual(obj1, obj2)
+
+        # One has positional metadata.
+        obj1 = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': [1, 2, 3]})
+        obj2 = self._positional_metadata_constructor_(3)
+        self.assertReallyNotEqual(obj1, obj2)
+
+    def test_copy_positional_metadata_none(self):
+        obj = self._positional_metadata_constructor_(3)
+        obj_copy = copy.copy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        self.assertIsNone(obj._positional_metadata)
+        self.assertIsNone(obj_copy._positional_metadata)
+
+    def test_copy_positional_metadata_empty(self):
+        obj = self._positional_metadata_constructor_(
+            3, positional_metadata=pd.DataFrame(index=range(3)))
+        obj_copy = copy.copy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        assert_data_frame_almost_equal(obj._positional_metadata,
+                                       pd.DataFrame(index=range(3)))
+        self.assertIsNone(obj_copy._positional_metadata)
+
+    def test_copy_with_positional_metadata(self):
+        obj = self._positional_metadata_constructor_(
+            4, positional_metadata={'bar': [[], [], [], []],
+                                    'baz': [42, 42, 42, 42]})
+        obj_copy = copy.copy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        self.assertIsNot(obj._positional_metadata,
+                         obj_copy._positional_metadata)
+        self.assertIsNot(obj._positional_metadata.values,
+                         obj_copy._positional_metadata.values)
+        self.assertIs(obj._positional_metadata.loc[0, 'bar'],
+                      obj_copy._positional_metadata.loc[0, 'bar'])
+
+        obj_copy.positional_metadata.loc[0, 'bar'].append(1)
+        obj_copy.positional_metadata.loc[0, 'baz'] = 43
+
+        assert_data_frame_almost_equal(
+            obj_copy.positional_metadata,
+            pd.DataFrame({'bar': [[1], [], [], []],
+                          'baz': [43, 42, 42, 42]}))
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'bar': [[1], [], [], []],
+                          'baz': [42, 42, 42, 42]}))
+
+    def test_deepcopy_positional_metadata_none(self):
+        obj = self._positional_metadata_constructor_(3)
+        obj_copy = copy.deepcopy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        self.assertIsNone(obj._positional_metadata)
+        self.assertIsNone(obj_copy._positional_metadata)
+
+    def test_deepcopy_positional_metadata_empty(self):
+        obj = self._positional_metadata_constructor_(
+            3, positional_metadata=pd.DataFrame(index=range(3)))
+        obj_copy = copy.deepcopy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        assert_data_frame_almost_equal(obj._positional_metadata,
+                                       pd.DataFrame(index=range(3)))
+        self.assertIsNone(obj_copy._positional_metadata)
+
+    def test_deepcopy_with_positional_metadata(self):
+        obj = self._positional_metadata_constructor_(
+            4, positional_metadata={'bar': [[], [], [], []],
+                                    'baz': [42, 42, 42, 42]})
+        obj_copy = copy.deepcopy(obj)
+
+        self.assertEqual(obj, obj_copy)
+        self.assertIsNot(obj, obj_copy)
+
+        self.assertIsNot(obj._positional_metadata,
+                         obj_copy._positional_metadata)
+        self.assertIsNot(obj._positional_metadata.values,
+                         obj_copy._positional_metadata.values)
+        self.assertIsNot(obj._positional_metadata.loc[0, 'bar'],
+                         obj_copy._positional_metadata.loc[0, 'bar'])
+
+        obj_copy.positional_metadata.loc[0, 'bar'].append(1)
+        obj_copy.positional_metadata.loc[0, 'baz'] = 43
+
+        assert_data_frame_almost_equal(
+            obj_copy.positional_metadata,
+            pd.DataFrame({'bar': [[1], [], [], []],
+                          'baz': [43, 42, 42, 42]}))
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'bar': [[], [], [], []],
+                          'baz': [42, 42, 42, 42]}))
+
+    def test_deepcopy_memo_is_respected(self):
+        # Basic test to ensure deepcopy's memo is passed through to recursive
+        # deepcopy calls.
+        obj = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': [1, 2, 3]})
+        memo = {}
+        copy.deepcopy(obj, memo)
+        self.assertGreater(len(memo), 2)
+
+    def test_positional_metadata_getter(self):
+        obj = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': [22, 22, 0]})
+
+        self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame({'foo': [22, 22, 0]}))
+
+        # Update existing column.
+        obj.positional_metadata['foo'] = [42, 42, 43]
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame({'foo': [42, 42, 43]}))
+
+        # Add new column.
+        obj.positional_metadata['foo2'] = [True, False, True]
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': [42, 42, 43],
+                          'foo2': [True, False, True]}))
+
+    def test_positional_metadata_getter_no_positional_metadata(self):
+        obj = self._positional_metadata_constructor_(4)
+
+        self.assertIsNone(obj._positional_metadata)
+        self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame(index=np.arange(4)))
+        self.assertIsNotNone(obj._positional_metadata)
+
+    def test_positional_metadata_getter_set_column_series(self):
+        length = 8
+        obj = self._positional_metadata_constructor_(
+            length, positional_metadata={'foo': range(length)})
+
+        obj.positional_metadata['bar'] = pd.Series(range(length-3))
+        # pandas.Series will be padded with NaN if too short.
+        npt.assert_equal(obj.positional_metadata['bar'],
+                         np.array(list(range(length-3)) + [np.nan]*3))
+
+        obj.positional_metadata['baz'] = pd.Series(range(length+3))
+        # pandas.Series will be truncated if too long.
+        npt.assert_equal(obj.positional_metadata['baz'],
+                         np.array(range(length)))
+
+    def test_positional_metadata_getter_set_column_array(self):
+        length = 8
+        obj = self._positional_metadata_constructor_(
+            length, positional_metadata={'foo': range(length)})
+
+        # array-like objects will fail if wrong size.
+        for array_like in (np.array(range(length-1)), range(length-1),
+                           np.array(range(length+1)), range(length+1)):
+            with six.assertRaisesRegex(self, ValueError,
+                                       "Length of values does not match "
+                                       "length of index"):
+                obj.positional_metadata['bar'] = array_like
+
+    def test_positional_metadata_setter_pandas_consumable(self):
+        obj = self._positional_metadata_constructor_(3)
+
+        self.assertFalse(obj.has_positional_metadata())
+
+        obj.positional_metadata = {'foo': [3, 2, 1]}
+        self.assertTrue(obj.has_positional_metadata())
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame({'foo': [3, 2, 1]}))
+
+        obj.positional_metadata = pd.DataFrame(index=np.arange(3))
+        self.assertFalse(obj.has_positional_metadata())
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame(index=np.arange(3)))
+
+    def test_positional_metadata_setter_data_frame(self):
+        obj = self._positional_metadata_constructor_(3)
+
+        self.assertFalse(obj.has_positional_metadata())
+
+        obj.positional_metadata = pd.DataFrame({'foo': [3, 2, 1]},
+                                               index=['a', 'b', 'c'])
+        self.assertTrue(obj.has_positional_metadata())
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame({'foo': [3, 2, 1]}))
+
+        obj.positional_metadata = pd.DataFrame(index=np.arange(3))
+        self.assertFalse(obj.has_positional_metadata())
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame(index=np.arange(3)))
+
+    def test_positional_metadata_setter_none(self):
+        obj = self._positional_metadata_constructor_(
+            0, positional_metadata={'foo': []})
+
+        self.assertTrue(obj.has_positional_metadata())
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame({'foo': []}))
+
+        # `None` behavior differs from constructor.
+        obj.positional_metadata = None
+
+        self.assertFalse(obj.has_positional_metadata())
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame(index=np.arange(0)))
+
+    def test_positional_metadata_setter_makes_shallow_copy(self):
+        obj = self._positional_metadata_constructor_(3)
+
+        df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
+                          index=['a', 'b', 'c'])
+        obj.positional_metadata = df
+
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
+                         index=np.arange(3)))
+        self.assertIsNot(obj.positional_metadata, df)
+
+        # Original df is not mutated.
+        orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
+                               index=['a', 'b', 'c'])
+        assert_data_frame_almost_equal(df, orig_df)
+
+        # Change values of column (using same dtype).
+        df['foo'] = [42, 42, 42]
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
+                         index=np.arange(3)))
+
+        # Change single value of underlying data.
+        df.values[0][0] = 10
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
+                         index=np.arange(3)))
+
+        # Mutate list (not a deep copy).
+        df['bar'][0].append(42)
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
+                         index=np.arange(3)))
+
+    def test_positional_metadata_setter_invalid_type(self):
+        obj = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': [1, 2, 42]})
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   'Invalid positional metadata. Must be '
+                                   'consumable by `pd.DataFrame` constructor. '
+                                   'Original pandas error message: '):
+            obj.positional_metadata = 2
+
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame({'foo': [1, 2, 42]}))
+
+    def test_positional_metadata_setter_len_mismatch(self):
+        obj = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': [1, 2, 42]})
+
+        # `None` behavior differs from constructor.
+        with six.assertRaisesRegex(self, ValueError, '\(0\).*\(3\)'):
+            obj.positional_metadata = None
+
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame({'foo': [1, 2, 42]}))
+
+        with six.assertRaisesRegex(self, ValueError, '\(4\).*\(3\)'):
+            obj.positional_metadata = [1, 2, 3, 4]
+
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame({'foo': [1, 2, 42]}))
+
+    def test_positional_metadata_deleter(self):
+        obj = self._positional_metadata_constructor_(
+            3, positional_metadata={'foo': [1, 2, 3]})
+
+        assert_data_frame_almost_equal(obj.positional_metadata,
+                                       pd.DataFrame({'foo': [1, 2, 3]}))
+
+        del obj.positional_metadata
+        self.assertIsNone(obj._positional_metadata)
+        self.assertFalse(obj.has_positional_metadata())
+
+        # Delete again.
+        del obj.positional_metadata
+        self.assertIsNone(obj._positional_metadata)
+        self.assertFalse(obj.has_positional_metadata())
+
+        obj = self._positional_metadata_constructor_(3)
+
+        self.assertIsNone(obj._positional_metadata)
+        self.assertFalse(obj.has_positional_metadata())
+        del obj.positional_metadata
+        self.assertIsNone(obj._positional_metadata)
+        self.assertFalse(obj.has_positional_metadata())
+
+    def test_has_positional_metadata(self):
+        obj = self._positional_metadata_constructor_(4)
+        self.assertFalse(obj.has_positional_metadata())
+        self.assertIsNone(obj._positional_metadata)
+
+        obj = self._positional_metadata_constructor_(0, positional_metadata={})
+        self.assertFalse(obj.has_positional_metadata())
+
+        obj = self._positional_metadata_constructor_(
+            4, positional_metadata=pd.DataFrame(index=np.arange(4)))
+        self.assertFalse(obj.has_positional_metadata())
+
+        obj = self._positional_metadata_constructor_(
+            4, positional_metadata=pd.DataFrame(index=['a', 'b', 'c', 'd']))
+        self.assertFalse(obj.has_positional_metadata())
+
+        obj = self._positional_metadata_constructor_(
+            0, positional_metadata={'foo': []})
+        self.assertTrue(obj.has_positional_metadata())
+
+        obj = self._positional_metadata_constructor_(
+            4, positional_metadata={'foo': [1, 2, 3, 4]})
+        self.assertTrue(obj.has_positional_metadata())
+
+        obj = self._positional_metadata_constructor_(
+            2, positional_metadata={'foo': [1, 2], 'bar': ['abc', 'def']})
+        self.assertTrue(obj.has_positional_metadata())
+
+
 @nottest
 class TestRunner(object):
     """Simple wrapper class around nosetests functionality.
@@ -60,7 +932,7 @@ class TestRunner(object):
         # NOTE: it doesn't seem to matter what the first element of the argv
         # list is, there just needs to be something there.
         argv = [self._filename, '-I DO_NOT_IGNORE_ANYTHING']
-        if not PY3:
+        if PY3:
             argv.extend(['--with-doctest', '--doctest-tests'])
         if verbose:
             argv.append('-v')
@@ -106,6 +978,171 @@ def get_data_path(fn, subfolder='data'):
 
 
 @experimental(as_of="0.4.0")
+def assert_ordination_results_equal(left, right, ignore_method_names=False,
+                                    ignore_axis_labels=False,
+                                    ignore_biplot_scores_labels=False,
+                                    ignore_directionality=False,
+                                    decimal=7):
+    """Assert that ordination results objects are equal.
+
+    This is a helper function intended to be used in unit tests that need to
+    compare ``OrdinationResults`` objects.
+
+    Parameters
+    ----------
+    left, right : OrdinationResults
+        Ordination results to be compared for equality.
+    ignore_method_names : bool, optional
+        Ignore differences in `short_method_name` and `long_method_name`.
+    ignore_axis_labels : bool, optional
+        Ignore differences in axis labels (i.e., column labels).
+    ignore_biplot_scores_labels : bool, optional
+        Ignore differences in `biplot_scores` row and column labels.
+    ignore_directionality : bool, optional
+        Ignore differences in directionality (i.e., differences in signs) for
+        attributes `samples` and `features`.
+
+    Raises
+    ------
+    AssertionError
+        If the two objects are not equal.
+
+    """
+    npt.assert_equal(type(left) is type(right), True)
+
+    if not ignore_method_names:
+        npt.assert_equal(left.short_method_name, right.short_method_name)
+        npt.assert_equal(left.long_method_name, right.long_method_name)
+
+    _assert_frame_equal(left.samples, right.samples,
+                        ignore_columns=ignore_axis_labels,
+                        ignore_directionality=ignore_directionality,
+                        decimal=decimal)
+
+    _assert_frame_equal(left.features, right.features,
+                        ignore_columns=ignore_axis_labels,
+                        ignore_directionality=ignore_directionality,
+                        decimal=decimal)
+
+    _assert_frame_equal(left.biplot_scores, right.biplot_scores,
+                        ignore_biplot_scores_labels,
+                        ignore_biplot_scores_labels,
+                        decimal=decimal)
+
+    _assert_frame_equal(left.sample_constraints, right.sample_constraints,
+                        ignore_columns=ignore_axis_labels,
+                        decimal=decimal)
+
+    _assert_series_equal(left.eigvals, right.eigvals, ignore_axis_labels,
+                         decimal=decimal)
+
+    _assert_series_equal(left.proportion_explained, right.proportion_explained,
+                         ignore_axis_labels,
+                         decimal=decimal)
+
+
+def _assert_series_equal(left_s, right_s, ignore_index=False, decimal=7):
+    # assert_series_equal doesn't like None...
+    if left_s is None or right_s is None:
+        assert left_s is None and right_s is None
+    else:
+        npt.assert_almost_equal(left_s.values, right_s.values,
+                                decimal=decimal)
+        if not ignore_index:
+            pdt.assert_index_equal(left_s.index, right_s.index)
+
+
+def _assert_frame_equal(left_df, right_df, ignore_index=False,
+                        ignore_columns=False, ignore_directionality=False,
+                        decimal=7):
+    # assert_frame_equal doesn't like None...
+    if left_df is None or right_df is None:
+        assert left_df is None and right_df is None
+    else:
+        left_values = left_df.values
+        right_values = right_df.values
+
+        if ignore_directionality:
+            left_values, right_values = _normalize_signs(left_values,
+                                                         right_values)
+        npt.assert_almost_equal(left_values, right_values, decimal=decimal)
+
+        if not ignore_index:
+            pdt.assert_index_equal(left_df.index, right_df.index)
+        if not ignore_columns:
+            pdt.assert_index_equal(left_df.columns, right_df.columns)
+
+
+def _normalize_signs(arr1, arr2):
+    """Change column signs so that "column" and "-column" compare equal.
+
+    This is needed because results of eigenproblems can have signs
+    flipped, but they're still right.
+
+    Notes
+    -----
+
+    This function tries hard to make sure that, if you find "column"
+    and "-column" almost equal, calling a function like np.allclose to
+    compare them after calling `normalize_signs` succeeds.
+
+    To do so, it distinguishes two cases for every column:
+
+    - It can be all almost equal to 0 (this includes a column of
+      zeros).
+    - Otherwise, it has a value that isn't close to 0.
+
+    In the first case, no sign needs to be flipped. I.e., for
+    |epsilon| small, np.allclose(-epsilon, 0) is true if and only if
+    np.allclose(epsilon, 0) is.
+
+    In the second case, the function finds the number in the column
+    whose absolute value is largest. Then, it compares its sign with
+    the number found in the same index, but in the other array, and
+    flips the sign of the column as needed.
+    """
+    # Let's convert everything to floating point numbers (it's
+    # reasonable to assume that eigenvectors will already be floating
+    # point numbers). This is necessary because np.array(1) /
+    # np.array(0) != np.array(1.) / np.array(0.)
+    arr1 = np.asarray(arr1, dtype=np.float64)
+    arr2 = np.asarray(arr2, dtype=np.float64)
+
+    if arr1.shape != arr2.shape:
+        raise ValueError(
+            "Arrays must have the same shape ({0} vs {1}).".format(arr1.shape,
+                                                                   arr2.shape)
+            )
+
+    # To avoid issues around zero, we'll compare signs of the values
+    # with highest absolute value
+    max_idx = np.abs(arr1).argmax(axis=0)
+    max_arr1 = arr1[max_idx, range(arr1.shape[1])]
+    max_arr2 = arr2[max_idx, range(arr2.shape[1])]
+
+    sign_arr1 = np.sign(max_arr1)
+    sign_arr2 = np.sign(max_arr2)
+
+    # Store current warnings, and ignore division by zero (like 1. /
+    # 0.) and invalid operations (like 0. / 0.)
+    wrn = np.seterr(invalid='ignore', divide='ignore')
+    differences = sign_arr1 / sign_arr2
+    # The values in `differences` can be:
+    #    1 -> equal signs
+    #   -1 -> diff signs
+    #   Or nan (0/0), inf (nonzero/0), 0 (0/nonzero)
+    np.seterr(**wrn)
+
+    # Now let's deal with cases where `differences != \pm 1`
+    special_cases = (~np.isfinite(differences)) | (differences == 0)
+    # In any of these cases, the sign of the column doesn't matter, so
+    # let's just keep it
+    differences[special_cases] = 1
+
+    return arr1 * differences, arr2
+
+
+@experimental(as_of="0.4.0")
 def assert_data_frame_almost_equal(left, right):
     """Raise AssertionError if ``pd.DataFrame`` objects are not "almost equal".
 
@@ -154,6 +1191,28 @@ def assert_data_frame_almost_equal(left, right):
     # this check ensures that empty DataFrames with different indices do not
     # compare equal. exact=True specifies that the type of the indices must be
     # exactly the same
-    pdt.assert_index_equal(left.index, right.index,
+    assert_index_equal(left.index, right.index)
+
+
+def assert_series_almost_equal(left, right):
+    # pass all kwargs to ensure this function has consistent behavior even if
+    # `assert_series_equal`'s defaults change
+    pdt.assert_series_equal(left, right,
+                            check_dtype=True,
+                            check_index_type=True,
+                            check_series_type=True,
+                            check_less_precise=False,
+                            check_names=True,
+                            check_exact=False,
+                            check_datetimelike_compat=False,
+                            obj='Series')
+    # this check ensures that empty Series with different indices do not
+    # compare equal.
+    assert_index_equal(left.index, right.index)
+
+
+def assert_index_equal(a, b):
+    pdt.assert_index_equal(a, b,
                            exact=True,
-                           check_names=True)
+                           check_names=True,
+                           check_exact=True)
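
The private _normalize_signs helper added above flips column signs of the
first array so that eigenvector columns that differ only in sign compare
equal. A minimal sketch, mirroring the new tests (illustrative only):

    >>> import numpy as np
    >>> from skbio.util._testing import _normalize_signs
    >>> a = np.array([[1., 2.], [3., -1.]])
    >>> b = np.array([[-1., 2.], [-3., -1.]])
    >>> np.allclose(*_normalize_signs(a, b))
    True
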
diff --git a/skbio/util/_warning.py b/skbio/util/_warning.py
index a69f14f..8128415 100644
--- a/skbio/util/_warning.py
+++ b/skbio/util/_warning.py
@@ -20,3 +20,15 @@ class EfficiencyWarning(Warning):
 
     """
     pass
+
+
+class RepresentationWarning(Warning):
+    """Warn about assumptions made for the successful completion of a process.
+
+    Warn about substitutions, assumptions, or particular alterations that were
+    made for the successful completion of a process. For example, if a value
+    that is required for a task is not present, a best guess or least
+    deleterious value could be used, accompanied by this warning.
+
+    """
+    pass
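
RepresentationWarning is used through the standard warnings machinery. A
minimal sketch (assuming it is re-exported from skbio.util alongside
EfficiencyWarning; otherwise import it from skbio.util._warning):

    >>> import warnings
    >>> from skbio.util import RepresentationWarning
    >>> warnings.warn("Assuming 0.0 for a missing branch length.",
    ...               RepresentationWarning)
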
diff --git a/skbio/util/tests/test_decorator.py b/skbio/util/tests/test_decorator.py
index eef2d1f..625a947 100644
--- a/skbio/util/tests/test_decorator.py
+++ b/skbio/util/tests/test_decorator.py
@@ -11,12 +11,70 @@ import unittest
 import inspect
 import warnings
 
-from skbio.util._decorator import classproperty, overrides
+from skbio.util._decorator import classproperty, overrides, classonlymethod
 from skbio.util._decorator import (stable, experimental, deprecated,
                                    _state_decorator)
 from skbio.util._exception import OverrideError
 
 
+class TestClassOnlyMethod(unittest.TestCase):
+    def test_works_on_class(self):
+        class A(object):
+            @classonlymethod
+            def example(cls):
+                return cls
+
+        self.assertEqual(A.example(), A)
+
+    def test_fails_on_instance(self):
+        class A(object):
+            @classonlymethod
+            def example(cls):
+                pass
+
+        with self.assertRaises(TypeError) as e:
+            A().example()
+
+        self.assertIn('A.example', str(e.exception))
+        self.assertIn('instance', str(e.exception))
+
+    def test_matches_classmethod(self):
+        class A(object):
+            pass
+
+        def example(cls, thing):
+            """doc"""
+
+        A.example1 = classmethod(example)
+        A.example2 = classonlymethod(example)
+
+        self.assertEqual(A.__dict__['example1'].__func__, example)
+        self.assertEqual(A.__dict__['example2'].__func__, example)
+
+        self.assertEqual(A.example1.__doc__, example.__doc__)
+        self.assertEqual(A.example2.__doc__, example.__doc__)
+
+        self.assertEqual(A.example1.__name__, example.__name__)
+        self.assertEqual(A.example2.__name__, example.__name__)
+
+    def test_passes_args_kwargs(self):
+        self.ran_test = False
+
+        class A(object):
+            @classonlymethod
+            def example(cls, arg1, arg2, kwarg1=None, kwarg2=None,
+                        default=5):
+                self.assertEqual(arg1, 1)
+                self.assertEqual(arg2, 2)
+                self.assertEqual(kwarg1, '1')
+                self.assertEqual(kwarg2, '2')
+                self.assertEqual(default, 5)
+                self.ran_test = True
+
+        A.example(1, *[2], kwarg2='2', **{'kwarg1': '1'})
+        self.assertTrue(self.ran_test)
+
+
 class TestOverrides(unittest.TestCase):
     def test_raises_when_missing(self):
         class A(object):
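
The TestClassOnlyMethod cases above pin down classonlymethod's contract: it
behaves like classmethod when accessed through the class, but refuses to be
called on an instance. A minimal sketch (illustrative only):

    >>> from skbio.util._decorator import classonlymethod
    >>> class Example(object):
    ...     @classonlymethod
    ...     def build(cls):
    ...         return cls
    >>> Example.build() is Example
    True

Calling Example().build() instead raises TypeError, as exercised in
test_fails_on_instance.
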
diff --git a/skbio/util/tests/test_misc.py b/skbio/util/tests/test_misc.py
index 6897bf3..0edd952 100644
--- a/skbio/util/tests/test_misc.py
+++ b/skbio/util/tests/test_misc.py
@@ -18,9 +18,9 @@ from shutil import rmtree
 from uuid import uuid4
 
 from skbio.util import (cardinal_to_ordinal, safe_md5, remove_files,
-                        create_dir, find_duplicates, flatten,
-                        is_casava_v180_or_later)
-from skbio.util._misc import _handle_error_codes, MiniRegistry, chunk_str
+                        create_dir, find_duplicates, is_casava_v180_or_later)
+from skbio.util._misc import (
+    _handle_error_codes, MiniRegistry, chunk_str, resolve_key)
 
 
 class TestMiniRegistry(unittest.TestCase):
@@ -131,6 +131,33 @@ class TestMiniRegistry(unittest.TestCase):
                          " happening now.\n                ")
 
 
+class ResolveKeyTests(unittest.TestCase):
+    def test_callable(self):
+        def func(x):
+            return str(x)
+
+        self.assertEqual(resolve_key(1, func), "1")
+        self.assertEqual(resolve_key(4, func), "4")
+
+    def test_index(self):
+        class MetadataHaver(dict):
+            metadata = {}
+
+            @property
+            def metadata(self):
+                return self
+
+        obj = MetadataHaver({'foo': 123})
+        self.assertEqual(resolve_key(obj, 'foo'), 123)
+
+        obj = MetadataHaver({'foo': 123, 'bar': 'baz'})
+        self.assertEqual(resolve_key(obj, 'bar'), 'baz')
+
+    def test_wrong_type(self):
+        with self.assertRaises(TypeError):
+            resolve_key({'foo': 1}, 'foo')
+
+
 class ChunkStrTests(unittest.TestCase):
     def test_even_split(self):
         self.assertEqual(chunk_str('abcdef', 6, ' '), 'abcdef')
@@ -238,10 +265,6 @@ class MiscTests(unittest.TestCase):
         obs = _handle_error_codes('/foo/bar/baz')
         self.assertEqual(obs, 0)
 
-    def test_flatten(self):
-        self.assertEqual(flatten(['aa', 'bb', 'cc']), list('aabbcc'))
-        self.assertEqual(flatten([1, [2, 3], [[4, [5]]]]), [1, 2, 3, [4, [5]]])
-
 
 class CardinalToOrdinalTests(unittest.TestCase):
     def test_valid_range(self):
diff --git a/skbio/util/tests/test_testing.py b/skbio/util/tests/test_testing.py
index b6adf52..236bd54 100644
--- a/skbio/util/tests/test_testing.py
+++ b/skbio/util/tests/test_testing.py
@@ -14,8 +14,12 @@ import unittest
 
 import pandas as pd
 import numpy as np
+import numpy.testing as npt
 
-from skbio.util import get_data_path, assert_data_frame_almost_equal
+from skbio import OrdinationResults
+from skbio.util import (get_data_path, assert_ordination_results_equal,
+                        assert_data_frame_almost_equal)
+from skbio.util._testing import _normalize_signs, assert_series_almost_equal
 
 
 class TestGetDataPath(unittest.TestCase):
@@ -27,6 +31,144 @@ class TestGetDataPath(unittest.TestCase):
         self.assertEqual(data_path_2, data_path)
 
 
+class TestAssertOrdinationResultsEqual(unittest.TestCase):
+    def test_assert_ordination_results_equal(self):
+        minimal1 = OrdinationResults('foo', 'bar', pd.Series([1.0, 2.0]),
+                                     pd.DataFrame([[1, 2, 3], [4, 5, 6]]))
+
+        # a minimal set of results should be equal to itself
+        assert_ordination_results_equal(minimal1, minimal1)
+
+        # type mismatch
+        with npt.assert_raises(AssertionError):
+            assert_ordination_results_equal(minimal1, 'foo')
+
+        # numeric values should be checked that they're almost equal
+        almost_minimal1 = OrdinationResults(
+            'foo', 'bar',
+            pd.Series([1.0000001, 1.9999999]),
+            pd.DataFrame([[1, 2, 3], [4, 5, 6]]))
+        assert_ordination_results_equal(minimal1, almost_minimal1)
+
+        # test each of the optional numeric attributes
+        for attr in ('features', 'samples', 'biplot_scores',
+                     'sample_constraints'):
+            # missing optional numeric attribute in one, present in the other
+            setattr(almost_minimal1, attr, pd.DataFrame([[1, 2], [3, 4]]))
+            with npt.assert_raises(AssertionError):
+                assert_ordination_results_equal(minimal1, almost_minimal1)
+            setattr(almost_minimal1, attr, None)
+
+            # optional numeric attributes present in both, but not almost equal
+            setattr(minimal1, attr, pd.DataFrame([[1, 2], [3, 4]]))
+            setattr(almost_minimal1, attr, pd.DataFrame([[1, 2],
+                                                         [3.00002, 4]]))
+            with npt.assert_raises(AssertionError):
+                assert_ordination_results_equal(minimal1, almost_minimal1)
+            setattr(minimal1, attr, None)
+            setattr(almost_minimal1, attr, None)
+
+            # optional numeric attributes present in both, and almost equal
+            setattr(minimal1, attr, pd.DataFrame([[1.0, 2.0], [3.0, 4.0]]))
+            setattr(almost_minimal1, attr,
+                    pd.DataFrame([[1.0, 2.0], [3.00000002, 4]]))
+            assert_ordination_results_equal(minimal1, almost_minimal1)
+            setattr(minimal1, attr, None)
+            setattr(almost_minimal1, attr, None)
+
+        # missing optional numeric attribute in one, present in the other
+        almost_minimal1.proportion_explained = pd.Series([1, 2, 3])
+        with npt.assert_raises(AssertionError):
+            assert_ordination_results_equal(minimal1, almost_minimal1)
+        almost_minimal1.proportion_explained = None
+
+        # optional numeric attributes present in both, but not almost equal
+        minimal1.proportion_explained = pd.Series([1, 2, 3])
+        almost_minimal1.proportion_explained = pd.Series([1, 2, 3.00002])
+        with npt.assert_raises(AssertionError):
+            assert_ordination_results_equal(minimal1, almost_minimal1)
+        minimal1.proportion_explained = None
+        almost_minimal1.proportion_explained = None
+
+        # optional numeric attributes present in both, and almost equal
+        minimal1.proportion_explained = pd.Series([1, 2, 3])
+        almost_minimal1.proportion_explained = pd.Series([1, 2, 3.00000002])
+        assert_ordination_results_equal(minimal1, almost_minimal1)
+        minimal1.proportion_explained = None
+        almost_minimal1.proportion_explained = None
+
+
+class TestNormalizeSigns(unittest.TestCase):
+    def test_shapes_and_nonarray_input(self):
+        with self.assertRaises(ValueError):
+            _normalize_signs([[1, 2], [3, 5]], [[1, 2]])
+
+    def test_works_when_different(self):
+        """Taking abs value of everything would lead to false
+        positives."""
+        a = np.array([[1, -1],
+                      [2, 2]])
+        b = np.array([[-1, -1],
+                      [2, 2]])
+        with self.assertRaises(AssertionError):
+            npt.assert_equal(*_normalize_signs(a, b))
+
+    def test_easy_different(self):
+        a = np.array([[1, 2],
+                      [3, -1]])
+        b = np.array([[-1, 2],
+                      [-3, -1]])
+        npt.assert_equal(*_normalize_signs(a, b))
+
+    def test_easy_already_equal(self):
+        a = np.array([[1, -2],
+                      [3, 1]])
+        b = a.copy()
+        npt.assert_equal(*_normalize_signs(a, b))
+
+    def test_zeros(self):
+        a = np.array([[0, 3],
+                      [0, -1]])
+        b = np.array([[0, -3],
+                      [0, 1]])
+        npt.assert_equal(*_normalize_signs(a, b))
+
+    def test_hard(self):
+        a = np.array([[0, 1],
+                      [1, 2]])
+        b = np.array([[0, 1],
+                      [-1, 2]])
+        npt.assert_equal(*_normalize_signs(a, b))
+
+    def test_harder(self):
+        """We don't want a value that might be negative due to
+        floating point inaccuracies to make a call to allclose in the
+        result to be off."""
+        a = np.array([[-1e-15, 1],
+                      [5, 2]])
+        b = np.array([[1e-15, 1],
+                      [5, 2]])
+        # Clearly a and b would refer to the same "column
+        # eigenvectors" but a slopppy implementation of
+        # _normalize_signs could change the sign of column 0 and make a
+        # comparison fail
+        npt.assert_almost_equal(*_normalize_signs(a, b))
+
+    def test_column_zeros(self):
+        a = np.array([[0, 1],
+                      [0, 2]])
+        b = np.array([[0, -1],
+                      [0, -2]])
+        npt.assert_equal(*_normalize_signs(a, b))
+
+    def test_column_almost_zero(self):
+        a = np.array([[1e-15, 3],
+                      [-2e-14, -6]])
+        b = np.array([[0, 3],
+                      [-1e-15, -6]])
+        npt.assert_almost_equal(*_normalize_signs(a, b))
+
+
 class TestAssertDataFrameAlmostEqual(unittest.TestCase):
     def setUp(self):
         self.df = pd.DataFrame(
@@ -58,9 +200,10 @@ class TestAssertDataFrameAlmostEqual(unittest.TestCase):
             pd.DataFrame(index=np.arange(9), columns=np.arange(9))
         ]
 
-        # each df should compare equal to itself
+        # each df should compare equal to itself and a copy of itself
         for df in unequal_dfs:
             assert_data_frame_almost_equal(df, df)
+            assert_data_frame_almost_equal(df, pd.DataFrame(df, copy=True))
 
         # every pair of dfs should not compare equal. use permutations instead
         # of combinations to test that comparing df1 to df2 and df2 to df1 are
@@ -84,5 +227,37 @@ class TestAssertDataFrameAlmostEqual(unittest.TestCase):
             assert_data_frame_almost_equal(df1, df2)
 
 
+class TestAssertSeriesAlmostEqual(unittest.TestCase):
+
+    def setUp(self):
+        self.series = [
+            pd.Series(),
+            pd.Series(dtype='int64'),
+            pd.Series([1, 2, 3]),
+            pd.Series([3, 2, 1]),
+            pd.Series([1, 2, 3, 4]),
+            pd.Series([1., 2., 3.]),
+            pd.Series([1, 2, 3], index=[1.0, 2.0, 3.0]),
+            pd.Series([1, 2, 3], index=[1, 2, 3]),
+            pd.Series([1, 2, 3], index=['c', 'b', 'a']),
+            pd.Series([3, 2, 1], index=['c', 'b', 'a']),
+        ]
+
+    def test_not_equal(self):
+        # no pair of series should compare equal
+        for s1, s2 in itertools.permutations(self.series, 2):
+            with self.assertRaises(AssertionError):
+                assert_series_almost_equal(s1, s2)
+
+    def test_equal(self):
+        s1 = pd.Series([1., 2., 3.])
+        s2 = pd.Series([1.000001, 2., 3.])
+        assert_series_almost_equal(s1, s2)
+
+        # all series should be equal to themselves and copies of themselves
+        for s in self.series:
+            assert_series_almost_equal(s, s)
+            assert_series_almost_equal(s, pd.Series(s, copy=True))
+
 if __name__ == '__main__':
     unittest.main()
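
The ``TestNormalizeSigns`` cases above all exercise one idea: flip the sign
of whole columns so that ``column`` and ``-column`` compare equal, without
taking the absolute value of every element (which, as the docstrings note,
would hide real differences). Below is a minimal NumPy-only sketch of that
approach; ``normalize_column_signs`` is a hypothetical stand-in for
illustration, not skbio's actual ``_normalize_signs``.

    import numpy as np

    def normalize_column_signs(arr1, arr2):
        # Hypothetical helper: flip each column so that its entry with the
        # largest absolute value is non-negative in both arrays.
        arr1 = np.asarray(arr1, dtype=float)
        arr2 = np.asarray(arr2, dtype=float)
        if arr1.shape != arr2.shape:
            raise ValueError("Arrays must have the same shape")

        def flip(arr):
            cols = np.arange(arr.shape[1])
            # Sign of each column's largest-magnitude entry.
            signs = np.sign(arr[np.abs(arr).argmax(axis=0), cols])
            signs[signs == 0] = 1  # leave all-zero columns untouched
            return arr * signs

        return flip(arr1), flip(arr2)
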
diff --git a/skbio/workflow.py b/skbio/workflow.py
index 8d6778e..a39affc 100644
--- a/skbio/workflow.py
+++ b/skbio/workflow.py
@@ -108,12 +108,12 @@ to walk through an item at a time so we can examine the debug information.
 
 >>> wf = SequenceProcessor(state=None, options={'reverse':True}, debug=True)
 >>> gen = wf(seqs, fail_callback=lambda x: x.state)
->>> gen.next()
+>>> next(gen)
 'TTTTTTTAAAAAAA'
->>> print(wf.failed)
+>>> wf.failed
 False
->>> print(wf.debug_trace)
-set([('check_length', 0), ('reverse', 2)])
+>>> sorted(wf.debug_trace)
+[('check_length', 0), ('reverse', 2)]
 
 The ``debug_trace`` specifies the methods executed and their order of
 execution, where values closer to zero indicate earlier execution. Gaps
@@ -125,12 +125,12 @@ this time through the workflow?
 Now, let's take a look at the next item, which on our prior run through the
 workflow was a failed item.
 
->>> gen.next()
+>>> next(gen)
 'ATAGACC'
->>> print(wf.failed)
+>>> wf.failed
 True
->>> print(wf.debug_trace)
-set([('check_length', 0)])
+>>> sorted(wf.debug_trace)
+[('check_length', 0)]
 
 What we can see is that the failed sequence only executed the check_length
 method. Since the sequence didn't pass our length filter of 10 nucleotides,
@@ -141,12 +141,12 @@ be disabled if desired).
 This third item previously matched our nucleotide pattern of interest for
 truncation. Let's see what that looks like in the debug output.
 
->>> gen.next() #
+>>> next(gen)
 'CAGGCC'
->>> print(wf.failed)
+>>> wf.failed
 False
->>> wf.debug_trace
-set([('check_length', 0), ('truncate', 1), ('reverse', 2)])
+>>> sorted(wf.debug_trace)
+[('check_length', 0), ('reverse', 2), ('truncate', 1)]
 
 In this last example, we can see that the ``truncate`` method was executed
 prior to the ``reverse`` method and following the ``check_length`` method. This
@@ -428,7 +428,7 @@ class Workflow(object):
             """Track debug information about a method execution"""
             if not hasattr(self, 'debug_trace'):
                 raise AttributeError(
-                    "%s doesn't have debug_trace!" % self.__class__)
+                    "%s doesn't have debug_trace." % self.__class__)
 
             exec_order = self.debug_counter
             name = func.__name__
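
The doctest updates in this hunk follow from two Python 3 differences:
generators no longer have a ``.next()`` method, and the repr of a ``set`` is
both version-dependent (``set([...])`` vs ``{...}``) and unordered, so the
examples now use ``next(gen)`` and ``sorted(wf.debug_trace)`` to get
deterministic output. A plain-Python illustration, independent of skbio:

>>> def items():
...     yield 'TTTTTTTAAAAAAA'
...     yield 'ATAGACC'
>>> gen = items()
>>> next(gen)          # gen.next() raises AttributeError on Python 3
'TTTTTTTAAAAAAA'
>>> trace = {('check_length', 0), ('reverse', 2)}
>>> sorted(trace)      # set iteration order is not guaranteed; sorted() is
[('check_length', 0), ('reverse', 2)]
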

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-skbio.git


