[med-svn] [python-skbio] 06/13: Imported Upstream version 0.2.3

Andreas Tille tille at debian.org
Thu Jul 30 15:46:25 UTC 2015


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository python-skbio.

commit 639dea58d41a3ccd27ca828cde0875e7facadf86
Author: Andreas Tille <tille at debian.org>
Date:   Thu Jul 30 17:36:37 2015 +0200

    Imported Upstream version 0.2.3
---
 .coveragerc                                        |    20 +
 .gitignore                                         |    46 +
 .travis.yml                                        |    38 +
 CHANGELOG.md                                       |   297 +
 CONTRIBUTING.md                                    |   139 +
 COPYING.txt                                        |    27 +
 MANIFEST.in                                        |    17 +
 README.rst                                         |   164 +
 RELEASE.md                                         |   124 +
 assets/.no.gif                                     |   Bin 0 -> 2052678 bytes
 assets/horizontal_powered_by.png                   |   Bin 0 -> 15003 bytes
 assets/horizontal_powered_by.svg                   |   152 +
 assets/logo.png                                    |   Bin 0 -> 12397 bytes
 assets/logo.svg                                    |    76 +
 assets/logo_and_powered.pdf                        |   Bin 0 -> 63684 bytes
 assets/vertical_powered_by.png                     |   Bin 0 -> 15865 bytes
 assets/vertical_powered_by.svg                     |   152 +
 checklist.py                                       |   387 +
 debian/changelog                                   |    12 -
 debian/compat                                      |     1 -
 debian/control                                     |    50 -
 debian/copyright                                   |    39 -
 debian/rules                                       |    20 -
 debian/source/format                               |     1 -
 debian/watch                                       |     2 -
 doc/Makefile                                       |   183 +
 doc/README.md                                      |   244 +
 doc/source/_static/copybutton.js                   |    60 +
 doc/source/_static/style.css                       |    77 +
 doc/source/_templates/autosummary/attribute.rst    |     9 +
 doc/source/_templates/autosummary/class.rst        |    27 +
 doc/source/_templates/autosummary/method.rst       |     9 +
 doc/source/_templates/autosummary/module.rst       |     2 +
 doc/source/_templates/layout.html                  |    35 +
 doc/source/alignment.rst                           |     1 +
 doc/source/conf.py                                 |   442 +
 doc/source/development/coding_guidelines.rst       |   379 +
 doc/source/development/new_module.rst              |    49 +
 doc/source/development/py3.rst                     |   366 +
 doc/source/diversity.rst                           |     1 +
 doc/source/draw.rst                                |     1 +
 doc/source/format.sequences.rst                    |     1 +
 doc/source/index.rst                               |    37 +
 doc/source/io.rst                                  |     1 +
 doc/source/parse.sequences.rst                     |     1 +
 doc/source/sequence.rst                            |     1 +
 doc/source/stats.rst                               |     1 +
 doc/source/tree.rst                                |     1 +
 doc/source/util.rst                                |     1 +
 doc/source/workflow.rst                            |     1 +
 doc/sphinxext/numpydoc/LICENSE.txt                 |    94 +
 doc/sphinxext/numpydoc/README.rst                  |    54 +
 doc/sphinxext/numpydoc/numpydoc/__init__.py        |     3 +
 doc/sphinxext/numpydoc/numpydoc/comment_eater.py   |   169 +
 .../numpydoc/numpydoc/compiler_unparse.py          |   865 ++
 doc/sphinxext/numpydoc/numpydoc/docscrape.py       |   525 +
 .../numpydoc/numpydoc/docscrape_sphinx.py          |   274 +
 doc/sphinxext/numpydoc/numpydoc/linkcode.py        |    83 +
 doc/sphinxext/numpydoc/numpydoc/numpydoc.py        |   187 +
 doc/sphinxext/numpydoc/numpydoc/phantom_import.py  |   167 +
 doc/sphinxext/numpydoc/numpydoc/plot_directive.py  |   642 +
 .../numpydoc/numpydoc/tests/test_docscrape.py      |   767 +
 .../numpydoc/numpydoc/tests/test_linkcode.py       |     5 +
 .../numpydoc/numpydoc/tests/test_phantom_import.py |    12 +
 .../numpydoc/numpydoc/tests/test_plot_directive.py |    11 +
 .../numpydoc/numpydoc/tests/test_traitsdoc.py      |    11 +
 doc/sphinxext/numpydoc/numpydoc/traitsdoc.py       |   142 +
 .../2014.05.13-ElBrogrammer/README.md              |     9 +
 .../presentations/2014.05.13-ElBrogrammer/dm.txt   |   440 +
 .../presentations/2014.05.13-ElBrogrammer/map.txt  |   466 +
 .../scikit-bio presentation.ipynb                  |  1274 ++
 .../2014.05.13-ElBrogrammer/smalldm.txt            |     3 +
 .../2014.05.13-ElBrogrammer/style.css              |   126 +
 .../2014.05.13-ElBrogrammer/talktools.py           |    41 +
 .../caporaso-scipy2014.ipynb                       |  1470 ++
 .../2014.07.09-gregcaporaso/code-deletion-pr.png   |   Bin 0 -> 419649 bytes
 .../2014.07.09-gregcaporaso/code-sprint-1.jpg      |   Bin 0 -> 132209 bytes
 .../2014.07.09-gregcaporaso/code-sprint-2.jpg      |   Bin 0 -> 88766 bytes
 .../2014.07.09-gregcaporaso/custom.css             |    39 +
 .../2014.07.09-gregcaporaso/iab-example.png        |   Bin 0 -> 509625 bytes
 .../presentations/2014.07.09-gregcaporaso/logo.png |   Bin 0 -> 77935 bytes
 .../2014.07.09-gregcaporaso/qiime-cites.png        |   Bin 0 -> 1595191 bytes
 .../skbio-alignment-docs.png                       |   Bin 0 -> 272465 bytes
 .../2014.07.09-gregcaporaso/skbio-bdiv-docs.png    |   Bin 0 -> 335441 bytes
 .../2014.07.09-gregcaporaso/skbio-contributors.png |   Bin 0 -> 289973 bytes
 .../2014.07.09-gregcaporaso/skbio-docs.png         |   Bin 0 -> 229789 bytes
 .../2014.07.09-gregcaporaso/skbio-timeline.png     |   Bin 0 -> 139814 bytes
 .../2014.07.09-gregcaporaso/skbio.png              |   Bin 0 -> 3385 bytes
 .../2014.07.09-gregcaporaso/style.css              |   150 +
 .../2014.07.09-gregcaporaso/talktools.py           |    39 +
 licenses/fastq-example-files-readme.txt            |   109 +
 licenses/ipython.txt                               |    74 +
 licenses/nb-slideshow-template.txt                 |    30 +
 licenses/numpydoc.txt                              |     1 +
 licenses/qiita.txt                                 |    27 +
 licenses/scikit-learn.txt                          |    35 +
 licenses/scipy.txt                                 |    31 +
 licenses/sphinx-bootstrap-theme.txt                |    19 +
 licenses/ssw.txt                                   |    46 +
 licenses/verman.txt                                |    27 +
 setup.py                                           |   102 +
 skbio/__init__.py                                  |    92 +
 skbio/_base.py                                     |    25 +
 skbio/alignment/__init__.py                        |   250 +
 skbio/alignment/_alignment.py                      |  2095 +++
 skbio/alignment/_exception.py                      |    26 +
 skbio/alignment/_lib/__init__.py                   |    10 +
 skbio/alignment/_lib/ssw.c                         |   861 ++
 skbio/alignment/_lib/ssw.h                         |   130 +
 skbio/alignment/_pairwise.py                       |   903 ++
 skbio/alignment/_ssw_wrapper.c                     | 14406 +++++++++++++++++++
 skbio/alignment/_ssw_wrapper.pyx                   |   804 ++
 skbio/alignment/tests/__init__.py                  |     7 +
 skbio/alignment/tests/test_alignment.py            |  1128 ++
 skbio/alignment/tests/test_pairwise.py             |   585 +
 skbio/alignment/tests/test_ssw.py                  |   739 +
 skbio/diversity/__init__.py                        |    30 +
 skbio/diversity/alpha/__init__.py                  |   161 +
 skbio/diversity/alpha/_ace.py                      |   116 +
 skbio/diversity/alpha/_base.py                     |   913 ++
 skbio/diversity/alpha/_chao1.py                    |   218 +
 skbio/diversity/alpha/_gini.py                     |   124 +
 skbio/diversity/alpha/_lladser.py                  |   605 +
 skbio/diversity/alpha/tests/__init__.py            |     9 +
 skbio/diversity/alpha/tests/test_ace.py            |    41 +
 skbio/diversity/alpha/tests/test_base.py           |   297 +
 skbio/diversity/alpha/tests/test_chao1.py          |    81 +
 skbio/diversity/alpha/tests/test_gini.py           |    64 +
 skbio/diversity/alpha/tests/test_lladser.py        |   242 +
 skbio/diversity/beta/__init__.py                   |   194 +
 skbio/diversity/beta/_base.py                      |   102 +
 skbio/diversity/beta/tests/__init__.py             |     7 +
 skbio/diversity/beta/tests/test_base.py            |   165 +
 skbio/draw/__init__.py                             |    37 +
 skbio/draw/_distributions.py                       |   701 +
 skbio/draw/tests/__init__.py                       |     9 +
 skbio/draw/tests/test_distributions.py             |   576 +
 skbio/format/__init__.py                           |    12 +
 skbio/format/sequences/__init__.py                 |    39 +
 skbio/format/sequences/fasta.py                    |   176 +
 skbio/format/sequences/fastq.py                    |    78 +
 skbio/format/sequences/tests/__init__.py           |     9 +
 skbio/format/sequences/tests/test_fasta.py         |    74 +
 skbio/format/sequences/tests/test_fastq.py         |    39 +
 skbio/io/__init__.py                               |   322 +
 skbio/io/_base.py                                  |   187 +
 skbio/io/_exception.py                             |    95 +
 skbio/io/_registry.py                              |   818 ++
 skbio/io/_warning.py                               |    19 +
 skbio/io/clustal.py                                |   328 +
 skbio/io/fasta.py                                  |   833 ++
 skbio/io/fastq.py                                  |   514 +
 skbio/io/lsmat.py                                  |   231 +
 skbio/io/newick.py                                 |   485 +
 skbio/io/ordination.py                             |   416 +
 skbio/io/phylip.py                                 |   245 +
 skbio/io/qseq.py                                   |   253 +
 skbio/io/tests/__init__.py                         |     7 +
 skbio/io/tests/data/empty                          |     0
 skbio/io/tests/data/error_diff_ids.fastq           |    20 +
 skbio/io/tests/data/error_double_qual.fastq        |    22 +
 skbio/io/tests/data/error_double_seq.fastq         |    22 +
 skbio/io/tests/data/error_long_qual.fastq          |    20 +
 skbio/io/tests/data/error_no_qual.fastq            |    20 +
 skbio/io/tests/data/error_qual_del.fastq           |    20 +
 skbio/io/tests/data/error_qual_escape.fastq        |    20 +
 skbio/io/tests/data/error_qual_null.fastq          |   Bin 0 -> 610 bytes
 skbio/io/tests/data/error_qual_space.fastq         |    21 +
 skbio/io/tests/data/error_qual_tab.fastq           |    21 +
 skbio/io/tests/data/error_qual_unit_sep.fastq      |    20 +
 skbio/io/tests/data/error_qual_vtab.fastq          |    20 +
 skbio/io/tests/data/error_short_qual.fastq         |    20 +
 skbio/io/tests/data/error_spaces.fastq             |    20 +
 skbio/io/tests/data/error_tabs.fastq               |    21 +
 skbio/io/tests/data/error_trunc_at_plus.fastq      |    19 +
 skbio/io/tests/data/error_trunc_at_qual.fastq      |    19 +
 skbio/io/tests/data/error_trunc_at_seq.fastq       |    18 +
 skbio/io/tests/data/error_trunc_in_plus.fastq      |    19 +
 skbio/io/tests/data/error_trunc_in_qual.fastq      |    20 +
 skbio/io/tests/data/error_trunc_in_seq.fastq       |    18 +
 skbio/io/tests/data/error_trunc_in_title.fastq     |    17 +
 skbio/io/tests/data/fasta_10_seqs                  |    24 +
 skbio/io/tests/data/fasta_3_seqs_defaults          |     6 +
 skbio/io/tests/data/fasta_3_seqs_non_defaults      |     9 +
 ...fasta_description_newline_replacement_empty_str |     4 +
 ...asta_description_newline_replacement_multi_char |     4 +
 .../fasta_description_newline_replacement_none     |    15 +
 .../data/fasta_id_whitespace_replacement_empty_str |     4 +
 .../fasta_id_whitespace_replacement_multi_char     |     4 +
 .../data/fasta_id_whitespace_replacement_none      |     7 +
 skbio/io/tests/data/fasta_invalid_after_10_seqs    |    25 +
 skbio/io/tests/data/fasta_invalid_blank_line       |     7 +
 skbio/io/tests/data/fasta_invalid_legacy_format    |     2 +
 skbio/io/tests/data/fasta_invalid_missing_header   |     2 +
 .../data/fasta_invalid_missing_seq_data_first      |     5 +
 .../tests/data/fasta_invalid_missing_seq_data_last |     5 +
 .../data/fasta_invalid_missing_seq_data_middle     |     5 +
 .../tests/data/fasta_invalid_whitespace_only_line  |     7 +
 skbio/io/tests/data/fasta_max_width_1              |    11 +
 skbio/io/tests/data/fasta_max_width_5              |    20 +
 skbio/io/tests/data/fasta_mixed_qual_scores        |     4 +
 skbio/io/tests/data/fasta_multi_seq                |    14 +
 skbio/io/tests/data/fasta_multi_seq_roundtrip      |     6 +
 skbio/io/tests/data/fasta_prot_seqs_odd_labels     |     8 +
 .../data/fasta_sequence_collection_different_type  |     6 +
 skbio/io/tests/data/fasta_single_bio_seq_defaults  |     2 +
 .../tests/data/fasta_single_bio_seq_non_defaults   |     5 +
 skbio/io/tests/data/fasta_single_dna_seq_defaults  |     2 +
 .../tests/data/fasta_single_dna_seq_non_defaults   |     5 +
 skbio/io/tests/data/fasta_single_nuc_seq_defaults  |     2 +
 .../tests/data/fasta_single_nuc_seq_non_defaults   |     6 +
 skbio/io/tests/data/fasta_single_prot_seq_defaults |     2 +
 .../tests/data/fasta_single_prot_seq_non_defaults  |     4 +
 skbio/io/tests/data/fasta_single_rna_seq_defaults  |     2 +
 .../tests/data/fasta_single_rna_seq_non_defaults   |     5 +
 skbio/io/tests/data/fasta_single_seq               |     2 +
 skbio/io/tests/data/fastq_invalid_missing_header   |     4 +
 skbio/io/tests/data/fastq_invalid_missing_seq_data |    13 +
 skbio/io/tests/data/fastq_multi_seq_sanger         |    12 +
 skbio/io/tests/data/fastq_single_seq_illumina1.3   |    10 +
 .../data/fastq_wrapping_as_illumina_no_description |    12 +
 .../data/fastq_wrapping_as_sanger_no_description   |    12 +
 .../fastq_wrapping_original_sanger_no_description  |    24 +
 .../tests/data/fastq_writer_illumina1.3_defaults   |    12 +
 skbio/io/tests/data/fastq_writer_sanger_defaults   |    12 +
 .../io/tests/data/fastq_writer_sanger_non_defaults |    12 +
 .../data/illumina_full_range_as_illumina.fastq     |     8 +
 .../tests/data/illumina_full_range_as_sanger.fastq |     8 +
 .../illumina_full_range_original_illumina.fastq    |     8 +
 skbio/io/tests/data/longreads_as_illumina.fastq    |    40 +
 skbio/io/tests/data/longreads_as_sanger.fastq      |    40 +
 .../io/tests/data/longreads_original_sanger.fastq  |   120 +
 skbio/io/tests/data/misc_dna_as_illumina.fastq     |    16 +
 skbio/io/tests/data/misc_dna_as_sanger.fastq       |    16 +
 skbio/io/tests/data/misc_dna_original_sanger.fastq |    16 +
 skbio/io/tests/data/misc_rna_as_illumina.fastq     |    16 +
 skbio/io/tests/data/misc_rna_as_sanger.fastq       |    16 +
 skbio/io/tests/data/misc_rna_original_sanger.fastq |    16 +
 skbio/io/tests/data/ordination_L&L_CA_data_scores  |    18 +
 .../data/ordination_PCoA_sample_data_3_scores      |    22 +
 skbio/io/tests/data/ordination_error1              |    43 +
 skbio/io/tests/data/ordination_error10             |    18 +
 skbio/io/tests/data/ordination_error11             |    44 +
 skbio/io/tests/data/ordination_error12             |    22 +
 skbio/io/tests/data/ordination_error13             |    22 +
 skbio/io/tests/data/ordination_error14             |    22 +
 skbio/io/tests/data/ordination_error15             |    21 +
 skbio/io/tests/data/ordination_error16             |    44 +
 skbio/io/tests/data/ordination_error17             |    44 +
 skbio/io/tests/data/ordination_error18             |    22 +
 skbio/io/tests/data/ordination_error19             |    18 +
 skbio/io/tests/data/ordination_error2              |    42 +
 skbio/io/tests/data/ordination_error20             |    18 +
 skbio/io/tests/data/ordination_error21             |     1 +
 skbio/io/tests/data/ordination_error22             |     2 +
 skbio/io/tests/data/ordination_error23             |     4 +
 skbio/io/tests/data/ordination_error24             |     8 +
 skbio/io/tests/data/ordination_error3              |    43 +
 skbio/io/tests/data/ordination_error4              |    43 +
 skbio/io/tests/data/ordination_error5              |    44 +
 skbio/io/tests/data/ordination_error6              |    44 +
 skbio/io/tests/data/ordination_error7              |     3 +
 skbio/io/tests/data/ordination_error8              |    22 +
 skbio/io/tests/data/ordination_error9              |    18 +
 skbio/io/tests/data/ordination_example2_scores     |    42 +
 skbio/io/tests/data/ordination_example3_scores     |    44 +
 .../tests/data/ordination_exp_Ordination_CCA_site  |    10 +
 .../ordination_exp_Ordination_CCA_site_constraints |    10 +
 .../data/ordination_exp_Ordination_CCA_species     |     9 +
 .../tests/data/ordination_exp_Ordination_PCoA_site |     9 +
 .../tests/data/ordination_exp_Ordination_RDA_site  |    10 +
 .../ordination_exp_Ordination_RDA_site_constraints |    10 +
 .../data/ordination_exp_Ordination_RDA_species     |     6 +
 skbio/io/tests/data/phylip_dna_3_seqs              |     4 +
 skbio/io/tests/data/phylip_single_seq_long         |     2 +
 skbio/io/tests/data/phylip_single_seq_short        |     2 +
 skbio/io/tests/data/phylip_two_chunks              |     3 +
 skbio/io/tests/data/phylip_variable_length_ids     |     7 +
 skbio/io/tests/data/qseq_invalid_filter            |     1 +
 skbio/io/tests/data/qseq_invalid_lane              |     1 +
 skbio/io/tests/data/qseq_invalid_read              |     1 +
 skbio/io/tests/data/qseq_invalid_tile              |     1 +
 skbio/io/tests/data/qseq_invalid_x                 |     1 +
 skbio/io/tests/data/qseq_invalid_y                 |     1 +
 skbio/io/tests/data/qseq_multi_seq_illumina1.3     |     4 +
 skbio/io/tests/data/qseq_single_seq_sanger         |     1 +
 skbio/io/tests/data/qual_2_seqs_defaults           |     4 +
 skbio/io/tests/data/qual_3_seqs_defaults           |     8 +
 .../tests/data/qual_3_seqs_defaults_desc_mismatch  |     8 +
 skbio/io/tests/data/qual_3_seqs_defaults_extra     |    10 +
 .../io/tests/data/qual_3_seqs_defaults_id_mismatch |     8 +
 .../data/qual_3_seqs_defaults_length_mismatch      |     8 +
 skbio/io/tests/data/qual_3_seqs_non_defaults       |    14 +
 .../qual_description_newline_replacement_empty_str |     4 +
 ...qual_description_newline_replacement_multi_char |     4 +
 .../data/qual_description_newline_replacement_none |    15 +
 .../data/qual_id_whitespace_replacement_empty_str  |     4 +
 .../data/qual_id_whitespace_replacement_multi_char |     4 +
 .../tests/data/qual_id_whitespace_replacement_none |     7 +
 skbio/io/tests/data/qual_invalid_blank_line        |     9 +
 skbio/io/tests/data/qual_invalid_legacy_format     |     2 +
 skbio/io/tests/data/qual_invalid_missing_header    |     2 +
 .../data/qual_invalid_missing_qual_scores_first    |     7 +
 .../data/qual_invalid_missing_qual_scores_last     |     5 +
 .../data/qual_invalid_missing_qual_scores_middle   |     7 +
 skbio/io/tests/data/qual_invalid_qual_scores_float |     8 +
 .../tests/data/qual_invalid_qual_scores_negative   |     8 +
 .../io/tests/data/qual_invalid_qual_scores_string  |     8 +
 .../tests/data/qual_invalid_whitespace_only_line   |     9 +
 skbio/io/tests/data/qual_max_width_1               |    11 +
 skbio/io/tests/data/qual_max_width_5               |    34 +
 skbio/io/tests/data/qual_multi_seq                 |    14 +
 skbio/io/tests/data/qual_multi_seq_roundtrip       |     6 +
 skbio/io/tests/data/qual_prot_seqs_odd_labels      |     8 +
 .../data/qual_sequence_collection_different_type   |     6 +
 .../io/tests/data/qual_single_bio_seq_non_defaults |     5 +
 .../io/tests/data/qual_single_dna_seq_non_defaults |     5 +
 .../io/tests/data/qual_single_nuc_seq_non_defaults |     6 +
 .../tests/data/qual_single_prot_seq_non_defaults   |     4 +
 .../io/tests/data/qual_single_rna_seq_non_defaults |     5 +
 skbio/io/tests/data/qual_single_seq                |     2 +
 skbio/io/tests/data/real_file                      |     5 +
 skbio/io/tests/data/real_file_2                    |     6 +
 .../tests/data/sanger_full_range_as_illumina.fastq |     8 +
 .../tests/data/sanger_full_range_as_sanger.fastq   |     8 +
 .../data/sanger_full_range_original_sanger.fastq   |     8 +
 .../data/solexa_full_range_original_solexa.fastq   |     8 +
 skbio/io/tests/data/tsv_10_fields                  |     6 +
 skbio/io/tests/data/tsv_8_fields                   |     6 +
 skbio/io/tests/data/whitespace_only                |    17 +
 skbio/io/tests/data/wrapping_as_illumina.fastq     |    12 +
 skbio/io/tests/data/wrapping_as_sanger.fastq       |    12 +
 skbio/io/tests/data/wrapping_original_sanger.fastq |    24 +
 skbio/io/tests/test_base.py                        |   359 +
 skbio/io/tests/test_clustal.py                     |   265 +
 skbio/io/tests/test_fasta.py                       |   893 ++
 skbio/io/tests/test_fastq.py                       |   551 +
 skbio/io/tests/test_lsmat.py                       |   251 +
 skbio/io/tests/test_newick.py                      |   371 +
 skbio/io/tests/test_ordination.py                  |   228 +
 skbio/io/tests/test_phylip.py                      |   102 +
 skbio/io/tests/test_qseq.py                        |   294 +
 skbio/io/tests/test_registry.py                    |  1228 ++
 skbio/io/tests/test_util.py                        |   123 +
 skbio/io/util.py                                   |    99 +
 skbio/parse/__init__.py                            |    12 +
 skbio/parse/record.py                              |   491 +
 skbio/parse/record_finder.py                       |   193 +
 skbio/parse/sequences/__init__.py                  |   201 +
 skbio/parse/sequences/_exception.py                |    16 +
 skbio/parse/sequences/clustal.py                   |   100 +
 skbio/parse/sequences/factory.py                   |   147 +
 skbio/parse/sequences/fasta.py                     |   240 +
 skbio/parse/sequences/fastq.py                     |   176 +
 skbio/parse/sequences/iterator.py                  |   206 +
 skbio/parse/sequences/tests/__init__.py            |     9 +
 skbio/parse/sequences/tests/data/fna1.fasta        |     4 +
 skbio/parse/sequences/tests/data/fna1.fna.gz       |   Bin 0 -> 49 bytes
 skbio/parse/sequences/tests/data/fna1.qual         |     4 +
 skbio/parse/sequences/tests/data/fq1.fastq.gz      |   Bin 0 -> 60 bytes
 skbio/parse/sequences/tests/data/fq1.fq            |     8 +
 skbio/parse/sequences/tests/data/noextensionfasta  |     4 +
 skbio/parse/sequences/tests/data/qs1.qseq.gz       |   Bin 0 -> 91 bytes
 skbio/parse/sequences/tests/test_clustal.py        |   155 +
 skbio/parse/sequences/tests/test_factory.py        |   201 +
 skbio/parse/sequences/tests/test_fasta.py          |   196 +
 skbio/parse/sequences/tests/test_fastq.py          |   223 +
 skbio/parse/sequences/tests/test_iterator.py       |   336 +
 skbio/parse/tests/__init__.py                      |     9 +
 skbio/parse/tests/test_record.py                   |   550 +
 skbio/parse/tests/test_record_finder.py            |   257 +
 skbio/sequence/__init__.py                         |   163 +
 skbio/sequence/_exception.py                       |    29 +
 skbio/sequence/_genetic_code.py                    |   620 +
 skbio/sequence/_sequence.py                        |  1969 +++
 skbio/sequence/tests/__init__.py                   |     9 +
 skbio/sequence/tests/test_genetic_code.py          |   377 +
 skbio/sequence/tests/test_sequence.py              |  1418 ++
 skbio/stats/__init__.py                            |    49 +
 skbio/stats/__subsample.c                          |  6415 +++++++++
 skbio/stats/__subsample.pyx                        |    36 +
 skbio/stats/_misc.py                               |    74 +
 skbio/stats/_subsample.py                          |   325 +
 skbio/stats/distance/__init__.py                   |   206 +
 skbio/stats/distance/_anosim.py                    |   275 +
 skbio/stats/distance/_base.py                      |  1232 ++
 skbio/stats/distance/_bioenv.py                    |   243 +
 skbio/stats/distance/_mantel.py                    |   490 +
 skbio/stats/distance/_permanova.py                 |   221 +
 skbio/stats/distance/tests/__init__.py             |     9 +
 .../stats/distance/tests/data/bioenv_df_vegan.txt  |    25 +
 .../stats/distance/tests/data/bioenv_dm_vegan.txt  |    25 +
 .../tests/data/bioenv_exp_results_vegan.txt        |     7 +
 skbio/stats/distance/tests/data/df.txt             |     8 +
 .../stats/distance/tests/data/df_extra_column.txt  |     8 +
 skbio/stats/distance/tests/data/dm.txt             |     7 +
 skbio/stats/distance/tests/data/dm2.txt            |     7 +
 skbio/stats/distance/tests/data/dm3.txt            |     7 +
 skbio/stats/distance/tests/data/dm4.txt            |     7 +
 skbio/stats/distance/tests/data/dm_reordered.txt   |     7 +
 skbio/stats/distance/tests/data/exp_results.txt    |    12 +
 .../data/exp_results_different_column_order.txt    |    12 +
 .../tests/data/exp_results_single_column.txt       |     2 +
 .../distance/tests/data/mantel_env_dm_vegan.txt    |    24 +
 .../distance/tests/data/mantel_veg_dm_vegan.txt    |    24 +
 .../tests/data/pwmantel_exp_results_all_dms.txt    |     7 +
 .../tests/data/pwmantel_exp_results_dm_dm2.txt     |     2 +
 .../data/pwmantel_exp_results_duplicate_dms.txt    |     4 +
 .../tests/data/pwmantel_exp_results_minimal.txt    |     4 +
 .../pwmantel_exp_results_minimal_with_labels.txt   |     4 +
 .../tests/data/pwmantel_exp_results_na_p_value.txt |     2 +
 ...tel_exp_results_reordered_distance_matrices.txt |     4 +
 skbio/stats/distance/tests/test_anosim.py          |   202 +
 skbio/stats/distance/tests/test_base.py            |   766 +
 skbio/stats/distance/tests/test_bioenv.py          |   222 +
 skbio/stats/distance/tests/test_mantel.py          |   569 +
 skbio/stats/distance/tests/test_permanova.py       |   200 +
 skbio/stats/gradient.py                            |   852 ++
 skbio/stats/ordination/__init__.py                 |   123 +
 skbio/stats/ordination/_base.py                    |   472 +
 .../_canonical_correspondence_analysis.py          |   250 +
 skbio/stats/ordination/_correspondence_analysis.py |   187 +
 .../ordination/_principal_coordinate_analysis.py   |   171 +
 skbio/stats/ordination/_redundancy_analysis.py     |   233 +
 skbio/stats/ordination/_utils.py                   |   223 +
 skbio/stats/ordination/tests/__init__.py           |     7 +
 skbio/stats/ordination/tests/data/L&L_CA_data      |     3 +
 skbio/stats/ordination/tests/data/PCoA_sample_data |    14 +
 .../stats/ordination/tests/data/PCoA_sample_data_2 |     6 +
 .../stats/ordination/tests/data/PCoA_sample_data_3 |    10 +
 skbio/stats/ordination/tests/data/example2_X       |    10 +
 skbio/stats/ordination/tests/data/example2_Y       |    12 +
 .../tests/data/example2_site_scaling1_from_vegan   |    10 +
 .../tests/data/example2_site_scaling2_from_vegan   |    10 +
 .../data/example2_species_scaling1_from_vegan      |     6 +
 .../data/example2_species_scaling2_from_vegan      |     6 +
 skbio/stats/ordination/tests/data/example3_X       |    10 +
 skbio/stats/ordination/tests/data/example3_Y       |    13 +
 .../tests/data/example3_site_scaling1_from_vegan   |    10 +
 .../tests/data/example3_site_scaling2_from_vegan   |    10 +
 .../data/example3_species_scaling1_from_vegan      |     9 +
 .../data/example3_species_scaling2_from_vegan      |     9 +
 .../tests/data/exp_PCoAEigenResults_site           |     9 +
 .../stats/ordination/tests/data/exp_PCoAzeros_site |    14 +
 skbio/stats/ordination/tests/test_ordination.py    |   896 ++
 skbio/stats/power.py                               |   994 ++
 skbio/stats/spatial.py                             |   197 +
 skbio/stats/tests/__init__.py                      |     9 +
 skbio/stats/tests/data/cr_data_out                 |     6 +
 skbio/stats/tests/data/cr_data_raw                 |     8 +
 skbio/stats/tests/data/cr_no_data_out              |     1 +
 skbio/stats/tests/data/cr_no_data_raw              |     0
 skbio/stats/tests/data/gr_w_msg_out                |     3 +
 skbio/stats/tests/data/gr_w_msg_raw                |     4 +
 skbio/stats/tests/data/gr_wo_msg_out               |     2 +
 skbio/stats/tests/data/gr_wo_msg_raw               |     3 +
 skbio/stats/tests/data/vr_out                      |    12 +
 skbio/stats/tests/data/vr_raw                      |    13 +
 skbio/stats/tests/data/vr_real_out                 |    14 +
 skbio/stats/tests/data/vr_real_raw                 |    13 +
 skbio/stats/tests/test_gradient.py                 |  1033 ++
 skbio/stats/tests/test_misc.py                     |    99 +
 skbio/stats/tests/test_power.py                    |   398 +
 skbio/stats/tests/test_spatial.py                  |   147 +
 skbio/stats/tests/test_subsample.py                |   262 +
 skbio/tests/__init__.py                            |     9 +
 skbio/tests/test_base.py                           |    26 +
 skbio/tests/test_workflow.py                       |   407 +
 skbio/tree/__init__.py                             |   260 +
 skbio/tree/_exception.py                           |    34 +
 skbio/tree/_majority_rule.py                       |   309 +
 skbio/tree/_nj.py                                  |   286 +
 skbio/tree/_tree.py                                |  3294 +++++
 skbio/tree/_trie.py                                |   264 +
 skbio/tree/tests/__init__.py                       |     7 +
 skbio/tree/tests/test_majority_rule.py             |   171 +
 skbio/tree/tests/test_nj.py                        |   207 +
 skbio/tree/tests/test_tree.py                      |  1350 ++
 skbio/tree/tests/test_trie.py                      |   216 +
 skbio/util/__init__.py                             |    76 +
 skbio/util/_exception.py                           |    14 +
 skbio/util/_misc.py                                |   334 +
 skbio/util/_testing.py                             |    47 +
 skbio/util/_warning.py                             |    22 +
 skbio/util/tests/__init__.py                       |     9 +
 skbio/util/tests/test_misc.py                      |   160 +
 skbio/util/tests/test_testing.py                   |    28 +
 skbio/workflow.py                                  |   550 +
 488 files changed, 86105 insertions(+), 125 deletions(-)

diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..84e1f79
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,20 @@
+# this file is based on the examples provided on scikit-learn's .coveragerc
+
+[run]
+omit =
+    */tests*
+    */__init__.py
+source = skbio
+branch = True
+include = */skbio/*
+
+[report]
+exclude_lines =
+    pragma: no cover
+    def __repr__
+    raise NotImplementedError
+    if __name__ == .__main__.:
+omit =
+    */tests*
+    */__init__.py
+
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..257d39b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,46 @@
+# Temporary files
+*~
+\#*#
+
+*.py[cod]
+
+# C extensions
+*.so
+
+# Packages
+*.egg
+*.egg-info
+dist
+build
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+lib64
+__pycache__
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.tox
+nosetests.xml
+
+# Translations
+*.mo
+
+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
+
+# vi
+.*.swp
+
+# Sphinx builds
+doc/source/generated
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..a08278f
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,38 @@
+# Check on http://lint.travis-ci.org/ after modifying it!  Originally
+# modified from https://gist.github.com/dan-blanchard/7045057
+language: python
+env:
+  # Test against latest versions of numpy and matplotlib. Also test against
+  # older versions of numpy and matplotlib (pre-1.4.0). matplotlib's boxplot
+  # functionality was largely rewritten/refactored in 1.4.0 and some of skbio's
+  # code had to be updated in the process, so it's worth testing against a
+  # pre-1.4.0 version and whatever the latest version is.
+  - PYTHON_VERSION=3.4 NUMPY_VERSION= MATPLOTLIB_VERSION=
+  - PYTHON_VERSION=2.7 NUMPY_VERSION= MATPLOTLIB_VERSION= WITH_DOCTEST=True USE_CYTHON=TRUE
+  - PYTHON_VERSION=2.7 NUMPY_VERSION='=1.7' MATPLOTLIB_VERSION='=1.3.1' WITH_DOCTEST=True
+before_install:
+  - "export DISPLAY=:99.0"
+  - "sh -e /etc/init.d/xvfb start"
+  - wget http://repo.continuum.io/miniconda/Miniconda3-3.7.3-Linux-x86_64.sh -O miniconda.sh
+  - chmod +x miniconda.sh
+  - ./miniconda.sh -b
+  - export PATH=/home/travis/miniconda3/bin:$PATH
+  # Update conda itself
+  - conda update --yes conda
+install:
+  - conda create --yes -n env_name python=$PYTHON_VERSION pip numpy$NUMPY_VERSION scipy matplotlib$MATPLOTLIB_VERSION pandas nose pep8 Sphinx=1.2.2 IPython
+  - if [ ${USE_CYTHON} ]; then conda install --yes -n env_name cython; fi
+  - source activate env_name
+  - pip install sphinx-bootstrap-theme future six coveralls natsort pyflakes flake8 python-dateutil
+  - pip install -e . --no-deps
+script:
+  - if [ ${WITH_DOCTEST} ]; then PYTHONWARNINGS=ignore nosetests skbio --with-doctest --with-coverage -I DONOTIGNOREANYTHING; else PYTHONWARNINGS=ignore nosetests skbio --with-coverage -I DONOTIGNOREANYTHING; fi
+  - pep8 skbio setup.py checklist.py
+  - flake8 skbio setup.py checklist.py
+  - ./checklist.py
+  - pushd doc
+  - make clean
+  - make html
+  - popd
+after_success:
+  - coveralls
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..f694b21
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,297 @@
+# scikit-bio changelog
+
+## Version 0.2.3 (2015-02-13)
+
+### Features
+* Modified ``skbio.stats.distance.pwmantel`` to accept a list of filepaths. This is useful as it allows for a smaller amount of memory consumption as it only loads two matrices at a time as opposed to requiring that all distance matrices are loaded into memory.
+* Added ``skbio.util.find_duplicates`` for finding duplicate elements in an iterable.
+
+### Bug fixes
+* Fixed floating point precision bugs in ``Alignment.position_frequencies``, ``Alignment.position_entropies``, ``Alignment.omit_gap_positions``, ``Alignment.omit_gap_sequences``, ``BiologicalSequence.k_word_frequencies``, and ``SequenceCollection.k_word_frequencies`` ([#801](https://github.com/biocore/scikit-bio/issues/801)).
+
+### Backward-incompatible changes
+* Removed ``feature_types`` attribute from ``BiologicalSequence`` and all subclasses ([#797](https://github.com/biocore/scikit-bio/pull/797)).
+* Removed ``find_features`` method from ``BiologicalSequence`` and ``ProteinSequence`` ([#797](https://github.com/biocore/scikit-bio/pull/797)).
+* ``BiologicalSequence.k_word_frequencies`` now returns a ``collections.defaultdict`` of type ``float`` instead of type ``int``. This only affects the "default" case, when a key isn't present in the dictionary. Previous behavior would return ``0`` as an ``int``, while the new behavior is to return ``0.0`` as a ``float``. This change also affects the ``defaultdict``s that are returned by ``SequenceCollection.k_word_frequencies``.
+
+### Miscellaneous
+* ``DissimilarityMatrix`` and ``DistanceMatrix`` now report duplicate IDs in the ``DissimilarityMatrixError`` message that can be raised during validation.
+
+## Version 0.2.2 (2014-12-04)
+
+### Features
+* Added ``plot`` method to ``skbio.stats.distance.DissimilarityMatrix`` for creating basic heatmaps of a dissimilarity/distance matrix (see [#684](https://github.com/biocore/scikit-bio/issues/684)). Also added  ``_repr_png_`` and ``_repr_svg_`` methods for automatic display in the IPython Notebook, with ``png`` and ``svg`` properties for direct access.
+* Added `__str__` method to `skbio.stats.ordination.OrdinationResults`.
+* Added ``skbio.stats.distance.anosim`` and ``skbio.stats.distance.permanova`` functions, which replace the ``skbio.stats.distance.ANOSIM`` and ``skbio.stats.distance.PERMANOVA`` classes. These new functions provide simpler procedural interfaces to running these statistical methods. They also provide more convenient access to results by returning a ``pandas.Series`` instead of a ``CategoricalStatsResults`` object. These functions have more extensive documentation than their previous vers [...]
+* Added `skbio.stats.power` for performing empirical power analysis. The module uses existing datasets and iteratively draws samples to estimate the number of samples needed to see a significant difference for a given critical value.
+* Added `skbio.stats.isubsample` for subsampling from an unknown number of values. This method supports subsampling from multiple partitions and does not require that all items be stored in memory, requiring approximately `O(N*M)` space where `N` is the number of partitions and `M` is the maximum subsample size.
+* Added ``skbio.stats.subsample_counts``, which replaces ``skbio.stats.subsample``. See deprecation section below for more details ([#770](https://github.com/biocore/scikit-bio/issues/770)).
+
+### Bug fixes
+* Fixed issue where SSW wouldn't compile on i686 architectures ([#409](https://github.com/biocore/scikit-bio/issues/409)).
+
+### Deprecated functionality
+* Deprecated ``skbio.stats.p_value_to_str``. This function will be removed in scikit-bio 0.3.0. Permutation-based p-values in scikit-bio are calculated as ``(num_extreme + 1) / (num_permutations + 1)``, so it is impossible to obtain a p-value of zero. This function historically existed for correcting the number of digits displayed when obtaining a p-value of zero. Since this is no longer possible, this functionality will be removed.
+* Deprecated ``skbio.stats.distance.ANOSIM`` and ``skbio.stats.distance.PERMANOVA`` in favor of ``skbio.stats.distance.anosim`` and ``skbio.stats.distance.permanova``, respectively.
+* Deprecated ``skbio.stats.distance.CategoricalStatsResults`` in favor of using ``pandas.Series`` to store statistical method results. ``anosim`` and ``permanova`` return ``pandas.Series`` instead of ``CategoricalStatsResults``.
+* Deprecated ``skbio.stats.subsample`` in favor of ``skbio.stats.subsample_counts``, which provides an identical interface; only the function name has changed. ``skbio.stats.subsample`` will be removed in scikit-bio 0.3.0.
+
+### Backward-incompatible changes
+* Deprecation warnings are now raised using ``DeprecationWarning`` instead of ``UserWarning`` ([#774](https://github.com/biocore/scikit-bio/issues/774)).
+
+### Miscellaneous
+* The ``pandas.DataFrame`` returned by ``skbio.stats.distance.pwmantel`` now stores p-values as floats and does not convert them to strings with a specific number of digits. p-values that were previously stored as "N/A" are now stored as ``np.nan`` for consistency with other statistical methods in scikit-bio. See note in "Deprecated functionality" above regarding ``p_value_to_str`` for details.
+* scikit-bio now supports versions of IPython < 2.0.0 ([#767](https://github.com/biocore/scikit-bio/issues/767)).
+
+## Version 0.2.1 (2014-10-27)
+
+This is an alpha release of scikit-bio. At this stage, major backwards-incompatible API changes can and will happen. Unified I/O with the scikit-bio I/O registry was the focus of this release.
+
+### Features
+* Added ``strict`` and ``lookup`` optional parameters to ``skbio.stats.distance.mantel`` for handling reordering and matching of IDs when provided ``DistanceMatrix`` instances as input (these parameters were previously only available in ``skbio.stats.distance.pwmantel``).
+* ``skbio.stats.distance.pwmantel`` now accepts an iterable of ``array_like`` objects. Previously, only ``DistanceMatrix`` instances were allowed.
+* Added ``plot`` method to ``skbio.stats.ordination.OrdinationResults`` for creating basic 3-D matplotlib scatterplots of ordination results, optionally colored by metadata in a ``pandas.DataFrame`` (see [#518](https://github.com/biocore/scikit-bio/issues/518)). Also added  ``_repr_png_`` and ``_repr_svg_`` methods for automatic display in the IPython Notebook, with ``png`` and ``svg`` properties for direct access.
+* Added ``skbio.stats.ordination.assert_ordination_results_equal`` for comparing ``OrdinationResults`` objects for equality in unit tests.
+* ``BiologicalSequence`` (and its subclasses) now optionally store Phred quality scores. A biological sequence's quality scores are stored as a 1-D ``numpy.ndarray`` of nonnegative integers that is the same length as the biological sequence. Quality scores can be provided upon object instantiation via the keyword argument ``quality``, and can be retrieved via the ``BiologicalSequence.quality`` property. ``BiologicalSequence.has_quality`` is also provided for determining whether a biologi [...]
+* Added ``BiologicalSequence.sequence`` property for retrieving the underlying string representing the sequence characters. This was previously (and still is) accessible via ``BiologicalSequence.__str__``. It is provided via a property for convenience and explicitness.
+* Added ``BiologicalSequence.equals`` for full control over equality testing of biological sequences. By default, biological sequences must have the same type, underlying sequence of characters, identifier, description, and quality scores to compare equal. These properties can be ignored via the keyword argument ``ignore``. The behavior of ``BiologicalSequence.__eq__``/``__ne__`` remains unchanged (only type and underlying sequence of characters are compared).
+* Added ``BiologicalSequence.copy`` for creating a copy of a biological sequence, optionally with one or more attributes updated.
+* ``BiologicalSequence.__getitem__`` now supports specifying a sequence of indices to take from the biological sequence.
+* Methods to read and write taxonomies are now available under ``skbio.tree.TreeNode.from_taxonomy`` and ``skbio.tree.TreeNode.to_taxonomy`` respectively.
+* Added ``SequenceCollection.update_ids``, which provides a flexible way of updating sequence IDs on a ``SequenceCollection`` or ``Alignment`` (note that a new object is returned, since instances of these classes are immutable). Deprecated ``SequenceCollection.int_map`` in favor of this new method; it will be removed in scikit-bio 0.3.0.
+* Added ``skbio.util.cardinal_to_ordinal`` for converting a cardinal number to ordinal string (e.g., useful for error messages).
+* New I/O Registry: supports multiple file formats, automatic file format detection when reading, unified procedural ``skbio.io.read`` and ``skbio.io.write`` in addition to OOP interfaces (``read/write`` methods) on the below objects. See ``skbio.io`` for more details.
+    - Added "clustal" format support:
+        * Has sniffer
+        * Readers: ``Alignment``
+        * Writers: ``Alignment``
+    - Added "lsmat" format support:
+        * Has sniffer
+        * Readers: ``DissimilarityMatrix``, ``DistanceMatrix``
+        * Writers: ``DissimilarityMatrix``, ``DistanceMatrix``
+    - Added "ordination" format support:
+        * Has sniffer
+        * Readers: ``OrdinationResults``
+        * Writers: ``OrdinationResults``
+    - Added "newick" format support:
+        * Has sniffer
+        * Readers: ``TreeNode``
+        * Writers: ``TreeNode``
+    - Added "phylip" format support:
+        * No sniffer
+        * Readers: None
+        * Writers: ``Alignment``
+    - Added "qseq" format support:
+        * Has sniffer
+        * Readers: generator of ``BiologicalSequence`` or its subclasses, ``SequenceCollection``, ``BiologicalSequence``, ``NucleotideSequence``, ``DNASequence``, ``RNASequence``, ``ProteinSequence``
+        * Writers: None
+    - Added "fasta"/QUAL format support:
+        * Has sniffer
+        * Readers: generator of ``BiologicalSequence`` or its subclasses, ``SequenceCollection``, ``Alignment``, ``BiologicalSequence``, ``NucleotideSequence``, ``DNASequence``, ``RNASequence``, ``ProteinSequence``
+        * Writers: same as readers
+    - Added "fastq" format support:
+        * Has sniffer
+        * Readers: generator of ``BiologicalSequence`` or its subclasses, ``SequenceCollection``, ``Alignment``, ``BiologicalSequence``, ``NucleotideSequence``, ``DNASequence``, ``RNASequence``, ``ProteinSequence``
+        * Writers: same as readers
+
+### Bug fixes
+
+* Removed ``constructor`` parameter from ``Alignment.k_word_frequencies``, ``BiologicalSequence.k_words``, ``BiologicalSequence.k_word_counts``, and ``BiologicalSequence.k_word_frequencies`` as it had no effect (it was never hooked up in the underlying code). ``BiologicalSequence.k_words`` now returns a generator of ``BiologicalSequence`` objects instead of strings.
+* Modified the ``Alignment`` constructor to verify that all sequences have the same length, if not, raise an ``AlignmentError`` exception.  Updated the method ``Alignment.subalignment`` to calculate the indices only once now that identical sequence length is guaranteed.
+
+### Deprecated functionality
+* Deprecated ``constructor`` parameter in ``Alignment.majority_consensus`` in favor of having users call ``str`` on the returned ``BiologicalSequence``. This parameter will be removed in scikit-bio 0.3.0.
+
+* Existing I/O functionality deprecated in favor of I/O registry, old functionality will be removed in scikit-bio 0.3.0. All functionality can be found at ``skbio.io.read``, ``skbio.io.write``, and the methods listed below:
+    * Deprecated the following "clustal" readers/writers:
+        - ``write_clustal`` -> ``Alignment.write``
+        - ``parse_clustal`` -> ``Alignment.read``
+
+    * Deprecated the following distance matrix format ("lsmat") readers/writers:
+        - ``DissimilarityMatrix.from_file`` -> ``DissimilarityMatrix.read``
+        - ``DissimilarityMatrix.to_file`` -> ``DissimilarityMatrix.write``
+        - ``DistanceMatrix.from_file`` -> ``DistanceMatrix.read``
+        - ``DistanceMatrix.to_file`` -> ``DistanceMatrix.write``
+
+    * Deprecated the following ordination format ("ordination") readers/writers:
+        - ``OrdinationResults.from_file`` -> ``OrdinationResults.read``
+        - ``OrdinationResults.to_file`` -> ``OrdinationResults.write``
+
+    * Deprecated the following "newick" readers/writers:
+        - ``TreeNode.from_file`` -> ``TreeNode.read``
+        - ``TreeNode.from_newick`` -> ``TreeNode.read``
+        - ``TreeNode.to_newick`` -> ``TreeNode.write``
+
+    * Deprecated the following "phylip" writers:
+        - ``Alignment.to_phylip`` -> ``Alignment.write``
+
+    * Deprecated the following "fasta"/QUAL readers/writers:
+        - ``SequenceCollection.from_fasta_records`` -> ``SequenceCollection.read``
+        - ``SequenceCollection.to_fasta`` -> ``SequenceCollection.write``
+        - ``fasta_from_sequences`` -> ``skbio.io.write(obj, into=<file>, format='fasta')``
+        - ``fasta_from_alignment`` -> ``Alignment.write``
+        - ``parse_fasta`` -> ``skbio.io.read(<fasta>, format='fasta')``
+        - ``parse_qual`` -> ``skbio.io.read(<fasta>, format='fasta', qual=<file>)``
+        - ``BiologicalSequence.to_fasta`` -> ``BiologicalSequence.write``
+
+    * Deprecated the following "fastq" readers/writers:
+        - ``parse_fastq`` -> ``skbio.io.read(<fastq>, format='fastq')``
+        - ``format_fastq_record`` -> ``skbio.io.write(<fastq>, format='fastq')``
+
+### Backward-incompatible changes
+
+* ``skbio.stats.distance.mantel`` now returns a 3-element tuple containing correlation coefficient, p-value, and the number of matching rows/cols in the distance matrices (``n``). The return value was previously a 2-element tuple containing only the correlation coefficient and p-value.
+* ``skbio.stats.distance.mantel`` reorders input ``DistanceMatrix`` instances based on matching IDs (see optional parameters ``strict`` and ``lookup`` for controlling this behavior). In the past, ``DistanceMatrix`` instances were treated the same as ``array_like`` input and no reordering took place, regardless of ID (mis)matches. ``array_like`` input behavior remains the same.
+* If mismatched types are provided to ``skbio.stats.distance.mantel`` (e.g., a ``DistanceMatrix`` and ``array_like``), a ``TypeError`` will be raised.
+
+### Miscellaneous
+
+* Added git timestamp checking to checklist.py, ensuring that when changes are made to Cython (.pyx) files, their corresponding generated C files are also updated.
+* Fixed performance bug when instantiating ``BiologicalSequence`` objects. The previous runtime scaled linearly with sequence length; it is now constant time when the sequence is already a string. See [#623](https://github.com/biocore/scikit-bio/issues/623) for details.
+* IPython and six are now required dependencies.
+
+## Version 0.2.0 (2014-08-07)
+
+This is an initial alpha release of scikit-bio. At this stage, major backwards-incompatible API changes can and will happen. Many backwards-incompatible API changes were made since the previous release.
+
+### Features
+
+* Added ability to compute distances between sequences in a ``SequenceCollection`` object ([#509](https://github.com/biocore/scikit-bio/issues/509)), and expanded ``Alignment.distance`` to allow the user to pass a function for computing distances (the default distance metric is still ``scipy.spatial.distance.hamming``) ([#194](https://github.com/biocore/scikit-bio/issues/194)).
+* Added functionality to not penalize terminal gaps in global alignment. This functionality results in more biologically relevant global alignments (see [#537](https://github.com/biocore/scikit-bio/issues/537) for discussion of the issue) and is now the default behavior for global alignment.
+* The python global aligners (``global_pairwise_align``, ``global_pairwise_align_nucleotide``, and ``global_pairwise_align_protein``) now support aligning pairs of sequences, pairs of alignments, and a sequence and an alignment (see [#550](https://github.com/biocore/scikit-bio/issues/550)). This functionality supports progressive multiple sequence alignment, among other things such as adding a sequence to an existing alignment.
+* Added ``StockholmAlignment.to_file`` for writing Stockholm-formatted files.
+* Added ``strict=True`` optional parameter to ``DissimilarityMatrix.filter``.
+* Added ``TreeNode.find_all`` for finding all tree nodes that match a given name.
+
+
+### Bug fixes
+
+* Fixed bug that resulted in a ``ValueError`` from ``local_align_pairwise_nucleotide`` (see [#504](https://github.com/biocore/scikit-bio/issues/504)) under many circumstances. This would not generate incorrect results, but would cause the code to fail.
+
+### Backward-incompatible changes
+
+* Removed ``skbio.math``, leaving ``stats`` and ``diversity`` to become top level packages. For example, instead of ``from skbio.math.stats.ordination import PCoA`` you would now import ``from skbio.stats.ordination import PCoA``.
+* The module ``skbio.math.gradient`` as well as the contents of ``skbio.math.subsample`` and ``skbio.math.stats.misc`` are now found in ``skbio.stats``. As an example, to import subsample: ``from skbio.stats import subsample``; to import everything from gradient: ``from skbio.stats.gradient import *``.
+* The contents of ``skbio.math.stats.ordination.utils`` are now in ``skbio.stats.ordination``.
+* Removed ``skbio.app`` subpackage (i.e., the *application controller framework*) as this code has been ported to the standalone [burrito](https://github.com/biocore/burrito) Python package. This code was not specific to bioinformatics and is useful for wrapping command-line applications in general.
+* Removed ``skbio.core``, leaving ``alignment``, ``genetic_code``, ``sequence``, ``tree``, and ``workflow`` to become top level packages. For example, instead of ``from skbio.core.sequence import DNA`` you would now import ``from skbio.sequence import DNA``.
+* Removed ``skbio.util.exception`` and ``skbio.util.warning`` (see [#577](https://github.com/biocore/scikit-bio/issues/577) for the reasoning behind this change). The exceptions/warnings were moved to the following locations:
+ - ``FileFormatError``, ``RecordError``, ``FieldError``, and ``EfficiencyWarning`` have been moved to ``skbio.util``
+ - ``BiologicalSequenceError`` has been moved to ``skbio.sequence``
+ - ``SequenceCollectionError`` and ``StockholmParseError`` have been moved to ``skbio.alignment``
+ - ``DissimilarityMatrixError``, ``DistanceMatrixError``, ``DissimilarityMatrixFormatError``, and ``MissingIDError`` have been moved to ``skbio.stats.distance``
+ - ``TreeError``, ``NoLengthError``, ``DuplicateNodeError``, ``MissingNodeError``, and ``NoParentError`` have been moved to ``skbio.tree``
+ - ``FastqParseError`` has been moved to ``skbio.parse.sequences``
+ - ``GeneticCodeError``, ``GeneticCodeInitError``, and ``InvalidCodonError`` have been moved to ``skbio.genetic_code``
+* The contents of ``skbio.genetic_code`` formerly ``skbio.core.genetic_code`` are now in ``skbio.sequence``. The ``GeneticCodes`` dictionary is now a function ``genetic_code``. The functionality is the same, except that because this is now a function rather than a dict, retrieving a genetic code is done using a function call rather than a lookup (so, for example, ``GeneticCodes[2]`` becomes ``genetic_code(2)``).
+* Many submodules have been made private with the intention of simplifying imports for users. See [#562](https://github.com/biocore/scikit-bio/issues/562) for discussion of this change. The following list contains the previous module name and where imports from that module should now come from.
+ - ``skbio.alignment.ssw`` to ``skbio.alignment``
+ - ``skbio.alignment.alignment`` to ``skbio.alignment``
+ - ``skbio.alignment.pairwise`` to ``skbio.alignment``
+ - ``skbio.diversity.alpha.base`` to ``skbio.diversity.alpha``
+ - ``skbio.diversity.alpha.gini`` to ``skbio.diversity.alpha``
+ - ``skbio.diversity.alpha.lladser`` to ``skbio.diversity.alpha``
+ - ``skbio.diversity.beta.base`` to ``skbio.diversity.beta``
+ - ``skbio.draw.distributions`` to ``skbio.draw``
+ - ``skbio.stats.distance.anosim`` to ``skbio.stats.distance``
+ - ``skbio.stats.distance.base`` to ``skbio.stats.distance``
+ - ``skbio.stats.distance.permanova`` to ``skbio.stats.distance``
+ - ``skbio.distance`` to ``skbio.stats.distance``
+ - ``skbio.stats.ordination.base`` to ``skbio.stats.ordination``
+ - ``skbio.stats.ordination.canonical_correspondence_analysis`` to ``skbio.stats.ordination``
+ - ``skbio.stats.ordination.correspondence_analysis`` to ``skbio.stats.ordination``
+ - ``skbio.stats.ordination.principal_coordinate_analysis`` to ``skbio.stats.ordination``
+ - ``skbio.stats.ordination.redundancy_analysis`` to ``skbio.stats.ordination``
+ - ``skbio.tree.tree`` to ``skbio.tree``
+ - ``skbio.tree.trie`` to ``skbio.tree``
+ - ``skbio.util.misc`` to ``skbio.util``
+ - ``skbio.util.testing`` to ``skbio.util``
+ - ``skbio.util.exception`` to ``skbio.util``
+ - ``skbio.util.warning`` to ``skbio.util``
+* Moved ``skbio.distance`` contents into ``skbio.stats.distance``.
+
+### Miscellaneous
+
+* Relaxed requirement in ``BiologicalSequence.distance`` that sequences being compared are of equal length. This is relevant for Hamming distance, so the check is still performed in that case, but other distance metrics may not have that requirement. See [#507](https://github.com/biocore/scikit-bio/issues/507).
+* Renamed ``powertrip.py`` repo-checking script to ``checklist.py`` for clarity.
+* ``checklist.py`` now ensures that all unit tests import from a minimally deep API. For example, it will produce an error if ``skbio.core.distance.DistanceMatrix`` is used over ``skbio.DistanceMatrix``.
+* Extra dimension is no longer calculated in ``skbio.stats.spatial.procrustes``.
+* Expanded documentation in various subpackages.
+* Added new scikit-bio logo. Thanks [Alina Prassas](http://cargocollective.com/alinaprassas)!
+
+## Version 0.1.4 (2014-06-25)
+
+This is a pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
+
+### Features
+
+* Added Python implementations of Smith-Waterman and Needleman-Wunsch alignment as ``skbio.core.alignment.pairwise.local_pairwise_align`` and ``skbio.core.alignment.pairwise.global_pairwise_align``. These are much slower than native C implementations (e.g., ``skbio.core.alignment.local_pairwise_align_ssw``) and as a result raise an ``EfficiencyWarning`` when called, but are included as they serve as useful educational examples as they’re simple to experiment with.
+* Added ``skbio.core.diversity.beta.pw_distances`` and ``skbio.core.diversity.beta.pw_distances_from_table``. These provide convenient access to the ``scipy.spatial.distance.pdist`` *beta diversity* metrics from within scikit-bio. The ``skbio.core.diversity.beta.pw_distances_from_table`` function will only be available temporarily, until the ``biom.table.Table`` object is merged into scikit-bio (see [#489](https://github.com/biocore/scikit-bio/issues/489)), at which point ``skbio.core.di [...]
+* Added ``skbio.core.alignment.StockholmAlignment``, which provides support for parsing [Stockholm-formatted alignment files](http://sonnhammer.sbc.su.se/Stockholm.html) and working with those alignments in the context RNA secondary structural information.
+* Added ``skbio.core.tree.majority_rule`` function for computing consensus trees from a list of trees.
+
+### Backward-incompatible changes
+
+* Function ``skbio.core.alignment.align_striped_smith_waterman`` renamed to ``local_pairwise_align_ssw`` and now returns an ``Alignment`` object instead of an ``AlignmentStructure``
+* The following keyword-arguments for ``StripedSmithWaterman`` and ``local_pairwise_align_ssw`` have been renamed:
+    * ``gap_open`` -> ``gap_open_penalty``
+    * ``gap_extend`` -> ``gap_extend_penalty``
+    * ``match`` -> ``match_score``
+    * ``mismatch`` -> ``mismatch_score``
+* Removed ``skbio.util.sort`` module in favor of [natsort](https://pypi.python.org/pypi/natsort) package.
+
+### Miscellaneous
+
+* Added powertrip.py script to perform basic sanity-checking of the repo based on recurring issues that weren't being caught until release time; added to Travis build.
+* Added RELEASE.md with release instructions.
+* Added intersphinx mappings to docs so that "See Also" references to numpy, scipy, matplotlib, and pandas are hyperlinks.
+* The following classes are no longer ``namedtuple`` subclasses (see [#359](https://github.com/biocore/scikit-bio/issues/359) for the rationale):
+    * ``skbio.math.stats.ordination.OrdinationResults``
+    * ``skbio.math.gradient.GroupResults``
+    * ``skbio.math.gradient.CategoryResults``
+    * ``skbio.math.gradient.GradientANOVAResults``
+* Added coding guidelines draft.
+* Added new alpha diversity formulas to the ``skbio.math.diversity.alpha`` documentation.
+
+## Version 0.1.3 (2014-06-12)
+
+This is a pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
+
+### Features
+
+* Added ``enforce_qual_range`` parameter to ``parse_fastq`` (on by default, maintaining backward compatibility). This allows disabling of the quality score range-checking.
+* Added ``skbio.core.tree.nj``, which applies neighbor-joining for phylogenetic reconstruction.
+* Added ``bioenv``, ``mantel``, and ``pwmantel`` distance-based statistics to ``skbio.math.stats.distance`` subpackage.
+* Added ``skbio.math.stats.misc`` module for miscellaneous stats utility functions.
+* IDs are now optional when constructing a ``DissimilarityMatrix`` or ``DistanceMatrix`` (monotonically-increasing integers cast as strings are automatically used).
+* Added ``DistanceMatrix.permute`` method for randomly permuting rows and columns of a distance matrix.
+* Added the following methods to ``DissimilarityMatrix``: ``filter``, ``index``, and ``__contains__`` for ID-based filtering, index lookup, and membership testing, respectively.
+* Added ``ignore_comment`` parameter to ``parse_fasta`` (off by default, maintaining backward compatibility). This handles stripping the comment field from the header line (i.e., all characters beginning with the first space) before returning the label.
+* Added imports of ``BiologicalSequence``, ``NucleotideSequence``, ``DNA``, ``DNASequence``, ``RNA``, ``RNASequence``, ``Protein``, ``ProteinSequence``, ``DistanceMatrix``, ``align_striped_smith_waterman``, ``SequenceCollection``, ``Alignment``, ``TreeNode``, ``nj``, ``parse_fasta``, ``parse_fastq``, ``parse_qual``, ``FastaIterator``, ``FastqIterator``, ``SequenceIterator`` in ``skbio/__init__.py`` for convenient importing. For example, it's now possible to ``from skbio import Alignment [...]
+
+### Bug fixes
+
+* Fixed a couple of unit tests that could fail stochastically.
+* Added missing ``__init__.py`` files to a couple of test directories so that these tests won't be skipped.
+* ``parse_fastq`` now raises an error on dangling records.
+* Fixed several warnings that were raised while running the test suite with Python 3.4.
+
+### Backward-incompatible changes
+
+* Functionality imported from ``skbio.core.ssw`` must now be imported from ``skbio.core.alignment`` instead.
+
+### Miscellaneous
+
+* Code is now flake8-compliant; added flake8 checking to Travis build.
+* Various additions and improvements to documentation (API, installation instructions, developer instructions, etc.).
+* ``__future__`` imports are now standardized across the codebase.
+* New website front page and styling changes throughout. Moved docs site to its own versioned subdirectories.
+* Reorganized alignment data structures and algorithms (e.g., SSW code, ``Alignment`` class, etc.) into an ``skbio.core.alignment`` subpackage.
+
+## Version 0.1.1 (2014-05-16)
+
+Fixes to setup.py. This is a pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
+
+## Version 0.1.0 (2014-05-15)
+
+Initial pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..11008cf
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,139 @@
+Contributing to scikit-bio
+==========================
+
+[scikit-bio](http://www.scikit-bio.org) is an open source software package, and we welcome community contributions. You can find the source code and test code for scikit-bio under public revision control in the scikit-bio git repository on [GitHub](https://github.com/biocore/scikit-bio). We very much welcome contributions.
+
+This document covers what you should do to get started with contributing to scikit-bio. You should read this whole document before considering submitting code to scikit-bio. This will save time for both you and the scikit-bio developers.
+
+Type of Submissions
+-------------------
+
+Some of the types of contributions we're interested in are new features (big or small, but for big ones it's generally a good idea to ask us if we're interested in including it before starting development), bug fixes, and documentation updates, additions, and fixes.
+
+When considering submitting a new feature to scikit-bio, you should begin by posting an issue to the [scikit-bio issue tracker](https://github.com/biocore/scikit-bio/issues). The information that you include in that post will differ based on the type of contribution. Your contribution will also need to be fully tested (discussed further below).
+
+* For new features, you'll want to describe why the functionality that you are proposing to add is relevant. For it to be relevant, it should be demonstrably useful to scikit-bio users. This typically means that a new analytic method is implemented (you should describe why it's useful, ideally including a link to a paper that uses this method), or an existing method is enhanced (your implementation matches the performance of the pre-existing method while reducing runtime, memory consumpt [...]
+
+* For bug fixes, you should provide a detailed description of the bug so other developers can reproduce it. We take bugs in scikit-bio very seriously. Bugs can be related to errors in code, documentation, or tests. Errors in documentation or tests are usually updated in the next major release of scikit-bio. Errors in code that could result in incorrect results or inability to access certain functionality may result in a new minor release of scikit-bio.
+
+ You should include the following information in your bug report:
+
+ 1. The exact command or function call that you issue to create the bug.
+ 2. A link to all necessary input files for reproducing the bug. These files should only be as large as necessary to create the bug. For example, if you have an input file with 10,000 fasta-formatted sequences but the error only arises due to one of the sequences, create a new fasta file with only that sequence, run the command that was giving you problems, and verify that you still get an error. Then post that command and link to the trimmed fasta file. This is *extremely* useful to oth [...]
+
+* For documentation additions, you should first post an issue describing what you propose to add, where you'd like to add it in the documentation, and a description of why you think it's an important addition. For documentation improvements and fixes, you should post an issue describing what is currently wrong or missing, and how you propose to address it. For more information about building and contributing to scikit-bio's documentation, see [this guide](doc/README.md).
+
+When you post your issue, the scikit-bio developers will respond to let you know if we agree with the addition or change. It's very important that you go through this step to avoid wasting time working on a feature that we are not interested in including in scikit-bio.
+
+
+Getting started: "quick fixes"
+------------------------------
+
+Some of our issues are labeled as ``quick fix``. Working on [these issues](https://github.com/biocore/scikit-bio/issues?direction=desc&labels=quick+fix&milestone=&page=1&sort=updated&state=open) is a good way to get started with contributing to scikit-bio. These are usually small bugs or documentation errors that will only require one or a few lines of code to fix. Getting started by working on one of these issues will allow you to familiarize yourself with our development process before [...]
+
+
+Code Review
+-----------
+
+When you submit code to scikit-bio, it will be reviewed by one or more scikit-bio developers. These reviews are intended to confirm a few points:
+
+* Your code is sufficiently well-tested (see Testing Guidelines below).
+* Your code adheres to our Coding Guidelines (see Coding Guidelines below).
+* Your code is sufficiently well-documented (see Coding Guidelines below).
+* Your code provides relevant changes or additions to scikit-bio (Type of Submissions above).
+
+This process is designed to ensure the quality of scikit-bio, and can be a very useful experience for new developers.
+
+Particularly for big changes, if you'd like feedback on your code in the form of a code review as you work, you should request help in the issue that you created and one of the scikit-bio developers will work with you to perform regular code reviews. This can greatly reduce development time (and frustration) so we highly recommend that new developers take advantage of this rather than submitting a pull request with a massive amount of code in one chunk. That can lead to frustration when  [...]
+
+
+Submitting code to scikit-bio
+-----------------------------
+
+scikit-bio is hosted on [GitHub](http://www.github.com), and we use GitHub's [Pull Request](https://help.github.com/articles/using-pull-requests) mechanism for accepting submissions. You should go through the following steps to submit code to scikit-bio.
+
+1. Begin by [creating an issue](https://github.com/biocore/scikit-bio/issues) describing your proposed change. This should include a description of your proposed change (is it a new feature, a bug fix, etc.), and note in the issue description that you want to work on it. Once you hear back from a maintainer that it is OK to make changes (i.e., they dont't have local edits, they agree with the change you'd like to make, and they're comfortable with you editing their code), we will assign  [...]
+
+2. [Fork](https://help.github.com/articles/fork-a-repo) the scikit-bio repository on the GitHub website to your GitHub account.
+
+3. Clone your forked repository to the system where you'll be developing with ``git clone``.
+
+4. Ensure that you have the latest version of all files (especially important if you cloned a long time ago, but you'll need to do this before submitting changes regardless). You should do this by adding scikit-bio as a remote repository and then pulling from that repository. You'll only need to run the ``git remote`` step one time:
+```
+git checkout master
+git remote add upstream https://github.com/biocore/scikit-bio.git
+git pull upstream master
+```
+
+5. Create a new topic branch that you will make your changes in with ``git checkout -b``:
+```
+git checkout -b my-topic-branch
+```
+
+6. Run ``nosetests --with-doctest ; pep8 skbio setup.py`` to confirm that the tests pass before you make any changes.
+
+7. Make your changes, add them (with ``git add``), and commit them (with ``git commit``). Don't forget to update associated scripts and tests as necessary. You should make incremental commits, rather than one massive commit at the end. Write descriptive commit messages to accompany each commit.
+
+8. When you think you're ready to submit your code, again ensure that you have the latest version of all files in case some changed while you were working on your edits. You can do this by merging master into your topic branch:
+```
+git checkout my-topic-branch
+git pull upstream master
+```
+
+9. Run ``nosetests --with-doctest ; pep8 skbio setup.py`` to ensure that your changes did not break anything.
+
+10. Once the tests pass, you should push your changes to your forked repository on GitHub using:
+```
+git push origin my-topic-branch
+```
+
+11. Issue a [pull request](https://help.github.com/articles/using-pull-requests) on the GitHub website to request that we merge your branch's changes into scikit-bio's master branch. One of the scikit-bio developers will review your code at this stage. If we request changes (which is very common), *don't issue a new pull request*. You should make changes on your topic branch, and commit and push them to GitHub. Your pull request will update automatically.
+
+
+Coding Guidelines
+-----------------
+
+We adhere to the [PEP 8](http://www.python.org/dev/peps/pep-0008/) python coding guidelines for code and documentation standards. Before submitting any code to scikit-bio, you should read these carefully and apply the guidelines in your code.
+
+
+Testing Guidelines
+------------------
+
+All code that is added to scikit-bio must be unit tested, and the unit test code must be submitted in the same pull request as the library code that you are submitting. We will only merge code that is unit tested and that passes the [continuous integration build](https://github.com/biocore/scikit-bio/blob/master/.travis.yml). This build verifies that the:
+
+- Full test suite executes without errors.
+- Doctests execute correctly (currently only for Python 2.7).
+- C code can be correctly compiled.
+- Cython code is correctly generated.
+- All code is valid in Python 2.7 and >=3.3.
+- All tests import functionality from the appropriate minimally deep API.
+
+The scikit-bio coding guidelines describe our [expectations for unit tests](http://scikit-bio.org/development/coding_guidelines.html). You should review the unit test section before working on your test code.
+
+Tests can be executed using [nose](https://nose.readthedocs.org/en/latest/) by running `nosetests --with-doctest` from the base directory of the project or from within a Python or IPython session running the following code:
+
+``` python
+>>> import skbio
+>>> skbio.test()
+# full test suite is executed
+>>> skbio.parse.test()
+# tests for the parse module are executed
+```
+
+Note that this is possible because the lines below are added at the end of each `__init__.py` file in the package, so if you add a new module, be sure to include these lines in its `__init__.py`:
+
+```python
+from numpy.testing import Tester
+test = Tester().test
+```
+
+
+Documentation Guidelines
+------------------------
+
+We strive to keep scikit-bio's code well-documented, particularly its public-facing API. See our [documentation guide](doc/README.md) for more details on writing documentation in scikit-bio.
+
+Getting help with git
+=====================
+
+If you're new to ``git``, you'll probably find [gitref.org](http://gitref.org/) helpful.
diff --git a/COPYING.txt b/COPYING.txt
new file mode 100644
index 0000000..68bfae3
--- /dev/null
+++ b/COPYING.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2013--, scikit-bio development team.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+  list of conditions and the following disclaimer in the documentation and/or
+  other materials provided with the distribution.
+
+* Neither the names scikit-bio, skbio, or biocore nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..f986969
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,17 @@
+include CHANGELOG.md
+include COPYING.txt
+include CONTRIBUTING.md
+include README.rst
+include RELEASE.md
+include checklist.py
+
+graft assets
+graft doc
+graft ipynbs
+graft licenses
+graft skbio
+
+prune doc/build
+
+global-exclude *.pyc
+global-exclude *.pyo
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..443b1a0
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,164 @@
+::
+
+               _ _    _ _          _     _
+              (_) |  (_) |        | |   (_)
+      ___  ___ _| | ___| |_ ______| |__  _  ___
+     / __|/ __| | |/ / | __|______| '_ \| |/ _ \
+     \__ \ (__| |   <| | |_       | |_) | | (_) |
+     |___/\___|_|_|\_\_|\__|      |_.__/|_|\___/
+
+
+           Opisthokonta
+                   \  Amoebozoa
+                    \ /
+                     *    Euryarchaeota
+                      \     |_ Crenarchaeota
+                       \   *
+                        \ /
+                         *
+                        /
+                       /
+                      /
+                     *
+                    / \
+                   /   \
+        Proteobacteria  \
+                       Cyanobacteria
+
+|Build Status| |Coverage Status|
+
+scikit-bio is an open-source, BSD-licensed python package providing data structures, algorithms and educational resources for bioinformatics.
+
+To view scikit-bio's documentation, visit `scikit-bio.org
+<http://scikit-bio.org>`__.
+
+scikit-bio is currently in alpha. We are very actively developing it, and **backwards-incompatible interface changes can and will arise**. Once the API has started to solidify, we will strive to maintain backwards compatibility. We will provide deprecation warnings wherever possible in the scikit-bio code, documentation, and CHANGELOG.md.
+
+**Note:** Deprecation warnings will be issued using Python's ``DeprecationWarning`` class. Since Python 2.7, these types of warnings are **silenced by default**. When developing a tool that uses scikit-bio, we recommend enabling the display of deprecation warnings to be informed of upcoming API changes. For details on how to display deprecation warnings, see `Python's deprecation warning docs <https://docs.python.org/3/whatsnew/2.7.html#changes-to-the-handling-of-deprecation-warnings>`_.
+
+Installation of release version (recommended for most users)
+------------------------------------------------------------
+
+To install the latest release version of scikit-bio you should run::
+
+    pip install numpy
+    pip install scikit-bio
+
+Equivalently, you can use the ``conda`` package manager available in `Anaconda <http://continuum.io/downloads>`_ or `miniconda <http://conda.pydata.org/miniconda.html>`_ to install scikit-bio and all its dependencies, without having to compile them::
+
+     conda install scikit-bio
+
+Finally, most of scikit-bio's dependencies (in particular, the ones that are trickier to build) are also available, albeit only for Python 2, in `Canopy Express <https://www.enthought.com/canopy-express/>`_.
+
+You can verify your installation by running the scikit-bio unit tests as follows::
+
+    nosetests --with-doctest skbio
+
+Installation of development version
+-----------------------------------
+
+If you're interested in working with the latest development release of scikit-bio (recommended for developers only, as the development code can be unstable and less documented than the release code), you can clone the repository and install as follows. This will require that you have ``git`` installed.
+::
+
+    git clone git at github.com:biocore/scikit-bio.git
+    cd scikit-bio
+    pip install .
+
+After this completes, you can run the scikit-bio unit tests as follows. You must first ``cd`` out of the ``scikit-bio`` directory for the tests to pass (here we ``cd`` to the home directory).
+::
+
+    cd
+    nosetests --with-doctest skbio
+
+For developers of scikit-bio, if you don't want to be forced to re-install after every change, you can modify the above ``pip install`` command to::
+
+    pip install -e .
+
+This will build scikit-bio's Cython extensions, and will create a link in the ``site-packages`` directory to the scikit-bio source directory. When you then make changes to code in the source directory, those will be used (e.g., by the unit tests) without re-installing.
+
+Finally, if you don't want to use ``pip`` to install scikit-bio, and prefer to just put ``scikit-bio`` in your ``$PYTHONPATH``, at the minimum you should run::
+
+    python setup.py build_ext --inplace
+
+This will build scikit-bio's Cython extensions, but not create a link to the scikit-bio source directory in ``site-packages``. If this isn't done, using certain components of scikit-bio will be inefficient and will produce an ``EfficiencyWarning``.
+
+Getting help
+------------
+
+To get help with scikit-bio, you should use the `skbio <http://stackoverflow.com/questions/tagged/skbio>`_ tag on StackOverflow (SO). Before posting a question, check out SO's guide on how to `ask a question <http://stackoverflow.com/questions/how-to-ask>`_. The scikit-bio developers regularly monitor the skbio SO tag.
+
+Licensing
+---------
+
+scikit-bio is available under the new BSD license. See
+`COPYING.txt <https://github.com/biocore/scikit-bio/blob/master/COPYING.txt>`__ for scikit-bio's license, and the
+`licenses directory <https://github.com/biocore/scikit-bio/tree/master/licenses>`_ for the licenses of third-party software that is
+(either partially or entirely) distributed with scikit-bio.
+
+Projects using scikit-bio
+-------------------------
+
+Some of the projects that we know of that are using scikit-bio are:
+
+-  `QIIME <http://qiime.org/>`__
+-  `Emperor <http://biocore.github.io/emperor/>`__
+-  `An Introduction to Applied
+   Bioinformatics <http://caporasolab.us/An-Introduction-To-Applied-Bioinformatics/>`__
+-  `tax2tree <https://github.com/biocore/tax2tree>`__
+
+If you're using scikit-bio in your own projects, you can issue a
+pull request to add them to this list.
+
+scikit-bio development
+----------------------
+
+If you're interested in getting involved in or learning about
+scikit-bio development, see `CONTRIBUTING.md <https://github.com/biocore/scikit-bio/blob/master/CONTRIBUTING.md>`__.
+
+See the `list of all of scikit-bio's contributors
+<https://github.com/biocore/scikit-bio/graphs/contributors>`__.
+
+Summaries of our weekly developer meetings are posted on
+HackPad. Click `here
+<https://hackpad.com/2014-scikit-bio-developer-meeting-notes-1S2RbMqy0iM>`__
+to view the meeting notes for 2014.
+
+The pre-history of scikit-bio
+-----------------------------
+
+scikit-bio began from code derived from `PyCogent
+<http://www.pycogent.org>`__ and `QIIME <http://www.qiime.org>`__, and
+the contributors and/or copyright holders have agreed to make the code
+they wrote for PyCogent and/or QIIME available under the BSD
+license. The contributors to PyCogent and/or QIIME modules that have
+been ported to scikit-bio are: Rob Knight (`@rob-knight
+<https://github.com/rob-knight>`__), Gavin Huttley (`@gavin-huttley
+<https://github.com/gavin-huttley>`__), Daniel McDonald (`@wasade
+<https://github.com/wasade>`__), Micah Hamady, Antonio Gonzalez
+(`@antgonza <https://github.com/antgonza>`__), Sandra Smit, Greg
+Caporaso (`@gregcaporaso <https://github.com/gregcaporaso>`__), Jai
+Ram Rideout (`@ElBrogrammer <https://github.com/ElBrogrammer>`__),
+Cathy Lozupone (`@clozupone <https://github.com/clozupone>`__), Mike Robeson
+(`@mikerobeson <https://github.com/mikerobeson>`__), Marcin Cieslik,
+Peter Maxwell, Jeremy Widmann, Zongzhi Liu, Michael Dwan, Logan Knecht
+(`@loganknecht <https://github.com/loganknecht>`__), Andrew Cochran,
+Jose Carlos Clemente (`@cleme <https://github.com/cleme>`__), Damien
+Coy, Levi McCracken, Andrew Butterfield, Will Van Treuren (`@wdwvt1
+<https://github.com/wdwvt1>`__), Justin Kuczynski (`@justin212k
+<https://github.com/justin212k>`__), Jose Antonio Navas Molina
+(`@josenavas <https://github.com/josenavas>`__), Matthew Wakefield
+(`@genomematt <https://github.com/genomematt>`__) and Jens Reeder
+(`@jensreeder <https://github.com/jensreeder>`__).
+
+Logo
+----
+
+scikit-bio's logo was created by `Alina Prassas <http://cargocollective.com/alinaprassas>`_.
+scikit-bio's ASCII art tree was created by `@gregcaporaso
+<https://github.com/gregcaporaso>`_. Our text logo was created at `patorjk.com
+<http://patorjk.com/software/taag/>`__.
+
+.. |Build Status| image:: https://travis-ci.org/biocore/scikit-bio.svg?branch=master
+   :target: https://travis-ci.org/biocore/scikit-bio
+.. |Coverage Status| image:: https://coveralls.io/repos/biocore/scikit-bio/badge.png
+   :target: https://coveralls.io/r/biocore/scikit-bio
diff --git a/RELEASE.md b/RELEASE.md
new file mode 100644
index 0000000..20de01a
--- /dev/null
+++ b/RELEASE.md
@@ -0,0 +1,124 @@
+# Releasing a new version of scikit-bio
+
+## Introduction
+
+Releasing a piece of software can simultaneously be an invigorating, intimidating, horrifying, and cathartic experience. This guide aims to make the release process as smooth as possible.
+
+To illustrate examples of commands you might run, let's assume that the current version is 1.2.3-dev and we want to release version 1.2.4. Our versioning system is based on Semantic Versioning, which you can read about at http://semver.org.
+
+**Note:** The following commands assume you are in the top-level directory of the scikit-bio repository unless otherwise noted. They also assume that you have [virtualenv](http://virtualenv.readthedocs.org/en/latest/#)/[virtualenvwrapper](http://virtualenvwrapper.readthedocs.org/en/latest/) installed.
+
+**Tip:** It can be efficient to have the help of a couple other devs, as some steps can be run in parallel. It's also useful to have a variety of platforms/environments to test on during the release process, so find friends that are Linux and Mac users!
+
+## Prep the release (part 1)
+
+1. Ensure the Travis build is passing against master.
+
+2. Update the version strings (1.2.3-dev) to the new version (1.2.4). There should only be two places this needs to be done: ``setup.py`` and ``skbio/__init__.py``. It's a good idea to ``grep`` for the current version string just to be safe:
+
+        grep -ir '1\.2\.3-dev' *
+
+3. Update ``CHANGELOG.md`` to include descriptions of the changes that made it into this release. Be sure to update the heading to include the new version (1.2.4) and the date of the release. Use the existing structure in the file as a template/guide.
+
+4. Submit a pull request with these changes and let Travis run.
+
+## Build the documentation
+
+In the meantime, you can build the documentation and update the website.
+
+**Note:** You will need to **fully install** (including built extensions) the exact version of scikit-bio that you are editing so that Sphinx will pull docstrings from the correct version of the code. **Make sure the version of scikit-bio that is imported by ``import skbio`` is the correct one!**
+
+1. Build the documentation locally:
+
+        make -C doc clean && make -C doc html
+
+2. Switch to the ``gh-pages`` branch of the repository.
+
+3. Remove everything from ``docs/latest/``:
+
+        git rm -rf docs/latest/*
+
+4. Create a directory for the new version of the docs and recreate the ``latest/`` directory:
+
+        mkdir docs/1.2.4
+        mkdir docs/latest
+
+5. Copy over the built documentation to both ``docs/1.2.4/`` and ``docs/latest``:
+
+        cp -r <path to skbio repo>/doc/build/html/* docs/1.2.4/
+        cp -r <path to skbio repo>/doc/build/html/* docs/latest/
+
+6. Add a new list item to ``index.html`` to link to ``docs/1.2.4/index.html``.
+
+7. Test out your changes by opening the site locally in a browser. Be sure to check the error console for any errors.
+
+8. Commit and push (either directly or as a pull request) to have the website updated. **Note:** This updates the live website, so be sure to poke through the live site to make sure things aren't broken and that version strings are correct.
+
+## Prep the release (part 2)
+
+If the tests passed on Travis (see step 4 of **Prep the release (part 1)** above), merge the pull request to update the version strings to 1.2.4.
+
+## Tag the release
+
+From the [scikit-bio GitHub page](https://github.com/biocore/scikit-bio), click on the releases tab and draft a new release. Use the version number for the tag name (1.2.4) and create the tag against master. Fill in a release title that is consistent with previous release titles and add a summary of the release (linking to ``CHANGELOG.md`` is a good idea). This release summary will be the primary information that we point users to when we announce the release. This is (at least experimen [...]
+
+Once the release is created on GitHub, it's a good idea to test out the release tarball before publishing to PyPI:
+
+1. Create a new virtualenv.
+
+2. Download the release tarball from GitHub, extract it, and ``cd`` into the top-level directory.
+
+3. Install the release and run the tests:
+
+        pip install numpy
+        pip install .
+        cd
+        nosetests --with-doctest skbio
+
+4. During this process (it can take a while to install all of scikit-bio's dependencies), submit a pull request to update the version strings from 1.2.4 to 1.2.4-dev. Use the same strategy described above to update the version strings. Update ``CHANGELOG.md`` to include a new section for 1.2.4-dev (there won't be any changes to note here yet). **Do not merge this pull request yet.**
+
+## Test the source distribution
+
+Assuming the GitHub release tarball correctly installs and passes its tests, you're now ready to test the creation of the source distribution (``sdist``) that will be published to PyPI. It is important to test the source distribution because it is created in an entirely different way than the release tarball on GitHub. Thus, there is the danger of having two different release tarballs: the one created on GitHub and the one uploaded to PyPI.
+
+**Important:** Check ``MANIFEST.in`` to ensure that the files and directories it references still exist. Some may have been removed, renamed, or there may be new files/dirs that need to be included in the ``sdist`` release. This step in the release process has caused the most hangups; don't neglect ``MANIFEST.in``!
+
+1. Download the release tarball from GitHub, extract it, and ``cd`` into the top-level directory.
+
+2. Build a source distribution:
+
+        python setup.py sdist
+
+3. Create a new virtualenv and run:
+
+        cd
+        pip install numpy
+        pip install <path to extracted scikit-bio release>/dist/scikit-bio-1.2.4.tar.gz
+        nosetests --with-doctest skbio
+
+4. If everything goes well, it is finally time to push the release to PyPI:
+
+        python setup.py sdist upload
+
+    You must have the proper login credentials to add a release to PyPI. Currently [@gregcaporaso](https://github.com/gregcaporaso) has these, but they can be shared with other release managers.
+
+5. Once the release is available on PyPI, do a final round of testing. Create a new virtualenv and run:
+
+        cd
+        pip install numpy
+        pip install scikit-bio
+        nosetests --with-doctest skbio
+
+If this succeeds, the release appears to be a success!
+
+## Post-release cleanup
+
+1. Merge the latest pull request to update version strings to 1.2.4-dev.
+
+2. Close the release milestone on the GitHub issue tracker.
+
+3. Send an email to the skbio users and developers lists, and anyone else who might be interested (e.g., lab mailing lists). You might include links to the GitHub release page and ``CHANGELOG.md``.
+
+4. Tweet about the release, including a link to the GitHub release page (for example, for 0.1.3, the URL to include was https://github.com/biocore/scikit-bio/releases/tag/0.1.3).
+
+5. :beers:
diff --git a/assets/.no.gif b/assets/.no.gif
new file mode 100644
index 0000000..842577b
Binary files /dev/null and b/assets/.no.gif differ
diff --git a/assets/horizontal_powered_by.png b/assets/horizontal_powered_by.png
new file mode 100644
index 0000000..32a55ba
Binary files /dev/null and b/assets/horizontal_powered_by.png differ
diff --git a/assets/horizontal_powered_by.svg b/assets/horizontal_powered_by.svg
new file mode 100644
index 0000000..9cf2f80
--- /dev/null
+++ b/assets/horizontal_powered_by.svg
@@ -0,0 +1,152 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   width="812.5"
+   height="137.5"
+   xml:space="preserve"
+   sodipodi:docname="logo.pdf"><metadata
+     id="metadata8"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs6"><clipPath
+       clipPathUnits="userSpaceOnUse"
+       id="clipPath16"><path
+         d="M 0,110 650,110 650,0 0,0 0,110 z"
+         id="path18" /></clipPath></defs><sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="640"
+     inkscape:window-height="480"
+     id="namedview4"
+     showgrid="false"
+     inkscape:zoom="0.39876923"
+     inkscape:cx="406.25"
+     inkscape:cy="68.75"
+     inkscape:window-x="65"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g10" /><g
+     id="g10"
+     inkscape:groupmode="layer"
+     inkscape:label="logo"
+     transform="matrix(1.25,0,0,-1.25,0,137.5)"><g
+       id="g12"><g
+         id="g14"
+         clip-path="url(#clipPath16)"><g
+           id="g20"
+           transform="translate(292.2666,65.0918)"><path
+             d="m 0,0 -21.41,0 0,-7.91 19.582,0 c 2.859,0 4.81,-0.498 5.853,-1.494 1.043,-0.996 1.565,-2.901 1.565,-5.713 l 0,-3.656 c 0,-2.813 -0.51,-4.717 -1.529,-5.713 -1.02,-0.996 -2.983,-1.494 -5.889,-1.494 l -18.563,0 c -2.906,0 -4.88,0.498 -5.923,1.494 -1.043,0.996 -1.565,2.9 -1.565,5.713 l 0,0.843 5.274,1.09 0,-4.535 23.027,0 0,8.578 -19.652,0 c -2.883,0 -4.84,0.498 -5.872,1.494 -1.031,0.996 -1.546,2.901 -1.546,5.713 l 0,2.883 c 0,2.789 0.515,4.693 1.546,5.713 1.032,1.019 2.989,1 [...]
+             style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path22" /></g><g
+           id="g24"
+           transform="translate(339.3467,65.0918)"><path
+             d="m 0,0 -21.305,0 0,-21.375 21.305,0 0,7.277 5.766,-1.09 0,-3.585 c 0,-2.79 -0.522,-4.688 -1.565,-5.696 -1.043,-1.008 -3.017,-1.511 -5.924,-1.511 l -17.929,0 c -2.907,0 -4.869,0.486 -5.889,1.459 -1.02,0.972 -1.529,2.888 -1.529,5.748 l 0,16.066 c 0,2.836 0.515,4.752 1.547,5.748 1.031,0.996 2.988,1.494 5.871,1.494 l 17.929,0 c 2.907,0 4.881,-0.51 5.924,-1.529 1.043,-1.02 1.565,-2.924 1.565,-5.713 l 0,-2.883 L 0,-6.539 0,0 z"
+             style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path26" /></g><path
+           d="m 364.771,39.111 -5.73,0 0,30.445 5.73,0 0,-30.445 z"
+           style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           id="path28" /><path
+           d="m 443.614,39.111 -5.73,0 0,30.445 5.73,0 0,-30.445 z"
+           style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           id="path30" /><g
+           id="g32"
+           transform="translate(474.0654,65.0918)"><path
+             d="m 0,0 0,-25.98 -5.73,0 0,25.98 -14.415,0 0,4.535 34.629,0 L 14.484,0 0,0 z"
+             style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path34" /></g><path
+           d="m 495.974,60.943 15.68,0 0,-5.273 -15.68,0 0,5.273 z"
+           style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           id="path36" /><g
+           id="g38"
+           transform="translate(550.0264,52.3652)"><path
+             d="m 0,0 -16.629,0 0,-7.77 16.629,0 c 1.406,0 2.361,0.2 2.865,0.598 0.504,0.399 0.756,1.102 0.756,2.109 l 0,2.321 c 0,1.008 -0.252,1.717 -0.756,2.127 C 2.361,-0.205 1.406,0 0,0 m 0,12.234 -16.629,0 0,-7.207 16.629,0 c 1.289,0 2.168,0.164 2.637,0.493 0.468,0.328 0.703,0.89 0.703,1.687 l 0,2.883 c 0,0.797 -0.229,1.353 -0.686,1.67 C 2.197,12.076 1.313,12.234 0,12.234 m -23.941,-25.488 0,30.445 27.351,0 c 2.555,0 4.395,-0.462 5.52,-1.388 1.125,-0.926 1.687,-2.432 1.687,-4.518 l  [...]
+             style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path40" /></g><path
+           d="m 575.567,69.557 7.383,0 0,-30.445 -7.383,0 0,30.445 z"
+           style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           id="path42" /><path
+           d="m 605.356,44.947 24.539,0 0,19.02 -24.539,0 0,-19.02 z m -7.418,17.438 c 0,2.765 0.54,4.652 1.618,5.66 1.078,1.008 3.175,1.512 6.293,1.512 l 23.554,0 c 3.141,0 5.25,-0.504 6.317,-1.512 1.066,-1.008 1.593,-2.895 1.593,-5.66 l 0,-16.067 c 0,-2.789 -0.539,-4.687 -1.628,-5.695 -1.09,-1.008 -3.188,-1.512 -6.282,-1.512 l -23.554,0 c -3.118,0 -5.215,0.504 -6.293,1.512 -1.078,1.008 -1.618,2.906 -1.618,5.695 l 0,16.067 z"
+           style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           id="path44" /><g
+           id="g46"
+           transform="translate(385.1572,100.188)"><path
+             d="m 0,0 c -1.031,-22.833 3.512,-45.326 3.629,-68.121 0.014,-2.55 8.376,-0.968 8.359,2.241 C 11.873,-43.482 7.34,-21.361 8.352,1.069 8.505,4.465 0.123,2.73 0,0"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path48" /></g><g
+           id="g50"
+           transform="translate(397.1553,42.6694)"><path
+             d="M 0,0 C 11.679,8.217 18.269,19.788 25.233,31.917 27.63,36.09 19.119,36.368 17.2,33.026 10.87,22.001 4.998,11.543 -5.613,4.075 -10.252,0.812 -3.493,-2.458 0,0"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path52" /></g><g
+           id="g54"
+           transform="translate(391.8623,40.8809)"><path
+             d="m 0,0 c 3.749,0.386 9.625,-5.464 12.45,-7.592 3.765,-2.836 7.355,-5.903 10.91,-8.993 7.206,-6.263 14.142,-12.828 21.413,-19.018 2.16,-1.837 9.915,1.386 7.782,3.202 C 43.479,-24.677 34.883,-16.41 25.732,-8.773 18.938,-3.104 10.673,5.345 1.211,4.371 -2.76,3.962 -5.813,-0.599 0,0"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path56" /></g><g
+           id="g58"
+           transform="translate(13.4189,47.918)"><path
+             d="m 0,0 c 0,-2.162 0.539,-3.756 1.605,-4.793 1.073,-1.031 2.368,-1.547 3.891,-1.547 1.547,0 2.871,0.533 3.973,1.606 1.101,1.072 1.652,2.73 1.652,4.974 0,2.139 -0.539,3.744 -1.617,4.811 C 8.432,6.117 7.143,6.645 5.648,6.645 4.166,6.645 2.854,6.082 1.711,4.945 0.574,3.814 0,2.162 0,0 m -3.146,-14.742 0,23.238 3.169,0 0,-2.18 c 0.745,0.856 1.588,1.495 2.532,1.922 0.943,0.428 2.08,0.639 3.422,0.639 1.752,0 3.298,-0.369 4.64,-1.107 1.342,-0.739 2.356,-1.782 3.035,-3.124 0.686,-1 [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path60" /></g><g
+           id="g62"
+           transform="translate(34.1787,48.0176)"><path
+             d="m 0,0 c 0,-2.156 0.574,-3.762 1.723,-4.834 1.142,-1.072 2.59,-1.605 4.33,-1.605 1.728,0 3.164,0.539 4.312,1.611 1.143,1.078 1.717,2.719 1.717,4.922 0,2.074 -0.574,3.65 -1.728,4.722 -1.155,1.073 -2.59,1.606 -4.301,1.606 -1.74,0 -3.188,-0.533 -4.33,-1.6 C 0.574,3.756 0,2.15 0,0 m -3.58,0 c 0,3.111 1.06,5.414 3.176,6.914 1.763,1.242 3.914,1.863 6.457,1.863 2.824,0 5.127,-0.756 6.92,-2.267 1.793,-1.518 2.689,-3.604 2.689,-6.276 0,-2.162 -0.398,-3.861 -1.189,-5.103 C 13.682,-6 [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path64" /></g><g
+           id="g66"
+           transform="translate(57.3174,39.6152)"><path
+             d="m 0,0 -6.281,16.799 3.597,0 L 0.58,7.102 1.799,3.498 c 0.053,0.176 0.41,1.33 1.066,3.463 l 3.264,9.838 3.58,0 3.076,-9.744 1.026,-3.211 1.177,3.246 3.516,9.709 3.387,0 L 15.469,0 11.854,0 8.59,10.061 7.793,12.926 3.639,0 0,0 z"
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path68" /></g><g
+           id="g70"
+           transform="translate(84.3584,49.6289)"><path
+             d="M 0,0 11.467,0 C 11.314,1.412 10.875,2.473 10.154,3.182 9.047,4.277 7.605,4.828 5.842,4.828 4.242,4.828 2.9,4.389 1.811,3.51 0.721,2.637 0.117,1.465 0,0 m 11.432,-4.605 3.591,-0.364 c -0.562,-1.717 -1.617,-3.052 -3.146,-4.002 -1.535,-0.949 -3.498,-1.424 -5.883,-1.424 -3,0 -5.385,0.756 -7.142,2.274 -1.758,1.512 -2.637,3.633 -2.637,6.363 0,2.83 0.885,5.022 2.666,6.586 1.781,1.559 4.084,2.338 6.92,2.338 2.748,0 4.992,-0.762 6.732,-2.291 1.74,-1.529 2.608,-3.686 2.608,-6.457  [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path72" /></g><g
+           id="g74"
+           transform="translate(103.3252,39.6152)"><path
+             d="m 0,0 0,16.799 3.135,0 0,-2.543 c 0.797,1.189 1.535,1.974 2.209,2.355 0.679,0.381 1.424,0.569 2.238,0.569 1.172,0 2.361,-0.305 3.574,-0.914 L 9.955,13.623 c -0.85,0.41 -1.699,0.615 -2.549,0.615 -0.761,0 -1.447,-0.187 -2.051,-0.562 C 4.752,13.301 4.318,12.785 4.061,12.117 3.674,11.104 3.48,9.996 3.48,8.795 L 3.48,0 0,0 z"
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path76" /></g><g
+           id="g78"
+           transform="translate(118.7822,49.6289)"><path
+             d="M 0,0 11.467,0 C 11.314,1.412 10.875,2.473 10.154,3.182 9.047,4.277 7.605,4.828 5.842,4.828 4.242,4.828 2.9,4.389 1.811,3.51 0.721,2.637 0.117,1.465 0,0 m 11.432,-4.605 3.591,-0.364 c -0.562,-1.717 -1.617,-3.052 -3.146,-4.002 -1.535,-0.949 -3.498,-1.424 -5.883,-1.424 -3,0 -5.385,0.756 -7.142,2.274 -1.758,1.512 -2.637,3.633 -2.637,6.363 0,2.83 0.885,5.022 2.666,6.586 1.781,1.559 4.084,2.338 6.92,2.338 2.748,0 4.992,-0.762 6.732,-2.291 1.74,-1.529 2.608,-3.686 2.608,-6.457  [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path80" /></g><g
+           id="g82"
+           transform="translate(140.1045,48)"><path
+             d="m 0,0 c 0,-2.15 0.551,-3.762 1.658,-4.828 1.108,-1.061 2.42,-1.594 3.926,-1.594 1.523,0 2.812,0.51 3.879,1.524 1.06,1.019 1.594,2.572 1.594,4.658 0,2.303 -0.539,3.99 -1.623,5.062 C 8.35,5.9 7.014,6.439 5.432,6.439 3.885,6.439 2.59,5.924 1.553,4.887 0.516,3.855 0,2.227 0,0 m 10.998,-8.385 0,2.121 c -1.301,-1.67 -3.217,-2.502 -5.742,-2.502 -1.635,0 -3.141,0.37 -4.512,1.108 -1.377,0.738 -2.437,1.769 -3.193,3.094 C -3.205,-3.24 -3.58,-1.717 -3.58,0 c 0,1.676 0.34,3.199 1.025, [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path84" /></g><g
+           id="g86"
+           transform="translate(173.1924,48.1406)"><path
+             d="m 0,0 c 0,-2.016 0.334,-3.469 1.002,-4.365 1.096,-1.465 2.578,-2.198 4.447,-2.198 1.524,0 2.836,0.54 3.949,1.624 1.108,1.078 1.659,2.689 1.659,4.828 0,2.197 -0.528,3.814 -1.594,4.857 C 8.402,5.789 7.113,6.316 5.607,6.316 4.084,6.316 2.771,5.771 1.664,4.693 0.551,3.609 0,2.045 0,0 m 0.035,-8.525 -3.228,0 0,23.191 3.48,0 0,-8.273 c 1.471,1.505 3.346,2.261 5.631,2.261 1.26,0 2.455,-0.205 3.586,-0.621 1.125,-0.422 2.057,-1.002 2.783,-1.758 0.727,-0.755 1.295,-1.664 1.711,-2.7 [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path88" /></g><g
+           id="g90"
+           transform="translate(191.4971,33.1465)"><path
+             d="M 0,0 -0.387,2.672 C 0.375,2.502 1.037,2.42 1.605,2.42 2.379,2.42 3,2.525 3.463,2.736 3.926,2.947 4.307,3.24 4.605,3.621 4.822,3.908 5.174,4.611 5.666,5.742 5.73,5.9 5.836,6.129 5.977,6.439 l -7.793,16.829 3.75,0 4.271,-9.727 C 6.762,12.305 7.26,11.01 7.699,9.65 c 0.399,1.307 0.873,2.584 1.43,3.827 l 4.389,9.791 3.48,0 L 9.188,6.182 C 8.35,4.336 7.699,3.07 7.23,2.373 6.615,1.436 5.906,0.744 5.104,0.305 4.307,-0.129 3.352,-0.352 2.244,-0.352 1.57,-0.352 0.826,-0.234 0,0"
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path92" /></g><g
+           id="g94"
+           transform="translate(236.3447,-0.8613)"><path
+             d="m 0,0 c -0.621,7.82 -0.945,15.641 -1.166,23.461 -0.139,3.91 -0.169,7.82 -0.25,11.73 l -0.083,11.731 0.079,11.73 c 0.083,3.911 0.109,7.821 0.249,11.731 0.22,7.82 0.547,15.64 1.171,23.461 0.624,-7.821 0.951,-15.641 1.171,-23.461 0.14,-3.91 0.166,-7.82 0.249,-11.731 L 1.499,46.922 1.416,35.191 C 1.335,31.281 1.305,27.371 1.166,23.461 0.945,15.641 0.621,7.82 0,0"
+             style="fill:#515257;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path96" /></g></g></g></g></svg>
\ No newline at end of file
diff --git a/assets/logo.png b/assets/logo.png
new file mode 100644
index 0000000..49a7bce
Binary files /dev/null and b/assets/logo.png differ
diff --git a/assets/logo.svg b/assets/logo.svg
new file mode 100644
index 0000000..5af9e22
--- /dev/null
+++ b/assets/logo.svg
@@ -0,0 +1,76 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   version="1.1"
+   width="900"
+   height="250"
+   id="svg2"
+   xml:space="preserve"><metadata
+     id="metadata8"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs6"><clipPath
+       id="clipPath16"><path
+         d="M 0,200 720,200 720,0 0,0 0,200 z"
+         id="path18" /></clipPath></defs><g
+     transform="matrix(1.25,0,0,-1.25,0,250)"
+     id="g10"><g
+       id="g12"><g
+         clip-path="url(#clipPath16)"
+         id="g14"><g
+           transform="translate(63.7012,122.0288)"
+           id="g20"><path
+             d="m 0,0 -40.188,0 0,-14.848 36.756,0 c 5.368,0 9.031,-0.935 10.988,-2.804 1.958,-1.87 2.936,-5.445 2.936,-10.724 l 0,-6.863 c 0,-5.279 -0.957,-8.853 -2.87,-10.723 -1.914,-1.87 -5.599,-2.805 -11.054,-2.805 l -34.842,0 c -5.455,0 -9.162,0.935 -11.12,2.805 -1.958,1.87 -2.936,5.444 -2.936,10.723 l 0,1.584 9.898,2.046 0,-8.513 43.224,0 0,16.101 -36.889,0 c -5.411,0 -9.085,0.936 -11.02,2.805 -1.936,1.87 -2.904,5.445 -2.904,10.723 l 0,5.412 c 0,5.235 0.968,8.81 2.904,10.723 1.935, [...]
+             id="path22"
+             style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
+           transform="translate(152.0732,122.0288)"
+           id="g24"><path
+             d="m 0,0 -39.99,0 0,-40.122 39.99,0 0,13.66 10.822,-2.046 0,-6.731 c 0,-5.235 -0.979,-8.799 -2.936,-10.69 -1.958,-1.892 -5.664,-2.838 -11.12,-2.838 l -33.655,0 c -5.455,0 -9.139,0.913 -11.053,2.739 -1.913,1.826 -2.871,5.422 -2.871,10.789 l 0,30.158 c 0,5.323 0.968,8.919 2.904,10.789 1.935,1.87 5.609,2.805 11.02,2.805 l 33.655,0 c 5.456,0 9.162,-0.957 11.12,-2.871 1.957,-1.913 2.936,-5.488 2.936,-10.723 l 0,-5.412 L 0,-12.274 0,0 z"
+             id="path26"
+             style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><path
+           d="m 199.795,73.262 -10.757,0 0,57.147 10.757,0 0,-57.147 z"
+           id="path28"
+           style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
+           d="m 347.788,73.262 -10.756,0 0,57.147 10.756,0 0,-57.147 z"
+           id="path30"
+           style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><g
+           transform="translate(404.9463,122.0288)"
+           id="g32"><path
+             d="m 0,0 0,-48.767 -10.756,0 0,48.767 -27.057,0 0,8.513 65.001,0 L 27.188,0 0,0 z"
+             id="path34"
+             style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><path
+           d="m 446.069,114.242 29.432,0 0,-9.898 -29.432,0 0,9.898 z"
+           id="path36"
+           style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><g
+           transform="translate(547.5293,98.1401)"
+           id="g38"><path
+             d="m 0,0 -31.214,0 0,-14.583 31.214,0 c 2.64,0 4.432,0.374 5.378,1.121 0.945,0.748 1.419,2.068 1.419,3.96 l 0,4.355 c 0,1.892 -0.474,3.223 -1.419,3.992 C 4.432,-0.385 2.64,0 0,0 m 0,22.965 -31.214,0 0,-13.528 31.214,0 c 2.419,0 4.069,0.308 4.949,0.924 0.88,0.616 1.32,1.671 1.32,3.167 l 0,5.411 c 0,1.496 -0.429,2.541 -1.288,3.135 C 4.124,22.668 2.463,22.965 0,22.965 m -44.939,-47.843 0,57.148 51.339,0 c 4.796,0 8.249,-0.87 10.361,-2.607 2.112,-1.738 3.168,-4.564 3.168,-8.48 l [...]
+             id="path40"
+             style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><path
+           d="m 595.471,130.41 13.857,0 0,-57.147 -13.857,0 0,57.147 z"
+           id="path42"
+           style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
+           d="m 651.386,84.216 46.062,0 0,35.701 -46.062,0 0,-35.701 z m -13.924,32.732 c 0,5.191 1.013,8.732 3.036,10.624 2.023,1.892 5.961,2.838 11.812,2.838 l 44.213,0 c 5.896,0 9.855,-0.946 11.857,-2.838 2.001,-1.892 2.991,-5.433 2.991,-10.624 l 0,-30.158 c 0,-5.235 -1.012,-8.799 -3.058,-10.69 -2.045,-1.892 -5.983,-2.838 -11.79,-2.838 l -44.213,0 c -5.851,0 -9.789,0.946 -11.812,2.838 -2.023,1.891 -3.036,5.455 -3.036,10.69 l 0,30.158 z"
+           id="path44"
+           style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><g
+           transform="translate(238.0605,187.9058)"
+           id="g46"><path
+             d="m 0,0 c -2.056,-42.858 6.694,-85.08 6.813,-127.865 0.013,-4.78 15.708,-1.82 15.691,4.204 C 22.387,-81.621 13.658,-40.09 15.678,2.008 15.983,8.375 0.246,5.13 0,0"
+             id="path48"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
+           transform="translate(260.583,79.9512)"
+           id="g50"><path
+             d="m 0,0 c 10.269,7.129 19.204,15.785 26.621,25.847 7.882,10.693 14.135,22.561 20.742,34.052 2.021,3.513 -1.786,6.557 -4.888,6.988 -4.099,0.569 -8.146,-1.35 -10.19,-4.906 C 20.433,41.369 9.427,21.512 -10.537,7.65 -13.405,5.66 -14.383,1.403 -11.277,-0.846 -7.892,-3.299 -3.166,-2.199 0,0"
+             id="path52"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
+           transform="translate(251.6475,76.5835)"
+           id="g54"><path
+             d="m 0,0 c 2.979,0.423 5.394,-1.374 7.73,-3.035 3.59,-2.552 7.229,-5.034 10.801,-7.611 7.694,-5.549 15.014,-11.606 22.206,-17.782 14.639,-12.57 28.594,-25.911 43.305,-38.399 4.059,-3.446 18.609,2.609 14.605,6.008 -16.079,13.65 -31.302,28.27 -47.404,41.895 C 43.175,-12.096 34.824,-5.585 26.046,0.31 22.599,2.625 19.44,5.307 15.642,7.053 11.513,8.951 6.654,8.827 2.271,8.205 -0.608,7.796 -5.774,6.377 -6.691,3.052 -7.62,-0.317 -1.795,-0.255 0,0"
+             id="path56"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g></g></g></g></svg>
\ No newline at end of file
diff --git a/assets/logo_and_powered.pdf b/assets/logo_and_powered.pdf
new file mode 100644
index 0000000..4adecec
Binary files /dev/null and b/assets/logo_and_powered.pdf differ
diff --git a/assets/vertical_powered_by.png b/assets/vertical_powered_by.png
new file mode 100644
index 0000000..5afedd5
Binary files /dev/null and b/assets/vertical_powered_by.png differ
diff --git a/assets/vertical_powered_by.svg b/assets/vertical_powered_by.svg
new file mode 100644
index 0000000..730e8b3
--- /dev/null
+++ b/assets/vertical_powered_by.svg
@@ -0,0 +1,152 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   width="487.5"
+   height="218.75"
+   xml:space="preserve"
+   sodipodi:docname="logo.pdf"><metadata
+     id="metadata8"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs6"><clipPath
+       clipPathUnits="userSpaceOnUse"
+       id="clipPath16"><path
+         d="M 0,175 390,175 390,0 0,0 0,175 z"
+         id="path18" /></clipPath></defs><sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="640"
+     inkscape:window-height="480"
+     id="namedview4"
+     showgrid="false"
+     inkscape:zoom="0.66461538"
+     inkscape:cx="243.75"
+     inkscape:cy="109.375"
+     inkscape:window-x="65"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g10" /><g
+     id="g10"
+     inkscape:groupmode="layer"
+     inkscape:label="logo"
+     transform="matrix(1.25,0,0,-1.25,0,218.75)"><g
+       id="g12"><g
+         id="g14"
+         clip-path="url(#clipPath16)"><g
+           id="g20"
+           transform="translate(39.0928,67.9746)"><path
+             d="m 0,0 -21.41,0 0,-7.911 19.582,0 c 2.859,0 4.81,-0.498 5.853,-1.494 1.043,-0.996 1.565,-2.901 1.565,-5.713 l 0,-3.656 c 0,-2.813 -0.51,-4.717 -1.529,-5.713 -1.02,-0.996 -2.983,-1.494 -5.889,-1.494 l -18.563,0 c -2.906,0 -4.88,0.498 -5.923,1.494 -1.043,0.996 -1.565,2.9 -1.565,5.713 l 0,0.843 5.274,1.09 0,-4.535 23.027,0 0,8.578 -19.652,0 c -2.883,0 -4.84,0.498 -5.872,1.494 -1.031,0.996 -1.546,2.901 -1.546,5.713 l 0,2.883 c 0,2.79 0.515,4.694 1.546,5.714 1.032,1.019 2.989,1 [...]
+             style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path22" /></g><g
+           id="g24"
+           transform="translate(86.1729,67.9746)"><path
+             d="m 0,0 -21.305,0 0,-21.376 21.305,0 0,7.277 5.766,-1.089 0,-3.586 c 0,-2.789 -0.522,-4.688 -1.565,-5.696 -1.043,-1.008 -3.017,-1.511 -5.924,-1.511 l -17.929,0 c -2.907,0 -4.869,0.486 -5.889,1.459 -1.02,0.972 -1.529,2.888 -1.529,5.748 l 0,16.066 c 0,2.837 0.515,4.753 1.547,5.749 1.031,0.996 2.988,1.494 5.871,1.494 l 17.929,0 c 2.907,0 4.881,-0.51 5.924,-1.529 1.043,-1.02 1.565,-2.924 1.565,-5.714 l 0,-2.883 L 0,-6.54 0,0 z"
+             style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path26" /></g><path
+           d="m 111.597,41.993 -5.73,0 0,30.446 5.73,0 0,-30.446 z"
+           style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           id="path28" /><path
+           d="m 190.44,41.993 -5.73,0 0,30.446 5.73,0 0,-30.446 z"
+           style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           id="path30" /><g
+           id="g32"
+           transform="translate(220.8916,67.9746)"><path
+             d="m 0,0 0,-25.981 -5.73,0 0,25.981 -14.415,0 0,4.535 34.629,0 L 14.484,0 0,0 z"
+             style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path34" /></g><path
+           d="m 242.8,63.825 15.68,0 0,-5.273 -15.68,0 0,5.273 z"
+           style="fill:#110f0d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           id="path36" /><g
+           id="g38"
+           transform="translate(296.8525,55.2471)"><path
+             d="m 0,0 -16.629,0 0,-7.77 16.629,0 c 1.406,0 2.361,0.2 2.865,0.598 0.504,0.399 0.756,1.102 0.756,2.109 l 0,2.321 c 0,1.008 -0.252,1.717 -0.756,2.127 C 2.361,-0.205 1.406,0 0,0 m 0,12.235 -16.629,0 0,-7.208 16.629,0 c 1.289,0 2.168,0.164 2.637,0.493 0.468,0.328 0.703,0.89 0.703,1.687 l 0,2.883 c 0,0.798 -0.229,1.354 -0.686,1.671 C 2.197,12.077 1.313,12.235 0,12.235 m -23.941,-25.489 0,30.446 27.351,0 c 2.555,0 4.395,-0.463 5.52,-1.388 1.125,-0.926 1.687,-2.432 1.687,-4.518 l [...]
+             style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path40" /></g><path
+           d="m 322.394,72.439 7.383,0 0,-30.446 -7.383,0 0,30.446 z"
+           style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           id="path42" /><path
+           d="m 352.183,47.829 24.539,0 0,19.021 -24.539,0 0,-19.021 z m -7.418,17.438 c 0,2.766 0.539,4.653 1.617,5.661 1.078,1.008 3.176,1.511 6.293,1.511 l 23.554,0 c 3.141,0 5.25,-0.503 6.317,-1.511 1.066,-1.008 1.594,-2.895 1.594,-5.661 l 0,-16.067 c 0,-2.789 -0.539,-4.687 -1.629,-5.695 -1.09,-1.008 -3.188,-1.512 -6.282,-1.512 l -23.554,0 c -3.117,0 -5.215,0.504 -6.293,1.512 -1.078,1.008 -1.617,2.906 -1.617,5.695 l 0,16.067 z"
+           style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none"
+           id="path44" /><g
+           id="g46"
+           transform="translate(131.9814,103.0708)"><path
+             d="m 0,0 c -1.029,-22.833 3.514,-45.326 3.631,-68.121 0.014,-2.55 8.375,-0.968 8.359,2.241 C 11.875,-43.482 7.342,-21.361 8.354,1.069 8.506,4.465 0.125,2.73 0,0"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path48" /></g><g
+           id="g50"
+           transform="translate(143.9814,45.5576)"><path
+             d="m 0,0 c 11.68,8.213 18.27,19.783 25.232,31.911 2.397,4.173 -6.113,4.452 -8.033,1.11 C 10.869,21.998 5,11.539 -5.613,4.076 -10.254,0.813 -3.494,-2.457 0,0"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path52" /></g><g
+           id="g54"
+           transform="translate(138.6885,43.7627)"><path
+             d="m 0,0 c 3.75,0.387 9.623,-5.463 12.449,-7.592 3.766,-2.834 7.356,-5.9 10.91,-8.99 7.207,-6.262 14.143,-12.828 21.414,-19.02 2.159,-1.837 9.915,1.385 7.782,3.202 C 43.48,-24.674 34.883,-16.408 25.732,-8.771 18.938,-3.102 10.674,5.348 1.211,4.371 -2.76,3.963 -5.814,-0.6 0,0"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path56" /></g><g
+           id="g58"
+           transform="translate(43.2412,149.915)"><path
+             d="m 0,0 8.549,0 c 2.83,0 4.84,0.434 6.023,1.295 1.19,0.861 1.782,2.074 1.782,3.633 0,1.136 -0.346,2.103 -1.049,2.912 -0.698,0.803 -1.617,1.336 -2.76,1.594 -0.738,0.158 -2.098,0.24 -4.084,0.24 L 0,9.674 0,0 z m -4.377,-14.191 0,27.058 12.475,0 c 2.197,0 3.873,-0.088 5.033,-0.258 1.623,-0.222 2.982,-0.644 4.084,-1.265 1.096,-0.621 1.98,-1.494 2.648,-2.614 0.668,-1.119 1.002,-2.349 1.002,-3.691 0,-2.297 -0.89,-4.248 -2.683,-5.842 -1.787,-1.593 -5.022,-2.39 -9.698,-2.39 l -8.48 [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path60" /></g><g
+           id="g62"
+           transform="translate(73.0889,148.8486)"><path
+             d="m 0,0 c 0,-3.264 1.072,-5.83 3.217,-7.705 2.144,-1.881 4.828,-2.818 8.062,-2.818 3.293,0 6.006,0.949 8.133,2.841 2.127,1.899 3.193,4.582 3.193,8.069 0,2.203 -0.457,4.125 -1.365,5.765 -0.908,1.647 -2.244,2.918 -3.996,3.821 -1.752,0.908 -3.715,1.359 -5.894,1.359 -3.1,0 -5.766,-0.873 -7.998,-2.613 C 1.119,6.979 0,4.072 0,0 m -4.512,0.053 c 0,4.494 1.477,8.01 4.424,10.547 2.947,2.543 6.75,3.814 11.414,3.814 3.053,0 5.807,-0.598 8.256,-1.793 2.449,-1.189 4.318,-2.859 5.607,-4. [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path64" /></g><g
+           id="g66"
+           transform="translate(111.8545,135.7236)"><path
+             d="m 0,0 -8.777,27.059 4.488,0 L 0.744,9.322 C 1.283,7.465 1.752,5.619 2.145,3.785 2.982,6.674 3.48,8.344 3.633,8.783 l 6.293,18.276 5.279,0 4.734,-13.694 c 1.19,-3.398 2.045,-6.592 2.573,-9.58 0.422,1.711 0.972,3.674 1.646,5.889 l 5.192,17.385 4.4,0 L 24.68,0 20.461,0 13.488,20.619 c -0.586,1.723 -0.931,2.777 -1.037,3.17 -0.346,-1.242 -0.668,-2.297 -0.967,-3.17 L 4.465,0 0,0 z"
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path68" /></g><g
+           id="g70"
+           transform="translate(150.0049,135.7236)"><path
+             d="m 0,0 0,27.059 23.912,0 0,-3.194 -19.535,0 0,-8.285 18.293,0 0,-3.176 -18.293,0 0,-9.211 20.303,0 L 24.68,0 0,0 z"
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path72" /></g><g
+           id="g74"
+           transform="translate(185.4072,150.8408)"><path
+             d="m 0,0 9.404,0 c 2.004,0 3.569,0.17 4.694,0.504 1.131,0.34 1.986,0.885 2.572,1.629 0.586,0.744 0.879,1.553 0.879,2.426 0,1.277 -0.569,2.332 -1.705,3.158 -1.131,0.82 -2.93,1.236 -5.379,1.236 L 0,8.953 0,0 z m -4.377,-15.117 0,27.058 14.66,0 c 2.947,0 5.192,-0.246 6.727,-0.732 1.529,-0.486 2.76,-1.342 3.674,-2.572 0.92,-1.231 1.377,-2.59 1.377,-4.078 0,-1.922 -0.762,-3.539 -2.28,-4.858 -1.517,-1.312 -3.861,-2.15 -7.037,-2.508 1.16,-0.457 2.039,-0.902 2.637,-1.347 1.283,-0.96 [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path76" /></g><g
+           id="g78"
+           transform="translate(214.6455,135.7236)"><path
+             d="m 0,0 0,27.059 23.912,0 0,-3.194 -19.535,0 0,-8.285 18.293,0 0,-3.176 -18.293,0 0,-9.211 20.303,0 L 24.68,0 0,0 z"
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path80" /></g><g
+           id="g82"
+           transform="translate(249.9775,138.917)"><path
+             d="m 0,0 7.061,0 c 2.179,0 3.89,0.164 5.132,0.498 1.243,0.334 2.233,0.797 2.965,1.4 1.037,0.85 1.846,1.993 2.426,3.428 0.58,1.43 0.873,3.17 0.873,5.215 0,2.83 -0.568,5.004 -1.705,6.522 -1.137,1.517 -2.514,2.537 -4.143,3.052 -1.171,0.369 -3.058,0.557 -5.66,0.557 L 0,20.672 0,0 z m -4.377,-3.193 0,27.058 11.397,0 c 2.566,0 4.529,-0.129 5.882,-0.386 1.899,-0.358 3.516,-1.002 4.852,-1.94 1.746,-1.207 3.053,-2.748 3.914,-4.623 0.867,-1.875 1.301,-4.02 1.301,-6.434 0,-2.056 -0.299 [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path84" /></g><g
+           id="g86"
+           transform="translate(296.4541,138.917)"><path
+             d="m 0,0 8.238,0 c 1.412,0 2.403,0.041 2.977,0.129 1.008,0.146 1.851,0.392 2.525,0.738 0.68,0.346 1.231,0.844 1.67,1.506 0.434,0.656 0.656,1.418 0.656,2.279 0,1.008 -0.316,1.887 -0.949,2.631 -0.633,0.744 -1.506,1.266 -2.631,1.565 -1.119,0.304 -2.73,0.457 -4.834,0.457 L 0,9.305 0,0 z m 0,12.498 7.154,0 c 1.94,0 3.328,0.1 4.172,0.311 1.113,0.269 1.951,0.72 2.514,1.347 0.568,0.627 0.849,1.412 0.849,2.362 0,0.896 -0.263,1.687 -0.791,2.373 -0.527,0.685 -1.277,1.148 -2.255,1.4 -0. [...]
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path88" /></g><g
+           id="g90"
+           transform="translate(332.624,135.7236)"><path
+             d="m 0,0 0,11.461 -12.744,15.598 5.326,0 6.516,-8.157 c 1.207,-1.529 2.326,-3.052 3.363,-4.582 0.99,1.418 2.197,3.012 3.609,4.782 l 6.405,7.957 5.097,0 L 4.377,11.461 4.377,0 0,0 z"
+             style="fill:#a5a7a9;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path92" /></g><g
+           id="g94"
+           transform="translate(6.3506,118.7959)"><path
+             d="m 0,0 c 15.52,0.338 31.041,0.484 46.563,0.731 l 46.562,0.417 c 15.52,0.181 31.041,0.147 46.561,0.215 l 46.562,0.135 46.563,-0.133 C 248.332,1.294 263.852,1.336 279.373,1.154 L 325.936,0.735 C 341.457,0.486 356.977,0.34 372.498,0 356.977,-0.34 341.457,-0.486 325.936,-0.735 l -46.563,-0.419 c -15.521,-0.182 -31.041,-0.14 -46.562,-0.211 l -46.563,-0.133 -46.562,0.135 c -15.52,0.068 -31.041,0.034 -46.561,0.215 L 46.563,-0.731 C 31.041,-0.484 15.52,-0.338 0,0"
+             style="fill:#515257;fill-opacity:1;fill-rule:nonzero;stroke:none"
+             id="path96" /></g></g></g></g></svg>
\ No newline at end of file
diff --git a/checklist.py b/checklist.py
new file mode 100755
index 0000000..00e8875
--- /dev/null
+++ b/checklist.py
@@ -0,0 +1,387 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import collections
+import os
+import os.path
+import subprocess
+import sys
+import ast
+
+import dateutil.parser
+
+
+def main():
+    """Go on a power trip by nitpicking the scikit-bio repo.
+
+    Attempts to find things that are wrong with the repo -- these are usually
+    annoying details introduced by human error. The code goes out of its way
+    to nitpick as much as possible in order to maximize the effectiveness of
+    the power trip.
+
+    Returns
+    -------
+    int
+        Return code: 0 if there were no validation errors, 1 otherwise. Useful
+        as an exit code (e.g. for use with ``sys.exit``).
+
+    """
+    root = 'skbio'
+    validators = [InitValidator(), ExecPermissionValidator(),
+                  GeneratedCythonValidator(), APIRegressionValidator()]
+
+    # Run *every* validator and report all failures on stderr, rather than
+    # stopping at the first one; a single failure makes the whole run fail.
+    return_code = 0
+    for validator in validators:
+        success, msg = validator.validate(root)
+
+        if not success:
+            return_code = 1
+            sys.stderr.write('\n'.join(msg))
+            sys.stderr.write('\n\n')
+
+    return return_code
+
+
+class RepoValidator(object):
+    """Abstract base class representing a repository validator.
+
+    Subclasses must override and implement ``_validate`` (see its docstring for
+    more details).
+
+    Subclasses should also provide a ``reason``: this is a string describing
+    the reason for a particular type of validation failure (see subclasses for
+    examples). ``reason`` is included in the validation error message/report
+    created by ``validate``.
+
+    """
+    # Human-readable failure description; prepended to the error report.
+    reason = ''
+
+    def validate(self, root):
+        """Validate a directory tree recursively.
+
+        Parameters
+        ----------
+        root : str
+            Root directory to validate recursively.
+
+        Returns
+        -------
+        tuple of (bool, list of str)
+            First element is a ``bool`` indicating success status: ``True`` if
+            `root` passed validation, ``False`` if there were any errors.
+            Second element is a list of strings containing the validation error
+            message.
+
+        """
+        invalids = []
+        # Note: the ``root`` parameter is deliberately rebound here to each
+        # directory visited by the (top-down) walk.
+        for root, dirs, files in os.walk(root):
+            result = self._validate(root, dirs, files)
+            invalids.extend(result)
+
+        success = True
+        msg = []
+        if invalids:
+            success = False
+            msg.append(self.reason + ':')
+
+            # One indented line per offending file/directory path.
+            for invalid in invalids:
+                msg.append("    %s" % invalid)
+
+        return success, msg
+
+    def _validate(self, root, dirs, files):
+        """Validate a single directory.
+
+        Subclasses must override and implement this method. The method is
+        supplied with the three values yielded by ``os.walk``.
+
+        Parameters
+        ----------
+        root : str
+            Path to the current directory to be validated.
+        dirs : list of str
+            Directory names within `root`.
+        files : list of str
+            Filenames within `root`.
+
+        Returns
+        -------
+        list of str
+            List of filepaths or dirpaths to be considered invalid (i.e., that
+            did not pass the validation checks).
+
+        See Also
+        --------
+        os.walk
+
+        """
+        raise NotImplementedError("Subclasses must implement _validate.")
+
+    def _system_call(self, cmd):
+        """Issue a system call, returning stdout, stderr, and return value.
+
+        This code was taken from verman's
+        ``verman.Version.verman_system_call``. See licenses/verman.txt and
+        https://github.com/biocore/verman for more details.
+
+        """
+        # ``shell=True`` means ``cmd`` is parsed by the shell, so callers are
+        # responsible for passing a trusted/properly-quoted command string.
+        proc = subprocess.Popen(cmd, shell=True, universal_newlines=True,
+                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        # communicate pulls all stdout/stderr from the PIPEs to
+        # avoid blocking -- don't remove this line!
+        stdout, stderr = proc.communicate()
+        return_value = proc.returncode
+        return stdout, stderr, return_value
+
+
+class InitValidator(RepoValidator):
+    """Flag library code directories that are missing init files.
+
+    This type of validation is important mainly because it is very easy to
+    forget to add an __init__.py file to a new test directory. If this
+    happens, nose will skip those tests unless it is run from the root of the
+    source repository. Thus, the tests will be skipped if the package is
+    pip-installed, e.g., as an end-user might install a release.
+
+    Parameters
+    ----------
+    skip_dirs : iterable of str, optional
+        Directory names to skip during validation. Defaults to skipping any
+        directories named ``'data'`` or ``'__pycache__'`` (and anything
+        contained within them).
+
+    """
+    reason = "Directories missing init files"
+
+    def __init__(self, skip_dirs=None):
+        if skip_dirs is None:
+            skip_dirs = {'data', '__pycache__'}
+        self.skip_dirs = set(skip_dirs)
+
+    def _validate(self, root, dirs, files):
+        # If any of the directories yet to be visited should be skipped, remove
+        # them from ``dirs`` so that we don't visit them in a future iteration.
+        # This guarantees that ``root`` is a valid directory that should not be
+        # skipped (since we're doing a top-down walk).
+        for skip_dir in self.skip_dirs:
+            if skip_dir in dirs:
+                dirs.remove(skip_dir)
+
+        # Every surviving directory must ship an __init__.py of its own.
+        invalid_dirs = []
+        if '__init__.py' not in files:
+            invalid_dirs.append(root)
+        return invalid_dirs
+
+
+class ExecPermissionValidator(RepoValidator):
+    """Flag code files that have execute permissions.
+
+    Parameters
+    ----------
+    extensions : iterable of str, optional
+        File extensions of files to validate. Defaults to Python, Cython, and
+        C files (header and source files).
+
+    """
+    reason = "Library code with execute permissions"
+
+    def __init__(self, extensions=None):
+        if extensions is None:
+            extensions = {'.py', '.pyx', '.h', '.c'}
+        self.extensions = set(extensions)
+
+    def _validate(self, root, dirs, files):
+        invalid_fps = []
+        for f in files:
+            _, ext = os.path.splitext(f)
+
+            if ext in self.extensions:
+                fp = os.path.join(root, f)
+
+                # os.access with X_OK tests the execute bit for the current
+                # user; library source files should never be executable.
+                if os.access(fp, os.X_OK):
+                    invalid_fps.append(fp)
+        return invalid_fps
+
+
+class GeneratedCythonValidator(RepoValidator):
+    """Flag Cython files that have missing or outdated generated C files.
+
+    Flags Cython files that aren't paired with an up-to-date generated C file.
+    The generated C file must be in the same directory as the Cython file, and
+    its name (besides the file extension) must match. The validator also
+    ensures that the generated C file is not empty and that it was generated at
+    the same time or later than the Cython file's timestamp.
+
+    Parameters
+    ----------
+    cython_ext : str, optional
+        File extension for Cython files.
+    c_ext : str, optional
+        File extension for generated C files.
+
+    """
+    reason = "Cython code with missing or outdated generated C code"
+
+    def __init__(self, cython_ext='.pyx', c_ext='.c'):
+        self.cython_ext = cython_ext
+        self.c_ext = c_ext
+
+    def _validate(self, root, dirs, files):
+        invalid_fps = []
+        ext_to_base = collections.defaultdict(list)
+
+        # Map from file extension to a list of basenames (without the
+        # extension).
+        for f in files:
+            base, ext = os.path.splitext(f)
+            ext_to_base[ext].append(base)
+
+        # For each Cython file, try to find a matching C file. If we have a
+        # match, make sure the C file isn't empty and that it was generated at
+        # the same time or later than the Cython file.
+        for cython_base in ext_to_base[self.cython_ext]:
+            cython_fp = os.path.join(root, cython_base + self.cython_ext)
+            c_fp = os.path.join(root, cython_base + self.c_ext)
+
+            if cython_base not in ext_to_base[self.c_ext]:
+                invalid_fps.append(cython_fp)
+            elif os.path.getsize(c_fp) <= 0:
+                invalid_fps.append(cython_fp)
+            else:
+                # Compare last-commit timestamps (from git, not the
+                # filesystem) of the .pyx and the generated .c file.
+                cython_ts = self._get_timestamp(cython_fp)
+                c_ts = self._get_timestamp(c_fp)
+
+                if c_ts < cython_ts:
+                    invalid_fps.append(cython_fp)
+
+        return invalid_fps
+
+    def _get_timestamp(self, fp):
+        # '%%' survives the %-interpolation as a literal '%', so git receives
+        # --format="%ad" (author date of the last commit touching ``fp``).
+        # NOTE(review): ``fp`` is interpolated into a shell command unquoted;
+        # a path containing spaces or shell metacharacters would break this --
+        # confirm repository paths are always shell-safe.
+        cmd = 'git log -1 --format="%%ad" -- %s' % fp
+        stdout, stderr, retval = self._system_call(cmd)
+
+        if retval != 0:
+            raise RuntimeError("Could not execute 'git log' command to "
+                               "determine file timestamp.")
+        return dateutil.parser.parse(stdout.strip())
+
+
+class APIRegressionValidator(RepoValidator):
+    """Flag tests that import from a non-minimized subpackage hierarchy.
+
+    Flags test imports that aren't made from a minimally deep API target
+    (e.g. skbio.Alignment vs skbio.alignment.Alignment). This should prevent
+    accidental regression in our API because tests will fail if any alias is
+    removed, and this checklist will fail if any test doesn't import from the
+    least deep API target.
+
+    """
+    reason = ("The following tests import `A` but should import `B`"
+              " (file: A => B)")
+
+    def __init__(self):
+        # Maps an imported object's name to the shortest dotted import path
+        # known for it so far (populated by ``_add_imports``).
+        self._imports = {}
+
+    def _validate(self, root, dirs, files):
+        errors = []
+        test_imports = []
+        for file in files:
+            current_fp = os.path.join(root, file)
+            package, ext = os.path.splitext(current_fp)
+            if ext == ".py":
+                imports = self._parse_file(current_fp, root)
+                if os.path.split(root)[1] == "tests":
+                    test_imports.append((current_fp, imports))
+
+                temp = package.split(os.sep)
+                # Remove the __init__ if it is a directory import
+                if temp[-1] == "__init__":
+                    temp = temp[:-1]
+                    package = ".".join(temp)
+                    # Note: only imports found in __init__ modules are
+                    # registered as candidate minimal-depth targets.
+                    self._add_imports(imports, package)
+        # Second pass: check each test import against the shortest known path.
+        for fp, imports in test_imports:
+            for import_ in imports:
+                substitute = self._minimal_import(import_)
+                if substitute is not None:
+                    errors.append("%s: %s => %s" %
+                                  (fp, import_, substitute))
+
+        return errors
+
+    def _add_imports(self, imports, package):
+        """Add the minimum depth import to our collection."""
+        for import_ in imports:
+            value = import_
+            # The actual object imported will be the key.
+            key = import_.split(".")[-1]
+            # If package importing the behavior is shorter than its import:
+            if len(package.split('.')) + 1 < len(import_.split('.')):
+                value = ".".join([package, key])
+
+            # Keep only the shallowest dotted path seen for each object name.
+            if key in self._imports:
+                sub = self._imports[key]
+                if len(sub.split('.')) > len(value.split('.')):
+                    self._imports[key] = value
+            else:
+                self._imports[key] = value
+
+    def _minimal_import(self, import_):
+        """Given a normalized import, return a shorter substitute or None."""
+        key = import_.split(".")[-1]
+        if key not in self._imports:
+            return None
+        substitute = self._imports[key]
+        if len(substitute.split('.')) == len(import_.split('.')):
+            return None
+        else:
+            return substitute
+
+    def _parse_file(self, fp, root):
+        """Parse a file and return all normalized skbio imports."""
+        imports = []
+        # NOTE(review): the 'U' (universal newlines) open mode is deprecated
+        # since Python 3.4 and removed in 3.11; plain 'r' is the modern
+        # equivalent -- confirm which interpreter versions must be supported.
+        with open(fp, 'U') as f:
+            # Read the file and run it through AST
+            source = ast.parse(f.read())
+            # Get each top-level element, this is where API imports should be.
+            for node in ast.iter_child_nodes(source):
+                if isinstance(node, ast.Import):
+                    # Standard imports are easy, just get the names from the
+                    # ast.Alias list `node.names`
+                    imports += [x.name for x in node.names]
+                elif isinstance(node, ast.ImportFrom):
+                    prefix = ""
+                    # Relative import handling.
+                    if node.level > 0:
+                        prefix = root
+                        extra = node.level - 1
+                        while(extra > 0):
+                            # Keep dropping...
+                            prefix = os.path.split(prefix)[0]
+                            extra -= 1
+                        # We need this in '.' form not '/'
+                        prefix = prefix.replace(os.sep, ".") + "."
+                    # Prefix should be empty unless node.level > 0
+                    # A bare relative import ("from . import x") has
+                    # module=None; normalize it to the empty string.
+                    if node.module is None:
+                        node.module = ""
+                    imports += [".".join([prefix + node.module, x.name])
+                                for x in node.names]
+        skbio_imports = []
+        for import_ in imports:
+            # Filter by skbio
+            if import_.split(".")[0] == "skbio":
+                skbio_imports.append(import_)
+        return skbio_imports
+
+
+# Exit with main()'s return code so CI can fail the build on validation errors.
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index 0abb57c..0000000
--- a/debian/changelog
+++ /dev/null
@@ -1,12 +0,0 @@
-python-skbio (0.2.3-1) unstable; urgency=medium
-
-  * Initial upload to Debian (Closes: #794021)
-
- -- Andreas Tille <tille at debian.org>  Wed, 29 Jul 2015 23:36:16 +0200
-
-skbio (0.2.3-0biolinux2) trusty; urgency=medium
-
-  * Initial release for QIIME 1.9.0
-  * Note that this build does include compiled code and so must be arch=any
-
- -- Tim Booth <tbooth at ceh.ac.uk>  Mon, 23 Feb 2015 17:46:31 +0000
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index ec63514..0000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/debian/control b/debian/control
deleted file mode 100644
index 4e20d16..0000000
--- a/debian/control
+++ /dev/null
@@ -1,50 +0,0 @@
-Source: python-skbio
-Maintainer: Debian Med Packaging Team <debian-med-packaging at lists.alioth.debian.org>
-Uploaders: Tim Booth <tbooth at ceh.ac.uk>,
-           Andreas Tille <tille at debian.org>
-Section: python
-Priority: optional
-Build-Depends: debhelper (>= 9),
-               python-all (>= 2.7),
-               python3,
-               dh-python,
-               python-dev,
-               python3-dev,
-               python-setuptools,
-               python3-setuptools,
-               python-numpy,
-               python3-numpy,
-               python-natsort,
-               python3-natsort,
-               python-pandas,
-               python3-pandas,
-               python-matplotlib,
-               python3-matplotlib,
-               python-scipy,
-               python3-scipy
-Standards-Version: 3.9.6
-Vcs-Browser: http://anonscm.debian.org/viewvc/debian-med/trunk/packages/python-skbio/trunk/
-Vcs-Svn: svn://anonscm.debian.org/debian-med/trunk/packages/python-skbio/trunk/
-Homepage: https://github.com/biocore/scikit-bio
-
-Package: python-skbio
-Architecture: any
-Depends: ${shlibs:Depends},
-         ${misc:Depends},
-         ${python:Depends}
-Description: Python data structures, algorithms, educational resources for bioinformatics
- Scikit-bio is a Python package providing data structures, algorithms, and
- educational resources for bioinformatics.
- .
- It is used as a dependency by various projects including QIIME
-
-Package: python3-skbio
-Architecture: any
-Depends: ${shlibs:Depends},
-         ${misc:Depends},
-         ${python3:Depends}
-Description: Python3 data structures, algorithms, educational resources for bioinformatic
- Scikit-bio is a Python package providing data structures, algorithms, and
- educational resources for bioinformatics.
- .
- This is the package for Python3
diff --git a/debian/copyright b/debian/copyright
deleted file mode 100644
index e601bbd..0000000
--- a/debian/copyright
+++ /dev/null
@@ -1,39 +0,0 @@
-Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: scikit-bio
-Upstream-Contact: gregcaporaso at gmail.com
-Source: https://github.com/biocore/scikit-bio
-
-Files: *
-Copyright: 2013-2015 scikit-bio development team.
-License: BSD-3-clause
-
-Files: debian/*
-Copyright: 2015 Tim Booth <tbooth at ceh.ac.uk>
-                Andreas Tille <tille at debian.org>
-License: BSD-3-clause
-
-License: BSD-3-clause
- Redistribution and use in source and binary forms, with or without modification,
- are permitted provided that the following conditions are met:
- .
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
- .
- * Redistributions in binary form must reproduce the above copyright notice, this
-   list of conditions and the following disclaimer in the documentation and/or
-   other materials provided with the distribution.
- .
- * Neither the names scikit-bio, skbio, or biocore nor the names of its
-   contributors may be used to endorse or promote products derived from
-   this software without specific prior written permission.
- .
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
- ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index 109e927..0000000
--- a/debian/rules
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/make -f
-# -*- makefile -*-
-
-# Uncomment this to turn on verbose mode.
-#export DH_VERBOSE=1
-
-export PYBUILD_NAME=skbio
-
-%:
-	dh $@ --with python2,python3 --buildsystem=pybuild
-
-override_dh_auto_test:
-	#Loads of failures at this point.
-	dh_auto_test || true
-
-override_dh_clean:
-	dh_clean
-	rm -f .t* .c*
-	find ./skbio -name '*.so' -delete
-	rm -rf .eggs
\ No newline at end of file
diff --git a/debian/source/format b/debian/source/format
deleted file mode 100644
index 163aaf8..0000000
--- a/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/debian/watch b/debian/watch
deleted file mode 100644
index 89280e3..0000000
--- a/debian/watch
+++ /dev/null
@@ -1,2 +0,0 @@
-version=3
-https://github.com/biocore/scikit-bio/releases .*/scikit-bio/archive/([.0-9]+)\.tar\.gz
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..f4d14d2
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,183 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+
+# Turn warnings into errors.
+SPHINXOPTS    = -W
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+# In addition to removing everything from the build directory, we must also
+# remove the stubs for classes, functions, etc. that autosummary creates during
+# the build process. This differs from the original 'make clean' target that is
+# created by sphinx-quickstart.
+clean:
+	rm -rf $(BUILDDIR)/* source/generated
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/scikit-bio.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/scikit-bio.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/scikit-bio"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/scikit-bio"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/doc/README.md b/doc/README.md
new file mode 100644
index 0000000..613020f
--- /dev/null
+++ b/doc/README.md
@@ -0,0 +1,244 @@
+scikit-bio documentation
+========================
+
+This guide contains instructions for building the scikit-bio documentation, as
+well as guidelines for contributing to the documentation.
+
+**Note:** If you're only interested in viewing the scikit-bio documentation,
+visit [scikit-bio.org](http://scikit-bio.org).
+
+Building the documentation
+--------------------------
+
+To build the documentation, you'll need the following Python packages
+installed:
+
+- [Sphinx](http://sphinx-doc.org/) >= 1.2.2
+- [sphinx-bootstrap-theme](https://pypi.python.org/pypi/sphinx-bootstrap-theme/)
+
+An easy way to install the dependencies is via pip:
+
+    pip install Sphinx sphinx-bootstrap-theme
+
+Finally, you will need to install scikit-bio.
+
+**Important:** The documentation will be built for whatever version of
+scikit-bio is *currently installed* on your system (i.e., the version imported
+by ```import skbio```). This may not match the code located in this repository.
+You will need to either install this version of scikit-bio somewhere (e.g., in
+a virtualenv) or point your ```PYTHONPATH``` environment variable to this code,
+*before* building the documentation.
+
+To build the documentation, assuming you are at the top-level scikit-bio
+directory:
+
+    cd doc
+    make html
+
+The built HTML documentation will be at ```build/html/index.html```.
+
+Contributing to the documentation
+---------------------------------
+
+If you would like to contribute to the documentation, whether by adding
+something entirely new or by modifying existing documentation, please first
+review our [scikit-bio contribution guide](../CONTRIBUTING.md).
+
+Before submitting your changes, ensure that the documentation builds without
+any errors or warnings, and that there are no broken links:
+
+    make clean
+    make html
+    make linkcheck
+
+### Documentation guidelines
+
+Most of scikit-bio's API documentation is automatically generated from
+[docstrings](http://legacy.python.org/dev/peps/pep-0257/#what-is-a-docstring).
+The advantage to this approach is that users can access the documentation in an
+interactive Python session or from our website as HTML. Other output forms are
+also possible, such as PDF.
+
+scikit-bio docstrings follow the [numpydoc conventions](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt).
+This ensures that the docstrings are easily readable both from the interpreter
+and HTML, PDF, etc. Please read the numpydoc guidelines before continuing.
+
+### Documenting a module in scikit-bio
+
+In addition to following the numpydoc conventions for docstrings, we have a few
+more conventions that will ensure your documentation is correctly built and
+linked within our website, and that it maintains consistency with the rest of
+the scikit-bio docs.
+
+The easiest way to get started with documenting your code is to look at the
+docstrings in existing scikit-bio modules. A couple of modules to start with
+are ```skbio.sequence``` and ```skbio.stats.distance```. Go ahead and look
+through those now. We've structured our docs in a similar way to
+[SciPy's documentation](http://docs.scipy.org/doc/scipy/reference/), so that
+may be another good place to look for examples.
+
+We'll take a top-down approach by discussing how to document a new module that
+you'd like to add to scikit-bio (let's call it ```skbio/example.py```).
+
+#### Module docstring
+
+The first thing you'll need to add is a docstring for the module. The docstring
+should be the first thing in the file following the ```#!``` line. It should
+start with a title for the module:
+
+    #!/usr/bin/env python
+    """
+    Documentation examples (:mod:`skbio.example`)
+    =============================================
+
+It is important to include the ```:mod:``` Sphinx directive in the title, as
+this title will be included in the table of contents. Also make sure that the
+title underline is the same length as the title.
+
+We also need to include another Sphinx directive below this:
+
+    .. currentmodule:: skbio.example
+
+This directive tells Sphinx that other classes, functions, etc. that we will
+reference are located in the ```skbio.example``` module.
+
+Next, include a more detailed description of the module. For example:
+
+    This module consists of several example classes and functions to illustrate
+    the scikit-bio documentation system.
+
+Following that, list any classes, functions, and exceptions that you'd like
+documentation generated for. Note that you do *not* need to include every
+single class, function, or exception that is defined in the module. Also, you
+do not need to list class methods, as those will be automatically included in
+the generated class documentation. Only include objects that should be exposed
+as part of the public API.
+
+For example:
+
+    Classes
+    -------
+
+    .. autosummary::
+       :toctree: generated/
+
+       ExampleClass1
+       ExampleClass2
+
+    Functions
+    ---------
+
+    .. autosummary::
+       :toctree: generated/
+
+       example_function1
+       example_function2
+
+    Exceptions
+    ----------
+
+    .. autosummary::
+       :toctree: generated/
+
+       ExampleError
+
+The ```autosummary``` directives are important as they generate RST files in
+the ```generated/``` directory for each object. A single-line summary and link
+to each object is inserted into the page for you.
+
+After listing public module members, we encourage a usage example section
+showing how to use some of the module's functionality. Examples should be
+written in [doctest](http://docs.python.org/2/library/doctest.html) format so
+that they can be automatically tested (e.g., using ```nosetests
+--with-doctest``` or ```make doctest```).
+
+    Examples
+    --------
+
+    Run the ``example_function1`` function:
+
+    >>> from skbio.example import example_function1
+    >>> example_function1("hello", "world")
+    hello world!
+
+You can also embed the plots that an example generates into the built
+documentation with the ```.. plot::``` directive. For example:
+
+    .. plot::
+
+       >>> from skbio.draw.distributions import boxplots
+       >>> fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])
+
+This will include the plot, a link to the source code used to generate the
+plot, and links to different image formats (e.g., PNG and PDF) so that users
+can easily download the plot.
+
+You're now ready to document the members of your module.
+
+#### Documenting module members
+
+When documenting the members of a module (e.g., classes, methods, attributes,
+functions, and exceptions), follow the numpydoc conventions. In addition to
+these conventions, there are a few things to keep in mind:
+
+- When documenting a class, only public methods and attributes are included in
+  the built documentation by default. If a method or attribute starts with an
+  underscore, it is assumed to be private. If you want a private method to be
+  included in the built documentation, add the following line to the method's
+  docstring:
+
+    ```
+    .. shownumpydoc
+    ```
+
+  For example, you might want to document "special" methods such as
+  ```__getitem__```, ```__str__```, etc., which would be ignored by default. We
+  recommend placing this at the end of the docstring for consistency. Note that
+  this will only work for methods; private attributes will *always* be ignored.
+
+- When documenting a class, include the ```Parameters``` and ```Attributes```
+  sections in the class docstring, instead of in the ```__init__``` docstring.
+  While numpydoc technically supports either form,
+  ```__init__``` is not included in the list of methods by default and thus
+  should have its documentation included in the class docstring.
+
+#### Including the module in the docs
+
+Until now, we've only been editing docstrings, which are attached to Python
+code. The final step is to hook up this new module's docstrings to the
+documentation build system:
+
+1. Make sure you're within the ```scikit-bio/doc``` directory.
+2. Create a new file with the same name as your module under the ```source```
+   directory. Do not include ```skbio``` as part of the name, and use
+   ```.rst``` as the suffix. For example, ```source/example.rst```.
+3. Add the following line to ```source/example.rst``` to have your module's
+   docstring pulled into the document:
+
+    ```
+    .. automodule:: skbio.example
+    ```
+
+4. Add the following line to ```source/index.rst``` to add the new page to the
+   top-level table of contents:
+
+    ```
+    example
+    ```
+
+That's it! You can now try building the documentation, which should include the
+documentation for your new module!
+
+### Documenting a subpackage in scikit-bio
+
+The process of documenting a subpackage is very similar to documenting a module
+in scikit-bio. The only difference is that the module docstring goes in the
+subpackage's ```__init__.py```.
+
+### Troubleshooting
+
+If things aren't working correctly, try running ```make clean``` and then
+rebuild the docs. If things still aren't working, try building the docs
+*without* your changes, and see if there are any Sphinx errors or warnings.
+Make note of these, and then see what new errors or warnings are generated when
+you add your changes again.
diff --git a/doc/source/_static/copybutton.js b/doc/source/_static/copybutton.js
new file mode 100644
index 0000000..168a26c
--- /dev/null
+++ b/doc/source/_static/copybutton.js
@@ -0,0 +1,60 @@
+// originally taken from scikit-learn's Sphinx theme
+$(document).ready(function() {
+    /* Add a [>>>] button on the top-right corner of code samples to hide
+     * the >>> and ... prompts and the output and thus make the code
+     * copyable. 
+     * Note: This JS snippet was taken from the official python.org
+     * documentation site.*/
+    var div = $('.highlight-python .highlight,' +
+                '.highlight-python3 .highlight,' + 
+                '.highlight-pycon .highlight')
+    var pre = div.find('pre');
+
+    // get the styles from the current theme
+    pre.parent().parent().css('position', 'relative');
+    var hide_text = 'Hide the prompts and output';
+    var show_text = 'Show the prompts and output';
+    var border_width = pre.css('border-top-width');
+    var border_style = pre.css('border-top-style');
+    var border_color = pre.css('border-top-color');
+    var button_styles = {
+        'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0',
+        'border-color': border_color, 'border-style': border_style,
+        'border-width': border_width, 'color': border_color, 'text-size': '75%',
+        'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em'
+    }
+
+    // create and add the button to all the code blocks that contain >>>
+    div.each(function(index) {
+        var jthis = $(this);
+        if (jthis.find('.gp').length > 0) {
+            var button = $('<span class="copybutton">>>></span>');
+            button.css(button_styles)
+            button.attr('title', hide_text);
+            jthis.prepend(button);
+        }
+        // tracebacks (.gt) contain bare text elements that need to be
+        // wrapped in a span to work with .nextUntil() (see later)
+        jthis.find('pre:has(.gt)').contents().filter(function() {
+            return ((this.nodeType == 3) && (this.data.trim().length > 0));
+        }).wrap('<span>');
+    });
+
+    // define the behavior of the button when it's clicked
+    $('.copybutton').toggle(
+        function() {
+            var button = $(this);
+            button.parent().find('.go, .gp, .gt').hide();
+            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
+            button.css('text-decoration', 'line-through');
+            button.attr('title', show_text);
+        },
+        function() {
+            var button = $(this);
+            button.parent().find('.go, .gp, .gt').show();
+            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
+            button.css('text-decoration', 'none');
+            button.attr('title', hide_text);
+        });
+});
+
diff --git a/doc/source/_static/style.css b/doc/source/_static/style.css
new file mode 100644
index 0000000..4ddba1e
--- /dev/null
+++ b/doc/source/_static/style.css
@@ -0,0 +1,77 @@
+h1, h2, h3 {
+    color: #24372C !important;
+}
+
+a {
+    color: #259D57 !important;
+}
+
+a:active, a:hover, a:focus {
+    color: #00B84D !important;
+}
+
+blockquote {
+    font-size: 14px !important;
+}
+
+cite, code {
+    padding: 1px 4px !important;
+    font-size: 90% !important;
+    color: #000 !important;
+    background-color: #F5F5F5 !important;
+    white-space: nowrap !important;
+    border-radius: 4px !important;
+    border: 1px solid #CCC !important;
+    font-family: Menlo,Monaco,Consolas,"Courier New",monospace !important;
+}
+
+.label {
+    display: table-cell !important;
+    color: #000 !important;
+    font-size: 100% !important;
+    text-align: left !important;
+    width: 6em !important;
+}
+
+table.citation {
+    border-left: 1px solid #DDD !important;
+}
+
+table.citation td > em {
+    display: none !important;
+}
+
+.table > tbody > tr > td {
+    border-top: 1px solid #DDD !important;
+}
+
+#navbar {
+    background: #24372C !important;
+    border: none !important;
+    border-bottom: 4px solid #259D57 !important;
+}
+
+#navbar a {
+    color: #FFF !important;
+}
+
+.navbar-default .navbar-nav > li > a:hover, .navbar-default .navbar-nav > li > a:focus {
+    background-color: #259D57 !important;
+}
+
+.navbar-default .navbar-nav > li > a:active {
+    background-color: #00B84D !important;
+}
+
+#navbar .dropdown-menu > li > a {
+    color: #259D57 !important;
+}
+
+#navbar .dropdown-menu > li > a:hover {
+    color: #FFF !important;
+    background-color: #00B84D !important;
+}
+
+.navbar-default .navbar-nav > .open > a, .navbar-default .navbar-nav > .open > a:hover, .navbar-default .navbar-nav > .open > a:focus {
+    background-color: #00B84D !important;
+}
diff --git a/doc/source/_templates/autosummary/attribute.rst b/doc/source/_templates/autosummary/attribute.rst
new file mode 100644
index 0000000..6a37ff0
--- /dev/null
+++ b/doc/source/_templates/autosummary/attribute.rst
@@ -0,0 +1,9 @@
+:orphan:
+
+{{ fullname }}
+{{ underline }}
+
+.. currentmodule:: {{ module }}
+
+.. autoattribute:: {{ objname }}
+
diff --git a/doc/source/_templates/autosummary/class.rst b/doc/source/_templates/autosummary/class.rst
new file mode 100644
index 0000000..daa8ff7
--- /dev/null
+++ b/doc/source/_templates/autosummary/class.rst
@@ -0,0 +1,27 @@
+{% extends "!autosummary/class.rst" %}
+
+{# Taken and modified from scipy's sphinx documentation setup (https://github.com/scipy/scipy/blob/master/doc/source/_templates/autosummary/class.rst). #}
+
+{% block methods %}
+{% if methods %}
+   .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages.
+      .. autosummary::
+         :toctree:
+      {% for item in all_methods %}
+         {{ name }}.{{ item }}
+      {%- endfor %}
+{% endif %}
+{% endblock %}
+
+{% block attributes %}
+{% if attributes %}
+   .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages.
+      .. autosummary::
+         :toctree:
+      {% for item in all_attributes %}
+         {%- if not item.startswith('_') %}
+         {{ name }}.{{ item }}
+         {%- endif -%}
+      {%- endfor %}
+{% endif %}
+{% endblock %}
diff --git a/doc/source/_templates/autosummary/method.rst b/doc/source/_templates/autosummary/method.rst
new file mode 100644
index 0000000..db18a82
--- /dev/null
+++ b/doc/source/_templates/autosummary/method.rst
@@ -0,0 +1,9 @@
+:orphan:
+
+{{ fullname }}
+{{ underline }}
+
+.. currentmodule:: {{ module }}
+
+.. automethod:: {{ objname }}
+
diff --git a/doc/source/_templates/autosummary/module.rst b/doc/source/_templates/autosummary/module.rst
new file mode 100644
index 0000000..181a439
--- /dev/null
+++ b/doc/source/_templates/autosummary/module.rst
@@ -0,0 +1,2 @@
+.. automodule:: {{ fullname }}
+
diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html
new file mode 100644
index 0000000..c741da4
--- /dev/null
+++ b/doc/source/_templates/layout.html
@@ -0,0 +1,35 @@
+{% extends "!layout.html" %}
+
+{# Taken and modified from sphinx-bootstrap-theme demo site
+   (https://github.com/ryan-roemer/sphinx-bootstrap-theme/blob/master/demo/source/_templates/layout.html). #}
+
+{# Add github banner (from: https://github.com/blog/273-github-ribbons). #}
+{% block header %}
+  {{ super() }}
+  <a href="https://github.com/biocore/scikit-bio"
+     class="visible-desktop hidden-xs"><img
+    id="gh-banner"
+    style="position: absolute; top: 50px; right: 0; border: 0;"
+    src="https://s3.amazonaws.com/github/ribbons/forkme_right_darkblue_121621.png"
+    alt="Fork me on GitHub"></a>
+  <script>
+    // Adjust banner height.
+    $(function () {
+      var navHeight = $(".navbar .container").css("height");
+      $("#gh-banner").css("top", navHeight);
+    });
+  </script>
+{% endblock %}
+
+{% block footer %}
+  {{ super() }}
+  <script>
+    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+    (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+    m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+    })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+
+    ga('create', 'UA-6636235-9', 'scikit-bio.org');
+    ga('send', 'pageview');
+  </script>
+{% endblock %}
diff --git a/doc/source/alignment.rst b/doc/source/alignment.rst
new file mode 100644
index 0000000..f16186b
--- /dev/null
+++ b/doc/source/alignment.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.alignment
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 0000000..982ff5c
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,442 @@
+import glob
+import sys
+import os
+
+import sphinx_bootstrap_theme
+
+import skbio
+
+# NOTE: parts of this file were taken from scipy's doc/source/conf.py. See
+# scikit-bio/licenses/scipy.txt for scipy's license.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../sphinxext/numpydoc'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = '1.1'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.mathjax',
+    'numpydoc',
+    'sphinx.ext.coverage',
+    'sphinx.ext.doctest',
+    'sphinx.ext.autosummary',
+    'sphinx.ext.intersphinx'
+]
+
+# Determine if the matplotlib has a recent enough version of the
+# plot_directive.
+try:
+    from matplotlib.sphinxext import plot_directive
+except ImportError:
+    use_matplotlib_plot_directive = False
+else:
+    try:
+        use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
+    except AttributeError:
+        use_matplotlib_plot_directive = False
+
+if use_matplotlib_plot_directive:
+    extensions.append('matplotlib.sphinxext.plot_directive')
+else:
+    raise RuntimeError("You need a recent enough version of matplotlib")
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'scikit-bio'
+copyright = u'2014--, scikit-bio development team'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = skbio.__version__
+# The full version, including alpha/beta/rc tags.
+release = skbio.__version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+
+# Exclude this file since it is only used by autosummary to generate other RST
+# files during the build process, and it will generate sphinx errors and
+# warnings otherwise.
+exclude_patterns = ['_templates/autosummary/*.rst']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'bootstrap'
+html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+    # Navigation bar title. (Default: ``project`` value)
+    'navbar_title': 'scikit-bio docs',
+
+    # Render the next and previous page links in navbar. (Default: true)
+    'navbar_sidebarrel': False,
+
+    # Bootswatch (http://bootswatch.com/) theme.
+    #
+    # Options are nothing with "" (default) or the name of a valid theme
+    # such as "amelia" or "cosmo".
+    'bootswatch_theme': 'united',
+
+    # Location of link to source.
+    # Options are "nav" (default), "footer" or anything else to exclude.
+    'source_link_position': False
+}
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static/']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'scikit-biodoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  ('index', 'scikit-bio.tex', u'scikit-bio Documentation',
+   u'scikit-bio development team', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'scikit-bio', u'scikit-bio Documentation',
+     [u'scikit-bio development team'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'scikit-bio', u'scikit-bio Documentation',
+   u'scikit-bio development team', 'scikit-bio',
+   'Core objects, functions and statistics for working with biological data '
+   'in Python.', 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+# -- Options for autosummary ----------------------------------------------
+autosummary_generate = glob.glob('*.rst')
+
+# -- Options for numpydoc -------------------------------------------------
+# Generate plots for example sections
+numpydoc_use_plots = True
+
+#------------------------------------------------------------------------------
+# Plot
+#------------------------------------------------------------------------------
+plot_pre_code = """
+import numpy as np
+import scipy as sp
+np.random.seed(123)
+"""
+plot_include_source = True
+#plot_formats = [('png', 96), 'pdf']
+#plot_html_show_formats = False
+
+import math
+phi = (math.sqrt(5) + 1)/2
+
+font_size = 13*72/96.0  # 13 px
+
+plot_rcparams = {
+    'font.size': font_size,
+    'axes.titlesize': font_size,
+    'axes.labelsize': font_size,
+    'xtick.labelsize': font_size,
+    'ytick.labelsize': font_size,
+    'legend.fontsize': font_size,
+    'figure.figsize': (3*phi, 3),
+    'figure.subplot.bottom': 0.2,
+    'figure.subplot.left': 0.2,
+    'figure.subplot.right': 0.9,
+    'figure.subplot.top': 0.85,
+    'figure.subplot.wspace': 0.4,
+    'text.usetex': False,
+
+    # Some of our figures have legends outside the axes area. When they're
+    # rendered in an interactive context, nothing gets cut off, but when
+    # rendered in a static context (e.g., with savefig, which the plot
+    # directive uses), the legend can get cut off. Specifying 'tight' instead
+    # of 'standard' fixes the issue. See http://stackoverflow.com/a/10154763
+    'savefig.bbox': 'tight'
+}
+
+if not use_matplotlib_plot_directive:
+    import matplotlib
+    matplotlib.rcParams.update(plot_rcparams)
+
+# -----------------------------------------------------------------------------
+# Intersphinx configuration
+# -----------------------------------------------------------------------------
+intersphinx_mapping = {
+        'http://docs.python.org/dev': None,
+        'http://docs.scipy.org/doc/numpy': None,
+        'http://docs.scipy.org/doc/scipy/reference': None,
+        'http://matplotlib.org': None,
+        'http://pandas.pydata.org': None,
+        'http://www.biom-format.org':None
+}
+
+# -----------------------------------------------------------------------------
+# Source code links
+# -----------------------------------------------------------------------------
+
+import inspect
+from os.path import relpath, dirname
+
+for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
+    try:
+        __import__(name)
+        extensions.append(name)
+        break
+    except ImportError:
+        pass
+else:
+    print "NOTE: linkcode extension not found -- no links to source generated"
+
+def linkcode_resolve(domain, info):
+    """
+    Determine the URL corresponding to Python object
+    """
+    if domain != 'py':
+        return None
+
+    modname = info['module']
+    fullname = info['fullname']
+
+    submod = sys.modules.get(modname)
+    if submod is None:
+        return None
+
+    obj = submod
+    for part in fullname.split('.'):
+        try:
+            obj = getattr(obj, part)
+        except:
+            return None
+
+    try:
+        fn = inspect.getsourcefile(obj)
+    except:
+        fn = None
+    if not fn:
+        try:
+            fn = inspect.getsourcefile(sys.modules[obj.__module__])
+        except:
+            fn = None
+    if not fn:
+        return None
+
+    try:
+        source, lineno = inspect.findsource(obj)
+    except:
+        lineno = None
+
+    if lineno:
+        linespec = "#L%d" % (lineno + 1)
+    else:
+        linespec = ""
+
+    fn = relpath(fn, start=dirname(skbio.__file__))
+
+    if 'dev' in skbio.__version__:
+        return "http://github.com/biocore/scikit-bio/blob/master/skbio/%s%s" % (
+           fn, linespec)
+    else:
+        return "http://github.com/biocore/scikit-bio/blob/%s/skbio/%s%s" % (
+           skbio.__version__, fn, linespec)
+
+#------------------------------------------------------------------------------
+# linkcheck
+#------------------------------------------------------------------------------
+
+# Link-checking on Travis sometimes times out.
+linkcheck_timeout = 30
+
+
+# Add the 'copybutton' javascript, to hide/show the prompt in code
+# examples, originally taken from scikit-learn's doc/conf.py
+def setup(app):
+    app.add_javascript('copybutton.js')
+    app.add_stylesheet('style.css')
diff --git a/doc/source/development/coding_guidelines.rst b/doc/source/development/coding_guidelines.rst
new file mode 100644
index 0000000..48cee36
--- /dev/null
+++ b/doc/source/development/coding_guidelines.rst
@@ -0,0 +1,379 @@
+Coding guidelines
+=================
+
+As project size increases, consistency of the code base and documentation becomes more important. We therefore provide guidelines for code and documentation that is contributed to scikit-bio. Our goal is to create a consistent code base where:
+
+* it is easy to find relevant functionality (and to determine when functionality that you're looking for doesn't exist),
+* you can trust that the code that you're working with is sufficiently tested, and
+* names and interfaces are intuitive.
+
+**As scikit-bio is in alpha, our coding guidelines are presented here as a working draft. These guidelines are requirements for all code submitted to scikit-bio, but at this stage the guidelines themselves are malleable. If you disagree with something, or have a suggestion for something new to include, you should** `create an issue`_ **to initiate a discussion.**
+
+.. _`create an issue`: https://github.com/biocore/scikit-bio/issues
+
+What are the naming conventions? and How should I format my code?
+-----------------------------------------------------------------
+
+We adhere to the `PEP 8`_ python coding guidelines for code and documentation standards. Before submitting any code to scikit-bio, you should read these carefully and apply the guidelines in your code.
+
+.. _`PEP 8`: http://legacy.python.org/dev/peps/pep-0008/
+
+
+What should I call my variables?
+--------------------------------
+
+- *Choose the name that people will most likely guess.* Make it descriptive, but not too long: ``curr_record`` is better than ``c``, or ``curr``, or ``current_genbank_record_from_database``.
+
+- *Good names are hard to find.* Don't be afraid to change names except when they are part of interfaces that other people are also using. It may take some time working with the code to come up with reasonable names for everything: if you have unit tests, it's easy to change them, especially with global search and replace.
+
+- *Use singular names for individual things, plural names for collections.* For example, you'd expect ``self.name`` to hold something like a single string, but ``self.names`` to hold something that you could loop through like a list or dictionary. Sometimes the decision can be tricky: is ``self.index`` an integer holding a positon, or a dictionary holding records keyed by name for easy lookup? If you find yourself wondering these things, the name should probably be changed to avoid the p [...]
+
+- *Don't make the type part of the name.* You might want to change the implementation later. Use ``Records`` rather than ``RecordDict`` or ``RecordList``, etc. Don't use Hungarian Notation either (i.e. where you prefix the name with the type).
+
+- *Make the name as precise as possible.* If the variable is the path of the input file, call it ``input_fp``, not ``input`` or ``file`` (which you shouldn't use anyway, since they're keywords), and not ``infile`` (because that looks like it should be a file object, not just its name).
+
+- *Use* ``result`` *to store the value that will be returned from a method or function.* Use ``data`` for input in cases where the function or method acts on arbitrary data (e.g. sequence data, or a list of numbers, etc.) unless a more descriptive name is appropriate.
+
+- *One-letter variable names should only occur in math functions or as loop iterators with limited scope.* Limited scope covers things like ``for k in keys: print k``, where ``k`` survives only a line or two. Loop iterators should refer to the variable that they're looping through: ``for k in keys, i in items``, or ``for key in keys, item in items``. If the loop is long or there are several 1-letter variables active in the same scope, rename them.
+
+- *Limit your use of abbreviations.* A few well-known abbreviations are OK, but you don't want to come back to your code in 6 months and have to figure out what ``sptxck2`` is. It's worth it to spend the extra time typing ``species_taxon_check_2``, but that's still a horrible name: what's check number 1? Far better to go with something like ``taxon_is_species_rank`` that needs no explanation, especially if the variable is only used once or twice.
+
+Acceptable abbreviations
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following list of abbreviations can be considered well-known and used with impunity within mixed name variables, but some should not be used by themselves as they would conflict with common functions, python built-ins, or raise an exception. Do not use the following by themselves as variable names: ``dir``, ``exp`` (a common ``math`` module function), ``in``, ``max``, and ``min``. They can, however, be used as part of a name, e.g. ``matrix_exp``.
+
++--------------------+--------------+
+|        Full        |  Abbreviated |
++====================+==============+
+|          alignment |          aln |
++--------------------+--------------+
+|           archaeal |         arch |
++--------------------+--------------+
+|          auxiliary |          aux |
++--------------------+--------------+
+|          bacterial |         bact |
++--------------------+--------------+
+|           citation |         cite |
++--------------------+--------------+
+|            current |         curr |
++--------------------+--------------+
+|           database |           db |
++--------------------+--------------+
+|         dictionary |         dict |
++--------------------+--------------+
+|          directory |          dir |
++--------------------+--------------+
+|    distance matrix |           dm |
++--------------------+--------------+
+|        end of file |          eof |
++--------------------+--------------+
+|         eukaryotic |          euk |
++--------------------+--------------+
+|           filepath |           fp |
++--------------------+--------------+
+|          frequency |         freq |
++--------------------+--------------+
+|           expected |          exp |
++--------------------+--------------+
+|              index |          idx |
++--------------------+--------------+
+|              input |           in |
++--------------------+--------------+
+|            maximum |          max |
++--------------------+--------------+
+|            minimum |          min |
++--------------------+--------------+
+|      mitochondrial |           mt |
++--------------------+--------------+
+|             number |          num |
++--------------------+--------------+
+|        observation |          obs |
++--------------------+--------------+
+|           observed |          obs |
++--------------------+--------------+
+|           original |         orig |
++--------------------+--------------+
+|             output |          out |
++--------------------+--------------+
+|          parameter |        param |
++--------------------+--------------+
+|          phylogeny |        phylo |
++--------------------+--------------+
+|           previous |         prev |
++--------------------+--------------+
+|        probability |         prob |
++--------------------+--------------+
+|            protein |         prot |
++--------------------+--------------+
+|             record |          rec |
++--------------------+--------------+
+|          reference |          ref |
++--------------------+--------------+
+|           sequence |          seq |
++--------------------+--------------+
+| standard deviation |        stdev |
++--------------------+--------------+
+|         statistics |        stats |
++--------------------+--------------+
+|             string |          str |
++--------------------+--------------+
+|          structure |       struct |
++--------------------+--------------+
+|          temporary |         temp |
++--------------------+--------------+
+|               taxa |          tax |
++--------------------+--------------+
+|              taxon |          tax |
++--------------------+--------------+
+|          taxonomic |          tax |
++--------------------+--------------+
+|           taxonomy |          tax |
++--------------------+--------------+
+|           variance |          var |
++--------------------+--------------+
+
+How do I organize my modules (source files)?
+--------------------------------------------
+
+- *Have a docstring with a description of the module's functions*. If the description is long, the first line should be a short summary that makes sense on its own, separated from the rest by a newline.
+
+- *All code, including import statements, should follow the docstring.* Otherwise, the docstring will not be recognized by the interpreter, and you will not have access to it in interactive sessions (i.e. through ``obj.__doc__``) or when generating documentation with automated tools.
+
+- *Import built-in modules first, followed by third-party modules, followed by any changes to the path and your own modules.* Especially, additions to the path and names of your modules are likely to change rapidly: keeping them in one place makes them easier to find.
+
+- *Don't use* ``from module import *``, *instead use* ``from module import Name, Name2, Name3...`` *or possibly* ``import module``. This makes it *much* easier to see name collisions and to replace implementations.
+
+- If you are importing `NumPy`_, `Matplotlib`_, or another package that encourages a standard style for their import statements use them as needed for example:
+
+::
+
+    import numpy as np
+    import numpy.testing as npt
+    import pandas as pd
+
+    from matplotlib import pyplot as plt
+
+.. _`NumPy`: http://www.numpy.org/
+.. _`Matplotlib`: http://matplotlib.org/
+
+Example of module structure
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The structure of your module should be similar to the example below. scikit-bio uses the `NumPy doc`_ standard for documentation. Our `doc/README.md`_ explains how to write your docstrings using the `NumPy doc`_ standards for scikit-bio:
+
+.. _`NumPy doc`: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
+.. _`doc/README.md`: https://github.com/biocore/scikit-bio/blob/master/doc/README.md
+
+.. code-block:: python
+
+    r"""
+    Numbers (:mod:`skbio.numbers`)
+    ==============================
+
+    .. currentmodule:: skbio.numbers
+
+    Numbers holds a sequence of numbers, and defines several statistical
+    operations (mean, stdev, etc.) FrequencyDistribution holds a mapping from
+    items (not necessarily numbers) to counts, and defines operations such as
+    Shannon entropy and frequency normalization.
+
+
+    Classes
+    -------
+
+    .. autosummary::
+       :toctree: generated/
+
+       Numbers
+
+    """
+    # ----------------------------------------------------------------------------
+    # Copyright (c) 2013--, scikit-bio development team.
+    #
+    # Distributed under the terms of the Modified BSD License.
+    #
+    # The full license is in the file COPYING.txt, distributed with this software.
+    # ----------------------------------------------------------------------------
+
+    from __future__ import absolute_import, division, print_function
+
+    import numpy as np
+    from random import choice, random
+    from utils import indices
+
+    class Numbers(list):
+        pass    # much code deleted
+    class FrequencyDistribution(dict):
+        pass    # much code deleted
+
+
+How should I write comments?
+----------------------------
+
+- *Always update the comments when the code changes.* Incorrect comments are far worse than no comments, since they are actively misleading.
+
+- *Comments should say more than the code itself.* Examine your comments carefully: they may indicate that you'd be better off rewriting your code (especially if *renaming your variables* would allow you to get rid of the comment.) In particular, don't scatter magic numbers and other constants that have to be explained through your code. It's far better to use variables whose names are self-documenting, especially if you use the same constant more than once. Also, think about making cons [...]
+
+    +-------+------------------------------------------------------------+
+    | Wrong |       ``win_size -= 20        # decrement win_size by 20`` |
+    +-------+------------------------------------------------------------+
+    |    OK | ``win_size -= 20        # leave space for the scroll bar`` |
+    +-------+------------------------------------------------------------+
+    | Right |                             ``self._scroll_bar_size = 20`` |
+    +-------+------------------------------------------------------------+
+    |       |                      ``win_size -= self._scroll_bar_size`` |
+    +-------+------------------------------------------------------------+
+
+
+- *Use comments starting with #, not strings, inside blocks of code.*
+- *Start each method, class and function with a docstring using triple double quotes (""").* Make sure the docstring follows the `NumPy doc`_ standard.
+
+- *Always update the docstring when the code changes.* Like outdated comments, outdated docstrings can waste a lot of time. "Correct examples are priceless, but incorrect examples are worse than worthless." `Jim Fulton`_.
+
+.. _`Jim Fulton`: http://www.python.org/pycon/dc2004/papers/4/PyCon2004DocTestUnit.pdf
+
+How should I test my code?
+--------------------------
+
+There are several different approaches for testing code in python: ``nose``, ``unittest`` and ``numpy.testing``. Their purpose is the same, to check that execution of code given some input produces a specified output. The cases to which the approaches lend themselves are different.
+
+Whatever approach is employed, the general principle is every line of code should be tested. It is critical that your code be fully tested before you draw conclusions from results it produces. For scientific work, bugs don't just mean unhappy users who you'll never actually meet: **they may mean retracted publications**.
+
+Tests are an opportunity to invent the interface(s) you want. Write the test for a method before you write the method: often, this helps you figure out what you would want to call it and what parameters it should take. It's OK to write the tests a few methods at a time, and to change them as your ideas about the interface change. However, you shouldn't change them once you've told other people what the interface is. In the spirit of this, your tests should also import the functionality t [...]
+
+Never treat prototypes as production code. It's fine to write prototype code without tests to try things out, but when you've figured out the algorithm and interfaces you must rewrite it *with tests* to consider it finished. Often, this helps you decide what interfaces and functionality you actually need and what you can get rid of.
+
+"Code a little test a little". For production code, write a couple of tests, then a couple of methods, then a couple more tests, then a couple more methods, then maybe change some of the names or generalize some of the functionality. If you have a huge amount of code where all you have to do is write the tests', you're probably closer to 30% done than 90%. Testing vastly reduces the time spent debugging, since whatever went wrong has to be in the code you wrote since the last test suite. [...]
+
+Run the test suite when you change `anything`. Even if a change seems trivial, it will only take a couple of seconds to run the tests and then you'll be sure. This can eliminate long and frustrating debugging sessions where the change turned out to have been made long ago, but didn't seem significant at the time. **Note that tests are executed using Travis CI**, see `this document's section`_ for further discussion.
+
+.. _`this document's section`: https://github.com/biocore/scikit-bio/blob/master/CONTRIBUTING.md#testing-guidelines
+
+Some pointers
+^^^^^^^^^^^^^
+
+- *Use the* ``unittest`` *or the* ``nose`` *framework with tests in a separate file for each module.* Name the test file ``test_module_name.py`` and include it inside the tests folder of the module. Keeping the tests separate from the code reduces the temptation to change the tests when the code doesn't work, and makes it easy to verify that a completely new implementation presents the same interface (behaves the same) as the old.
+
+- *Always include an* ``__init__.py`` *file in your tests directory*. This is required for the module to be included when the package is built and installed via ``setup.py``.
+
+- *Always import from a minimally deep API target*. That means you would use ``from skbio import DistanceMatrix`` instead of ``from skbio.stats.distance import DistanceMatrix``. This allows us to prevent most cases of accidental regression in our API.
+
+- *Use* ``numpy.testing`` *if you are doing anything with floating point numbers, arrays or permutations* (use ``numpy.testing.assert_almost_equal``). Do *not* try to compare floating point numbers using ``assertEqual`` if you value your sanity.
+
+- *Test the interface of each class in your code by defining at least one* ``TestCase`` *with the name* ``ClassNameTests``. This should contain tests for everything in the public interface.
+
+- *If the class is complicated, you may want to define additional tests with names* ``ClassNameTests_test_type``. These might subclass ``ClassNameTests`` in order to share ``setUp`` methods, etc.
+
+- *Tests of private methods should be in a separate* ``TestCase`` *called* ``ClassNameTests_private``. Private methods may change if you change the implementation. It is not required that test cases for private methods pass when you change things (that's why they're private, after all), though it is often useful to have these tests for debugging.
+
+- *Test `all` the methods in your class.* You should assume that any method you haven't tested has bugs. The convention for naming tests is ``test_method_name``. Any leading and trailing underscores on the method name can be ignored for the purposes of the test; however, *all tests must start with the literal substring* ``test`` *for* ``unittest`` and ``nose`` *to find them.* If the method is particularly complex, or has several discretely different cases you need to check, use ``test_me [...]
+
+- *Docstrings for testing methods should be considered optional*, instead the description of what the method does should be included in the name itself, therefore the name should be descriptive enough such that when running ``nosetests -v`` you can immediately see the file and test method that's failing.
+
+.. code-block:: none
+
+    $ nosetests -v
+    skbio.maths.diversity.alpha.tests.test_ace.test_ace ... ok
+    test_berger_parker_d (skbio.maths.diversity.alpha.tests.test_base.BaseTests) ... ok
+
+    ----------------------------------------------------------------------
+    Ran 2 tests in 0.1234s
+
+    OK
+
+- *Module-level functions should be tested in their own* ``TestCase``\ *, called* ``modulenameTests``. Even if these functions are simple, it's important to check that they work as advertised.
+
+- *It is much more important to test several small cases that you can check by hand than a single large case that requires a calculator.* Don't trust spreadsheets for numerical calculations -- use R instead!
+
+- *Make sure you test all the edge cases: what happens when the input is None, or '', or 0, or negative?* What happens at values that cause a conditional to go one way or the other? Does incorrect input raise the right exceptions? Can your code accept subclasses or superclasses of the types it expects? What happens with very large input?
+
+- *To test permutations, check that the original and shuffled version are different, but that the sorted original and sorted shuffled version are the same.* Make sure that you get *different* permutations on repeated runs and when starting from different points.
+
+- *To test random choices, figure out how many of each choice you expect in a large sample (say, 1000 or a million) using the binomial distribution or its normal approximation.* Run the test several times and check that you're within, say, 3 standard deviations of the mean.
+
+- All tests that depend on a random value should be seeded, for example if using NumPy, `numpy.random.seed(0)` should be used, in any other case the appropriate API should be used to create consistent outputs between runs. It is preferable that you do this for each test case instead of doing it in the `setUp` function/method (if any exists).
+
+- Stochastic failures should occur less than 1/10,000 times, otherwise you risk adding a significant amount of time to the total running time of the test suite.
+
+Example of a ``nose`` test module structure
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: python
+
+    #!/usr/bin/env python
+    from __future__ import division
+
+    # ----------------------------------------------------------------------------
+    # Copyright (c) 2013--, scikit-bio development team.
+    #
+    # Distributed under the terms of the Modified BSD License.
+    #
+    # The full license is in the file COPYING.txt, distributed with this software.
+    # ----------------------------------------------------------------------------
+
+    import numpy as np
+    from nose.tools import assert_almost_equal, assert_raises
+
+    from skbio.math.diversity.alpha.ace import ace
+
+
+    def test_ace():
+        assert_almost_equal(ace(np.array([2, 0])), 1.0)
+        assert_almost_equal(ace(np.array([12, 0, 9])), 2.0)
+        assert_almost_equal(ace(np.array([12, 2, 8])), 3.0)
+        assert_almost_equal(ace(np.array([12, 2, 1])), 4.0)
+        assert_almost_equal(ace(np.array([12, 1, 2, 1])), 7.0)
+        assert_almost_equal(ace(np.array([12, 3, 2, 1])), 4.6)
+        assert_almost_equal(ace(np.array([12, 3, 6, 1, 10])), 5.62749672)
+
+        # Just returns the number of OTUs when all are abundant.
+        assert_almost_equal(ace(np.array([12, 12, 13, 14])), 4.0)
+
+        # Border case: only singletons and 10-tons, no abundant OTUs.
+        assert_almost_equal(ace([0, 1, 1, 0, 0, 10, 10, 1, 0, 0]), 9.35681818182)
+
+
+    def test_ace_only_rare_singletons():
+        with assert_raises(ValueError):
+            ace([0, 0, 43, 0, 1, 0, 1, 42, 1, 43])
+
+
+    if __name__ == '__main__':
+        import nose
+        nose.runmodule()
+
+Git pointers
+------------
+
+Commit messages are a useful way to document the changes being made to a project; they additionally document who is making these changes and when these changes are being made, all of which is relevant when tracing back problems.
+
+Authoring a commit message
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The most important metadata in a commit message is (arguably) the author's name and the author's e-mail. GitHub uses this information to attribute your contributions to a project, see for example the `scikit-bio list of contributors`_.
+
+.. _`scikit-bio list of contributors`: https://github.com/biocore/scikit-bio/graphs/contributors
+
+Follow `this guide`_ to set up your system and **make sure the e-mail you use in this step is the same e-mail associated to your GitHub account**.
+
+.. _`this guide`: http://git-scm.com/book/en/Getting-Started-First-Time-Git-Setup
+
+After doing this you should see your name and e-mail when you run the following commands:
+
+.. code-block:: none
+
+    $ git config --global user.name
+    Yoshiki Vázquez Baeza
+    $ git config --global user.email
+    yoshiki89 at gmail.com
+
+Writing a commit message
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+In general the writing of a commit message should adhere to `NumPy's guidelines`_ which if followed correctly will help you structure your changes better, i.e. bug fixes will be in a commit followed by a commit updating the test suite and with one last commit that updates the documentation as needed.
+
+GitHub provides a set of handy features that will link a commit message to a ticket in the issue tracker; this is especially helpful because you can `close an issue automatically`_ when the change is merged into the main repository, which reduces the amount of work that has to be done making sure outdated issues are not open.
+
+.. _`NumPy's guidelines`: http://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message
+.. _`close an issue automatically`: https://help.github.com/articles/closing-issues-via-commit-messages
diff --git a/doc/source/development/new_module.rst b/doc/source/development/new_module.rst
new file mode 100644
index 0000000..063a5f7
--- /dev/null
+++ b/doc/source/development/new_module.rst
@@ -0,0 +1,49 @@
+Adding a new module to skbio
+############################
+
+Each module needs an `__init__.py` file and a `tests` folder that also
+contains an `__init__.py` file. For a module, a simple one may look
+like this::
+
+  r"""
+  A module (:mod:`skbio.module`)
+  ==============================
+
+  .. currentmodule:: skbio.module
+
+  Documentation for this module.
+  """
+
+  # ----------------------------------------------------------------------------
+  # Copyright (c) 2013--, scikit-bio development team.
+  #
+  # Distributed under the terms of the Modified BSD License.
+  #
+  # The full license is in the file COPYING.txt, distributed with this software.
+  # ----------------------------------------------------------------------------
+
+  from __future__ import absolute_import, division, print_function
+
+  from numpy.testing import Tester
+  test = Tester().test
+
+Usually, some functionality from the module will be made accessible by
+importing it in `__init__.py`. It's convenient to use explicit
+relative imports (`from .implementation import compute`), so that
+functionality can be neatly separated in different files but the user
+doesn't face a deeply nested package: `from skbio.module import
+compute` instead of `from skbio.module.implementation import compute`.
+
+Inside the tests folder, a simpler `__init__.py` works fine (it is
+necessary so that all tests can be run after installation)::
+
+  # ----------------------------------------------------------------------------
+  # Copyright (c) 2013--, scikit-bio development team.
+  #
+  # Distributed under the terms of the Modified BSD License.
+  #
+  # The full license is in the file COPYING.txt, distributed with this software.
+  # ----------------------------------------------------------------------------
+
+Finally, remember to also follow the `documentation guidelines
+<https://github.com/biocore/scikit-bio/blob/master/doc/README.md#documenting-a-module-in-scikit-bio>`_.
diff --git a/doc/source/development/py3.rst b/doc/source/development/py3.rst
new file mode 100644
index 0000000..af66c34
--- /dev/null
+++ b/doc/source/development/py3.rst
@@ -0,0 +1,366 @@
+Supporting Python 2 and Python 3
+################################
+
+skbio simultaneously supports Python 2.7 and 3.3+ by writing code that
+works unchanged in both major versions.
+
+As a compatibility layer, we're using the `future <http://python-future.org/>`_
+and `six <https://pypi.python.org/pypi/six>`_ projects. future "allows you to
+use a single, clean Python 3.x-compatible codebase to support both Python 2 and
+Python 3 with minimal overhead". It includes functionality from "six, IPython,
+Jinja2, Django, and Pandas". Recent versions of the future project stopped
+bundling the six library, so we also directly depend on six (e.g., for StringIO
+compatibility).
+
+So far, these notes are based on issues that have appeared when porting
+skbio, so it is not a complete guide. Refer to the `official porting
+guide <https://docs.python.org/3/howto/pyporting.html>`_ and the
+`python-future docs <http://python-future.org/>`_ for more
+information.
+
+Importing __future__
+====================
+
+For consistency across versions, every Python file should start with
+the following imports::
+
+  # ----------------------------------------------------------------------------
+  # Copyright (c) 2013--, scikit-bio development team.
+  #
+  # Distributed under the terms of the Modified BSD License.
+  #
+  # The full license is in the file COPYING.txt, distributed with this software.
+  # ----------------------------------------------------------------------------
+
+  from __future__ import absolute_import, division, print_function
+
+Iterators
+=========
+
+Builtins
+--------
+
+Builtin iterators in Python 2 usually return lists, and have an
+alternative that returns an iterator (i.e., `range` and `xrange`,
+`items` and `iteritems`). In Python 3, only the iterator version
+exists but it uses the list-returning name (i.e., `range` and
+`items`).
+
+When iterating over the resulting object, the recommended approach
+depends on efficiency concerns:
+
+- If iteration only happens over a few items, you can use the
+  functions that exist both in Python 2 and Python 3.
+
+- If the number of iterations can be large and efficiency is
+  important, use the future package.
+
++--------------------+----------------------------+--------------------+
+|Small # of          |Efficient versions          |Notes               |
+|iterations (returns |(always iterators)          |                    |
+|lists in py2,       |                            |                    |
+|iterators in py3)   |                            |                    |
++--------------------+----------------------------+--------------------+
+|`zip`               |`future.builtins.zip`       |                    |
++--------------------+----------------------------+--------------------+
+|`range`             |`future.builtins.range`     |                    |
++--------------------+----------------------------+--------------------+
+|`map`               |`future.builtins.map`       |Prefer list         |
+|                    |                            |comprehensions or   |
+|                    |                            |for loops in        |
+|                    |                            |general. Avoid      |
+|                    |                            |calling functions   |
+|                    |                            |that cause side     |
+|                    |                            |effects when using  |
+|                    |                            |map. Gotcha: Py3's  |
+|                    |                            |`map` stops when the|
+|                    |                            |shortest iterable is|
+|                    |                            |exhausted, but Py2's|
+|                    |                            |pads them with      |
+|                    |                            |`None` till the     |
+|                    |                            |longest iterable is |
+|                    |                            |exhausted.          |
+|                    |                            |                    |
++--------------------+----------------------------+--------------------+
+|`filter`            |`future.builtins.filter`    |                    |
+|                    |                            |                    |
+|                    |                            |                    |
++--------------------+----------------------------+--------------------+
+|`functools.reduce`  |`functools.reduce`          |Avoid using the     |
+|                    |                            |global reduce       |
+|                    |                            |available in Py2 (it|
+|                    |                            |is the same as the  |
+|                    |                            |`functools` one)    |
++--------------------+----------------------------+--------------------+
+|`d.items()`         |`future.utils.viewitems(d)` |Efficient iteration |
+|                    |                            |over d *and*        |
+|                    |                            |set-like behaviour  |
++--------------------+----------------------------+--------------------+
+|`d.values()`        |`future.utils.viewvalues(d)`|Efficient iteration |
+|                    |                            |over d *and*        |
+|                    |                            |set-like behaviour  |
++--------------------+----------------------------+--------------------+
+|`d.keys()`          |`future.utils.viewkeys(d)`  |Hardly ever needed, |
+|                    |                            |as iterating over a |
+|                    |                            |dictionary yields   |
+|                    |                            |keys (thus sorted(d)|
+|                    |                            |returns the sorted  |
+|                    |                            |keys).              |
++--------------------+----------------------------+--------------------+
+
+
+When not directly iterating over an iterator, don't write code that
+relies on list-like behaviour: you may need to cast it explicitly. The
+following snippets show some possible issues::
+
+    a = zip(...)
+    b = zip(...)
+    c = a + b  # succeeds in Py2 (list concatenation), TypeError in Py3
+
+::
+
+    s = map(int, range(2))
+    1 in s  # True (membership testing in a list is an O(n) bad idea)
+    0 in s  # True in Py2, False in Py3
+
+In Py2, `s` is a list, so clearly `(1 in [0, 1]) == True` and `(0 in
+[0, 1]) == True`. In Py3, `s` is an iterator and the items it yields
+are discarded. Let's see an example with a generator to try and make
+it more clear::
+
+    >>> s = ((i, print(i)) for i in [0, 1, 2])  # print will let us see the iteration
+    >>> (1, None) in s  # Starts iterating over s...
+    0
+    1                   # ...till it finds (1, None)
+    True
+    >>> (0, None) in s  # Continues iterating over s
+    2                   # s is exhausted
+    False               # but (0, None) isn't there
+
+
+Advancing an iterator
+---------------------
+
+Always use the next function, which is available from Python 2.6
+onwards. Never call the next method, which doesn't exist in Py3.
+
+Implementing new iterators
+--------------------------
+
+Implement the `__next__` special method, like in Py3, and decorate the
+class::
+
+    from future.utils import implements_iterator
+
+    @implements_iterator
+    class ParameterIterBase(object):
+        def __next__(self):
+            return next(self._generator)
+
+It is also possible to subclass from `future.builtins.object`. In this
+case, no decorator is needed.
+
+Changes in the standard library
+===============================
+
+To deal with modules that live under a different place, future
+provides a context manager::
+
+    # Example from future's documentation
+    from future import standard_library
+
+    with standard_library.hooks():
+        from http.client import HTTPConnection
+        from itertools import filterfalse
+        import html.parser
+        import queue
+
+StringIO and BytesIO
+--------------------
+
+In Py2 there are three flavours of StringIO: a pure Python module
+(StringIO), an accelerated version (cStringIO), and another one in the
+io module. They all behave in a slightly different way, with different
+memory and performance characteristics. So far, we're using::
+
+    from six import StringIO
+
+It refers to `io.StringIO` in Py3, and `StringIO.StringIO` in Py2.
+
+If you need a binary file-like object (see the Text vs bytes section),
+use `six.BytesIO`, which refers to `io.BytesIO` in Py3, and `StringIO.StringIO`
+in Py2.
+
+Text vs bytes
+=============
+
+This is a fundamental change between Py2 and Py3. It is very important
+to always distinguish text from bytes.
+
+String literals that are to be treated as bytes need the `b`
+prefix. String literals that are text need either the `u` prefix or
+`from __future__ import unicode_literals` at the top.
+
+A brief introduction: Unicode, UTF-8, ASCII...
+----------------------------------------------
+
+A string can be seen as a sequence of characters. According to the
+Unicode standard, each character is represented by a code point (a
+number). For example, character `ñ` is represented by the Unicode code
+point `U+00F1`. Code points are still abstract and can be stored in a
+number of ways, including even little or big endian formats. There are
+many encodings that map code points to byte values (encode) and back
+(decode). Three important ones are ASCII, UTF-8 and latin-1:
+
+- ASCII is a 7 bit encoding that can handle a very limited range of
+  Unicode code points (not even the one corresponding to character
+  `ñ`).
+
+- UTF-8 is an encoding that can represent every Unicode character. It
+  is ASCII-compatible because code points that can also be represented
+  by ASCII are mapped to the same byte value by UTF-8 and ASCII. `ñ`
+  is represented by the byte sequence `\xC3\xB1`.
+
+- latin-1 is an ASCII-compatible 8 bit encoding that maps the first
+  256 Unicode code points to their byte values. That is, the Unicode
+  code point `U+00F1` (character `ñ`) is directly encoded as `0xF1` in
+  latin-1. The Py2 `str` type loosely worked by assuming everything
+  was encoded in latin-1.
+
+
+Text processing
+---------------
+
+    There Ain't No Such Thing As Plain Text.  -- Joel Spolsky, `The
+    Absolute Minimum Every Software Developer Absolutely, Positively
+    Must Know About Unicode and Character Sets (No Excuses!)
+    <http://www.joelonsoftware.com/articles/Unicode.html>`_, 2003.
+
+After going through Nick Coghlan's `"Processing Text Files in Python
+3"
+<https://ncoghlan_devs-python-notes.readthedocs.org/en/latest/python3/text_file_processing.html>`_
+I think the way forward is to process ASCII-like files (fasta, fastq)
+as binary files, and decode to strings some parts, if necessary. This
+is faster than processing them as text files, especially in Py3. In
+fact, it seems (from functions like `_phred_to_ascii*`) that these
+formats are in fact mixed binary and ASCII, which I think puts us in
+the same place as people dealing with `network protocols
+<https://ncoghlan_devs-python-notes.readthedocs.org/en/latest/python3/binary_protocols.html>`_:
+it's more cumbersome to do in Py3, especially before Python 3.5
+arrives, which will `reintroduce binary string interpolation
+<http://legacy.python.org/dev/peps/pep-0460/>`_.
+
+Gotchas
+-------
+
+Comparing bytes and text strings always returns `False` in Python 3
+(as they're incompatible types, and comparisons are required to
+succeed by the language)::
+
+    >>> b'GATCAT' == 'GATCAT'
+    False
+
+Calling `str` on a bytes instance returns a string with the `b` prefix
+and quotes, which will give unexpected results when using string
+formatting::
+
+    >>> "Sequence {}".format(b'GATCAT')
+    "Sequence b'GATCAT'"
+
+If you actually want to construct a text string, bytes objects need to
+be *decoded* into text. For example::
+
+    >>> "Sequence {}".format(b'GATCAT'.decode('utf-8'))
+
+If you want to efficiently construct a byte string, the most
+convenient way may be to call `b''.join(iterable of byte strings)`,
+though there are other options like using `io.BytesIO` or
+`bytearray`. For a very small number of byte strings, it may be OK to
+use the `+` operator.
+
+Run python with the `-b` flag to detect these two bug-prone usages,
+and `-bb` to turn them into exceptions.
+
+Instance checking: basestring, str, unicode, bytes, long, int
+=============================================================
+
+Strings
+-------
+
+When testing if a variable is a string use
+`six.string_types`. It refers to `basestring` in Py2 and `str` in Py3.
+`binary_type` and `text_type` are also available.
+
+Numbers
+-------
+
+The `long` type no longer exists in Py3. To test if a number is an
+integer (`int` or `long` in Py2, `int` in Py3), compare it to
+the abstract base class `Integral`::
+
+    from numbers import Integral
+    isinstance(quality, Integral)
+
+Implementing comparisons
+========================
+
+If the class you're defining has a `total ordering
+<http://en.wikipedia.org/wiki/Total_order>`_, either use
+`functools.total_ordering
+<https://docs.python.org/2.7/library/functools.html#functools.total_ordering>`_
+or implement all rich comparison methods if comparison performance is
+a bottleneck. Don't implement `__cmp__`, which was removed in Py3.
+
+However, usually only equality is important and you should only define
+`__eq__`. While compatibility with Py2 is kept, `__ne__` needs to be
+implemented too::
+
+    def __ne__(self, other):
+        """Required in Py2."""
+        return not self == other
+
+Otherwise, using the operator `!=` will lead to unexpected results in
+Py2 because it will compare identity, not equality::
+
+    class Foo(object):
+        def __eq__(self, other):
+            return True
+
+    print(Foo() != Foo())
+
+That prints `True` in Py2 (because each instance has a different `id`)
+but prints `False` in Py3 (the opposite of what `__eq__` returns,
+which is the desired behaviour).
+
+Always test that both `==` and `!=` are behaving correctly, e.g.::
+
+    def test_eq(self):
+        gc_1 = GeneticCode(self.sgc)
+        gc_2 = GeneticCode(self.sgc)
+        self.assertEqual(gc_1, gc_2)
+
+    def test_ne(self):
+        gc_1 = GeneticCode(self.sgc)
+        gc_2 = GeneticCode(self.sgc)
+        # Explicitly using !=
+        self.assertFalse(gc_1 != gc_2)
+
+Other modules
+=============
+
+Numpy
+-----
+
+Try to avoid setting dtypes to a string (i.e., use `dtype=np.float64`
+instead of `dtype='float'`, etc). It may be safe, but some warnings
+were raised when running Python with the `-b` flag. Also, field names
+in structured dtypes need to be bytes (`str` type) in Py2, but text
+(`str` type) in Py3 (`issue #2407
+<https://github.com/numpy/numpy/issues/2407>`_).
+
+Testing
+=======
+
+`unittest.assertEquals` is deprecated. Use `unittest.assertEqual`
+instead. The complete list of deprecated testing methods is `here
+<https://docs.python.org/3.4/library/unittest.html#deprecated-aliases>`_
diff --git a/doc/source/diversity.rst b/doc/source/diversity.rst
new file mode 100644
index 0000000..1822a20
--- /dev/null
+++ b/doc/source/diversity.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.diversity
diff --git a/doc/source/draw.rst b/doc/source/draw.rst
new file mode 100644
index 0000000..18779ae
--- /dev/null
+++ b/doc/source/draw.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.draw
diff --git a/doc/source/format.sequences.rst b/doc/source/format.sequences.rst
new file mode 100644
index 0000000..522e0c0
--- /dev/null
+++ b/doc/source/format.sequences.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.format.sequences
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000..17a1c63
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,37 @@
+scikit-bio
+==========
+
+scikit-bio (canonically pronounced *sigh-kit-buy-oh*) is a library for working
+with biological data in Python. scikit-bio is open source, BSD-licensed
+software that is currently under active development.
+
+API Reference
+-------------
+
+.. toctree::
+   :maxdepth: 1
+
+   alignment
+   sequence
+   tree
+   workflow
+   draw
+   format.sequences
+   diversity
+   stats
+   parse.sequences
+   io
+   util
+
+Developer Documentation
+-----------------------
+
+The developer documentation contains information for how to contribute
+to scikit-bio.
+
+.. toctree::
+   :maxdepth: 1
+
+   development/py3
+   development/coding_guidelines
+   development/new_module
diff --git a/doc/source/io.rst b/doc/source/io.rst
new file mode 100644
index 0000000..894d183
--- /dev/null
+++ b/doc/source/io.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.io
diff --git a/doc/source/parse.sequences.rst b/doc/source/parse.sequences.rst
new file mode 100644
index 0000000..c704610
--- /dev/null
+++ b/doc/source/parse.sequences.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.parse.sequences
\ No newline at end of file
diff --git a/doc/source/sequence.rst b/doc/source/sequence.rst
new file mode 100644
index 0000000..5d63f6d
--- /dev/null
+++ b/doc/source/sequence.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.sequence
diff --git a/doc/source/stats.rst b/doc/source/stats.rst
new file mode 100644
index 0000000..3a2d6e0
--- /dev/null
+++ b/doc/source/stats.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.stats
diff --git a/doc/source/tree.rst b/doc/source/tree.rst
new file mode 100644
index 0000000..ed81bf4
--- /dev/null
+++ b/doc/source/tree.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.tree
diff --git a/doc/source/util.rst b/doc/source/util.rst
new file mode 100644
index 0000000..bbb346f
--- /dev/null
+++ b/doc/source/util.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.util
diff --git a/doc/source/workflow.rst b/doc/source/workflow.rst
new file mode 100644
index 0000000..b636e4d
--- /dev/null
+++ b/doc/source/workflow.rst
@@ -0,0 +1 @@
+.. automodule:: skbio.workflow
diff --git a/doc/sphinxext/numpydoc/LICENSE.txt b/doc/sphinxext/numpydoc/LICENSE.txt
new file mode 100644
index 0000000..b15c699
--- /dev/null
+++ b/doc/sphinxext/numpydoc/LICENSE.txt
@@ -0,0 +1,94 @@
+-------------------------------------------------------------------------------
+    The files
+    - numpydoc.py
+    - docscrape.py
+    - docscrape_sphinx.py
+    - phantom_import.py
+    have the following license:
+
+Copyright (C) 2008 Stefan van der Walt <stefan at mentat.za.net>, Pauli Virtanen <pav at iki.fi>
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+-------------------------------------------------------------------------------
+    The files
+    - compiler_unparse.py
+    - comment_eater.py
+    - traitsdoc.py
+    have the following license:
+
+This software is OSI Certified Open Source Software.
+OSI Certified is a certification mark of the Open Source Initiative.
+
+Copyright (c) 2006, Enthought, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Enthought, Inc. nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+-------------------------------------------------------------------------------
+    The file
+    - plot_directive.py
+    originates from Matplotlib (http://matplotlib.sf.net/) which has
+    the following license:
+
+Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved.
+
+1. This LICENSE AGREEMENT is between John D. Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved” are retained in matplotlib 0.98 [...]
+
+3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3.
+
+4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS” basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement.
+
diff --git a/doc/sphinxext/numpydoc/README.rst b/doc/sphinxext/numpydoc/README.rst
new file mode 100644
index 0000000..0c40af1
--- /dev/null
+++ b/doc/sphinxext/numpydoc/README.rst
@@ -0,0 +1,54 @@
+.. image:: https://travis-ci.org/numpy/numpydoc.png?branch=master
+   :target: https://travis-ci.org/numpy/numpydoc/
+
+=====================================
+numpydoc -- Numpy's Sphinx extensions
+=====================================
+
+Numpy's documentation uses several custom extensions to Sphinx.  These
+are shipped in this ``numpydoc`` package, in case you want to make use
+of them in third-party projects.
+
+The following extensions are available:
+
+  - ``numpydoc``: support for the Numpy docstring format in Sphinx, and add
+    the code description directives ``np:function``, ``np-c:function``, etc.
+    that support the Numpy docstring syntax.
+
+  - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes.
+
+  - ``numpydoc.plot_directive``: Adaptation of Matplotlib's ``plot::``
+    directive. Note that this implementation may still undergo severe
+    changes or eventually be deprecated.
+
+
+numpydoc
+========
+
+Numpydoc inserts a hook into Sphinx's autodoc that converts docstrings
+following the Numpy/Scipy format to a form palatable to Sphinx.
+
+Options
+-------
+
+The following options can be set in conf.py:
+
+- numpydoc_use_plots: bool
+
+  Whether to produce ``plot::`` directives for Examples sections that
+  contain ``import matplotlib``.
+
+- numpydoc_show_class_members: bool
+
+  Whether to show all members of a class in the Methods and Attributes
+  sections automatically.
+
+- numpydoc_class_members_toctree: bool
+
+  Whether to create a Sphinx table of contents for the lists of class
+  methods and attributes. If a table of contents is made, Sphinx expects
+  each entry to have a separate page.
+
+- numpydoc_edit_link: bool  (DEPRECATED -- edit your HTML template instead)
+
+  Whether to insert an edit link after docstrings.
diff --git a/doc/sphinxext/numpydoc/numpydoc/__init__.py b/doc/sphinxext/numpydoc/numpydoc/__init__.py
new file mode 100644
index 0000000..0fce2cf
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/__init__.py
@@ -0,0 +1,3 @@
+from __future__ import division, absolute_import, print_function
+
+from .numpydoc import setup
diff --git a/doc/sphinxext/numpydoc/numpydoc/comment_eater.py b/doc/sphinxext/numpydoc/numpydoc/comment_eater.py
new file mode 100644
index 0000000..8cddd33
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/comment_eater.py
@@ -0,0 +1,169 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+if sys.version_info[0] >= 3:
+    from io import StringIO
+else:
+    from io import StringIO
+
+import compiler
+import inspect
+import textwrap
+import tokenize
+
+from .compiler_unparse import unparse
+
+
+class Comment(object):
+    """ A comment block.
+    """
+    is_comment = True
+    def __init__(self, start_lineno, end_lineno, text):
+        # int : The first line number in the block. 1-indexed.
+        self.start_lineno = start_lineno
+        # int : The last line number. Inclusive!
+        self.end_lineno = end_lineno
+        # str : The text block including '#' character but not any leading spaces.
+        self.text = text
+
+    def add(self, string, start, end, line):
+        """ Add a new comment line.
+        """
+        self.start_lineno = min(self.start_lineno, start[0])
+        self.end_lineno = max(self.end_lineno, end[0])
+        self.text += string
+
+    def __repr__(self):
+        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
+            self.end_lineno, self.text)
+
+
+class NonComment(object):
+    """ A non-comment block of code.
+    """
+    is_comment = False
+    def __init__(self, start_lineno, end_lineno):
+        self.start_lineno = start_lineno
+        self.end_lineno = end_lineno
+
+    def add(self, string, start, end, line):
+        """ Add lines to the block.
+        """
+        if string.strip():
+            # Only add if not entirely whitespace.
+            self.start_lineno = min(self.start_lineno, start[0])
+            self.end_lineno = max(self.end_lineno, end[0])
+
+    def __repr__(self):
+        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
+            self.end_lineno)
+
+
+class CommentBlocker(object):
+    """ Pull out contiguous comment blocks.
+    """
+    def __init__(self):
+        # Start with a dummy.
+        self.current_block = NonComment(0, 0)
+
+        # All of the blocks seen so far.
+        self.blocks = []
+
+        # The index mapping lines of code to their associated comment blocks.
+        self.index = {}
+
+    def process_file(self, file):
+        """ Process a file object.
+        """
+        if sys.version_info[0] >= 3:
+            nxt = file.__next__
+        else:
+            nxt = file.next
+        for token in tokenize.generate_tokens(nxt):
+            self.process_token(*token)
+        self.make_index()
+
+    def process_token(self, kind, string, start, end, line):
+        """ Process a single token.
+        """
+        if self.current_block.is_comment:
+            if kind == tokenize.COMMENT:
+                self.current_block.add(string, start, end, line)
+            else:
+                self.new_noncomment(start[0], end[0])
+        else:
+            if kind == tokenize.COMMENT:
+                self.new_comment(string, start, end, line)
+            else:
+                self.current_block.add(string, start, end, line)
+
+    def new_noncomment(self, start_lineno, end_lineno):
+        """ We are transitioning from a noncomment to a comment.
+        """
+        block = NonComment(start_lineno, end_lineno)
+        self.blocks.append(block)
+        self.current_block = block
+
+    def new_comment(self, string, start, end, line):
+        """ Possibly add a new comment.
+
+        Only adds a new comment if this comment is the only thing on the line.
+        Otherwise, it extends the noncomment block.
+        """
+        prefix = line[:start[1]]
+        if prefix.strip():
+            # Oops! Trailing comment, not a comment block.
+            self.current_block.add(string, start, end, line)
+        else:
+            # A comment block.
+            block = Comment(start[0], end[0], string)
+            self.blocks.append(block)
+            self.current_block = block
+
+    def make_index(self):
+        """ Make the index mapping lines of actual code to their associated
+        prefix comments.
+        """
+        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
+            if not block.is_comment:
+                self.index[block.start_lineno] = prev
+
+    def search_for_comment(self, lineno, default=None):
+        """ Find the comment block just before the given line number.
+
+        Returns None (or the specified default) if there is no such block.
+        """
+        if not self.index:
+            self.make_index()
+        block = self.index.get(lineno, None)
+        text = getattr(block, 'text', default)
+        return text
+
+
+def strip_comment_marker(text):
+    """ Strip # markers at the front of a block of comment text.
+    """
+    lines = []
+    for line in text.splitlines():
+        lines.append(line.lstrip('#'))
+    text = textwrap.dedent('\n'.join(lines))
+    return text
+
+
+def get_class_traits(klass):
+    """ Yield all of the documentation for trait definitions on a class object.
+    """
+    # FIXME: gracefully handle errors here or in the caller?
+    source = inspect.getsource(klass)
+    cb = CommentBlocker()
+    cb.process_file(StringIO(source))
+    mod_ast = compiler.parse(source)
+    class_ast = mod_ast.node.nodes[0]
+    for node in class_ast.code.nodes:
+        # FIXME: handle other kinds of assignments?
+        if isinstance(node, compiler.ast.Assign):
+            name = node.nodes[0].name
+            rhs = unparse(node.expr).strip()
+            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
+            yield name, rhs, doc
+
diff --git a/doc/sphinxext/numpydoc/numpydoc/compiler_unparse.py b/doc/sphinxext/numpydoc/numpydoc/compiler_unparse.py
new file mode 100644
index 0000000..8933a83
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/compiler_unparse.py
@@ -0,0 +1,865 @@
+""" Turn compiler.ast structures back into executable python code.
+
+    The unparse method takes a compiler.ast tree and transforms it back into
+    valid python code.  It is incomplete and currently only works for
+    import statements, function calls, function definitions, assignments, and
+    basic expressions.
+
+    Inspired by python-2.5-svn/Demo/parser/unparse.py
+
+    fixme: We may want to move to using _ast trees because the compiler for
+           them is about 6 times faster than compiler.compile.
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
+
+if sys.version_info[0] >= 3:
+    from io import StringIO
+else:
+    from StringIO import StringIO
+
def unparse(ast, single_line_functions=False):
    """ Return Python source text reconstructed from a compiler.ast tree.

    The leading newline/indentation that the unparser emits before the
    first statement is stripped off.
    """
    buffer = StringIO()
    UnparseCompilerAst(ast, buffer, single_line_functions)
    return buffer.getvalue().lstrip()
+
# Relative precedence of binary-operator nodes, keyed by str(node.__class__)
# (e.g. 'compiler.ast.Mul').  Used by UnparseCompilerAst.__binary_op to
# decide whether an operand needs parentheses.
op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
+
class UnparseCompilerAst:
    """ Methods in this class recursively traverse an AST and
        output source code for the abstract syntax; original formatting
        is disregarded.
    """

    #########################################################################
    # object interface.
    #########################################################################

    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
        """ Unparser(tree, file=sys.stdout) -> None.

            Print the source for tree to file.

            tree : a compiler.ast node (Python 2 compiler package)
            file : file-like destination stream; written during construction
            single_line_functions : when True, function bodies are emitted
                on a single line, with statements separated by ';'
        """
        self.f = file
        self._single_func = single_line_functions
        # _do_indent is temporarily switched off while a single-line
        # function body is being written (see _Function).
        self._do_indent = True
        self._indent = 0
        # Unparsing is eager: constructing the object writes the whole
        # tree to `file` and flushes it.
        self._dispatch(tree)
        self._write("\n")
        self.f.flush()
+
    #########################################################################
    # Unparser private interface.
    #########################################################################

    ### format, output, and dispatch methods ################################

    def _fill(self, text = ""):
        "Indent a piece of text, according to the current indentation level"
        if self._do_indent:
            self._write("\n"+"    "*self._indent + text)
        else:
            # Single-line mode: no newline, no indentation.
            self._write(text)

    def _write(self, text):
        "Append a piece of text to the current line."
        self.f.write(text)

    def _enter(self):
        "Print ':', and increase the indentation."
        self._write(": ")
        self._indent += 1

    def _leave(self):
        "Decrease the indentation level."
        self._indent -= 1

    def _dispatch(self, tree):
        "_dispatcher function, _dispatching tree type T to method _T."
        if isinstance(tree, list):
            for t in tree:
                self._dispatch(t)
            return
        # Method lookup happens before the NoneType guard below, so an
        # unhandled node type raises AttributeError at this line.
        meth = getattr(self, "_"+tree.__class__.__name__)
        # Suppress bare None values while writing single-line bodies.
        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
            return
        meth(tree)
+
+
    #########################################################################
    # compiler.ast unparsing methods.
    #
    # There should be one method per concrete grammar type. They are
    # organized in alphabetical order.
    #########################################################################

    def _Add(self, t):
        # Binary '+' with precedence-aware parenthesisation.
        self.__binary_op(t, '+')

    def _And(self, t):
        # Boolean 'and'; every operand is parenthesised defensively.
        self._write(" (")
        for i, node in enumerate(t.nodes):
            self._dispatch(node)
            if i != len(t.nodes)-1:
                self._write(") and (")
        self._write(")")

    def _AssAttr(self, t):
        """ Handle assigning an attribute of an object
        """
        self._dispatch(t.expr)
        self._write('.'+t.attrname)

    def _Assign(self, t):
        """ Expression Assignment such as "a = 1".

            This only handles assignment in expressions.  Keyword assignment
            is handled separately.
        """
        self._fill()
        # Chained assignment ("a = b = expr"): every target gets " = ".
        for target in t.nodes:
            self._dispatch(target)
            self._write(" = ")
        self._dispatch(t.expr)
        if not self._do_indent:
            self._write('; ')

    def _AssName(self, t):
        """ Name on left hand side of expression.

            Treat just like a name on the right side of an expression.
        """
        self._Name(t)

    def _AssTuple(self, t):
        """ Tuple on left hand side of an expression.
        """

        # _write each elements, separated by a comma.
        for element in t.nodes[:-1]:
            self._dispatch(element)
            self._write(", ")

        # Handle the last one without writing comma
        last_element = t.nodes[-1]
        self._dispatch(last_element)

    def _AugAssign(self, t):
        """ +=,-=,*=,/=,**=, etc. operations
        """

        self._fill()
        self._dispatch(t.node)
        # t.op already carries the augmented form, e.g. '+='.
        self._write(' '+t.op+' ')
        self._dispatch(t.expr)
        if not self._do_indent:
            self._write(';')

    def _Bitand(self, t):
        """ Bit and operation.
        """

        for i, node in enumerate(t.nodes):
            self._write("(")
            self._dispatch(node)
            self._write(")")
            if i != len(t.nodes)-1:
                self._write(" & ")

    def _Bitor(self, t):
        """ Bit or operation
        """

        for i, node in enumerate(t.nodes):
            self._write("(")
            self._dispatch(node)
            self._write(")")
            if i != len(t.nodes)-1:
                self._write(" | ")

    def _CallFunc(self, t):
        """ Function call.
        """
        self._dispatch(t.node)
        self._write("(")
        # Positional args first, then *args, then **kwargs, all
        # comma-separated.
        comma = False
        for e in t.args:
            if comma: self._write(", ")
            else: comma = True
            self._dispatch(e)
        if t.star_args:
            if comma: self._write(", ")
            else: comma = True
            self._write("*")
            self._dispatch(t.star_args)
        if t.dstar_args:
            if comma: self._write(", ")
            else: comma = True
            self._write("**")
            self._dispatch(t.dstar_args)
        self._write(")")

    def _Compare(self, t):
        # Chained comparison: expr followed by (op, expr) pairs,
        # e.g. "a < b <= c".
        self._dispatch(t.expr)
        for op, expr in t.ops:
            self._write(" " + op + " ")
            self._dispatch(expr)
    def _Const(self, t):
        """ A constant value such as an integer value, 3, or a string, "hello".
        """
        # Dispatch on the value's Python type (_int, _float, _str, ...).
        self._dispatch(t.value)

    def _Decorators(self, t):
        """ Handle function decorators (eg. @has_units)
        """
        for node in t.nodes:
            self._dispatch(node)

    def _Dict(self, t):
        # Dictionary display: {k1: v1, k2: v2, ...}
        self._write("{")
        for  i, (k, v) in enumerate(t.items):
            self._dispatch(k)
            self._write(": ")
            self._dispatch(v)
            if i < len(t.items)-1:
                self._write(", ")
        self._write("}")

    def _Discard(self, t):
        """ Node for when return value is ignored such as in "foo(a)".
        """
        self._fill()
        self._dispatch(t.expr)

    def _Div(self, t):
        self.__binary_op(t, '/')

    def _Ellipsis(self, t):
        self._write("...")

    def _From(self, t):
        """ Handle "from xyz import foo, bar as baz".
        """
        # fixme: Are From and ImportFrom handled differently?
        self._fill("from ")
        self._write(t.modname)
        self._write(" import ")
        for i, (name,asname) in enumerate(t.names):
            if i != 0:
                self._write(", ")
            self._write(name)
            if asname is not None:
                self._write(" as "+asname)

    def _Function(self, t):
        """ Handle function definitions
        """
        if t.decorators is not None:
            self._fill("@")
            self._dispatch(t.decorators)
        self._fill("def "+t.name + "(")
        # Defaults align with the *tail* of the argument list; pad the
        # front with None so zip pairs each argument with its default.
        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
        for i, arg in enumerate(zip(t.argnames, defaults)):
            self._write(arg[0])
            if arg[1] is not None:
                self._write('=')
                self._dispatch(arg[1])
            if i < len(t.argnames)-1:
                self._write(', ')
        self._write(")")
        # In single-line mode, suppress indentation for the body and
        # restore it afterwards.
        if self._single_func:
            self._do_indent = False
        self._enter()
        self._dispatch(t.code)
        self._leave()
        self._do_indent = True

    def _Getattr(self, t):
        """ Handle getting an attribute of an object
        """
        # Parenthesise arithmetic sub-expressions: "(a + b).attr".
        if isinstance(t.expr, (Div, Mul, Sub, Add)):
            self._write('(')
            self._dispatch(t.expr)
            self._write(')')
        else:
            self._dispatch(t.expr)

        self._write('.'+t.attrname)
    def _If(self, t):
        """ if/elif/else statement.

        t.tests is a list of (condition, body) pairs: the first pair is
        the "if" branch, subsequent pairs are "elif" branches.
        """
        self._fill()

        for i, (compare,code) in enumerate(t.tests):
            if i == 0:
                self._write("if ")
            else:
                self._write("elif ")
            self._dispatch(compare)
            self._enter()
            self._fill()
            self._dispatch(code)
            self._leave()
            self._write("\n")

        if t.else_ is not None:
            self._write("else")
            self._enter()
            self._fill()
            self._dispatch(t.else_)
            self._leave()
            self._write("\n")

    def _IfExp(self, t):
        # Conditional expression: "<then> if <test> else (<else>)".
        self._dispatch(t.then)
        self._write(" if ")
        self._dispatch(t.test)

        if t.else_ is not None:
            self._write(" else (")
            self._dispatch(t.else_)
            self._write(")")
+
    def _Import(self, t):
        """ Handle "import xyz.foo".
        """
        self._fill("import ")

        for i, (name,asname) in enumerate(t.names):
            if i != 0:
                self._write(", ")
            self._write(name)
            if asname is not None:
                self._write(" as "+asname)

    def _Keyword(self, t):
        """ Keyword value assignment within function calls and definitions.
        """
        self._write(t.name)
        self._write("=")
        self._dispatch(t.expr)

    def _List(self, t):
        # List display: [a, b, ...]
        self._write("[")
        for  i,node in enumerate(t.nodes):
            self._dispatch(node)
            if i < len(t.nodes)-1:
                self._write(", ")
        self._write("]")

    def _Module(self, t):
        # Module docstring (if present) followed by the module body.
        if t.doc is not None:
            self._dispatch(t.doc)
        self._dispatch(t.node)

    def _Mul(self, t):
        self.__binary_op(t, '*')

    def _Name(self, t):
        # Bare identifier.
        self._write(t.name)

    def _NoneType(self, t):
        self._write("None")

    def _Not(self, t):
        self._write('not (')
        self._dispatch(t.expr)
        self._write(')')

    def _Or(self, t):
        # Boolean 'or'; operands parenthesised defensively (cf. _And).
        self._write(" (")
        for i, node in enumerate(t.nodes):
            self._dispatch(node)
            if i != len(t.nodes)-1:
                self._write(") or (")
        self._write(")")

    def _Pass(self, t):
        self._write("pass\n")

    def _Printnl(self, t):
        # Python 2 print statement, including the ">> dest" redirect form.
        self._fill("print ")
        if t.dest:
            self._write(">> ")
            self._dispatch(t.dest)
            self._write(", ")
        comma = False
        for node in t.nodes:
            if comma: self._write(', ')
            else: comma = True
            self._dispatch(node)

    def _Power(self, t):
        self.__binary_op(t, '**')

    def _Return(self, t):
        self._fill("return ")
        if t.value:
            if isinstance(t.value, Tuple):
                # NOTE(review): assumes every tuple element is a Name node
                # (accesses name.name); other element types would raise
                # AttributeError here -- confirm against expected inputs.
                text = ', '.join([ name.name for name in t.value.asList() ])
                self._write(text)
            else:
                self._dispatch(t.value)
            if not self._do_indent:
                self._write('; ')
+
    def _Slice(self, t):
        # Simple slice: expr[lower:upper].  The step component is not
        # emitted (handling is stubbed out below).
        self._dispatch(t.expr)
        self._write("[")
        if t.lower:
            self._dispatch(t.lower)
        self._write(":")
        if t.upper:
            self._dispatch(t.upper)
        #if t.step:
        #    self._write(":")
        #    self._dispatch(t.step)
        self._write("]")

    def _Sliceobj(self, t):
        # Extended slice parts, colon-separated; None constants are elided
        # so e.g. "a[::2]" round-trips without writing "None".
        for i, node in enumerate(t.nodes):
            if i != 0:
                self._write(":")
            if not (isinstance(node, Const) and node.value is None):
                self._dispatch(node)

    def _Stmt(self, tree):
        # Statement sequence: dispatch each child in order.
        for node in tree.nodes:
            self._dispatch(node)

    def _Sub(self, t):
        self.__binary_op(t, '-')

    def _Subscript(self, t):
        self._dispatch(t.expr)
        self._write("[")
        for i, value in enumerate(t.subs):
            if i != 0:
                self._write(",")
            self._dispatch(value)
        self._write("]")

    def _TryExcept(self, t):
        self._fill("try")
        self._enter()
        self._dispatch(t.body)
        self._leave()

        # Each handler is a (exception-type, target, body) triple; type
        # and target may be None for a bare "except:".
        for handler in t.handlers:
            self._fill('except ')
            self._dispatch(handler[0])
            if handler[1] is not None:
                self._write(', ')
                self._dispatch(handler[1])
            self._enter()
            self._dispatch(handler[2])
            self._leave()

        if t.else_:
            self._fill("else")
            self._enter()
            self._dispatch(t.else_)
            self._leave()

    def _Tuple(self, t):
        # Tuple display, always parenthesised; "()" for the empty tuple.
        if not t.nodes:
            # Empty tuple.
            self._write("()")
        else:
            self._write("(")

            # _write each elements, separated by a comma.
            for element in t.nodes[:-1]:
                self._dispatch(element)
                self._write(", ")

            # Handle the last one without writing comma
            last_element = t.nodes[-1]
            self._dispatch(last_element)

            self._write(")")

    def _UnaryAdd(self, t):
        self._write("+")
        self._dispatch(t.expr)

    def _UnarySub(self, t):
        self._write("-")
        self._dispatch(t.expr)
+
+    def _With(self, t):
+        self._fill('with ')
+        self._dispatch(t.expr)
+        if t.vars:
+            self._write(' as ')
+            self._dispatch(t.vars.name)
+        self._enter()
+        self._dispatch(t.body)
+        self._leave()
+        self._write('\n')
+        
    def _int(self, t):
        # Plain Python int reached via Const.value.
        self._write(repr(t))

    def __binary_op(self, t, symbol):
        """ Emit a binary operation, parenthesising an operand whose
            operator binds less tightly than this node's.  Precedence is
            looked up in the module-level op_precedence table, keyed by
            str(node.__class__).
        """
        # Check if parenthesis are needed on left side and then dispatch
        has_paren = False
        left_class = str(t.left.__class__)
        if (left_class in op_precedence.keys() and
            op_precedence[left_class] < op_precedence[str(t.__class__)]):
            has_paren = True
        if has_paren:
            self._write('(')
        self._dispatch(t.left)
        if has_paren:
            self._write(')')
        # Write the appropriate symbol for operator
        self._write(symbol)
        # Check if parenthesis are needed on the right side and then dispatch
        has_paren = False
        right_class = str(t.right.__class__)
        if (right_class in op_precedence.keys() and
            op_precedence[right_class] < op_precedence[str(t.__class__)]):
            has_paren = True
        if has_paren:
            self._write('(')
        self._dispatch(t.right)
        if has_paren:
            self._write(')')

    def _float(self, t):
        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
        # We prefer str here.
        self._write(str(t))

    def _str(self, t):
        # String constant reached via Const.value; repr supplies quotes.
        self._write(repr(t))

    def _tuple(self, t):
        # Plain tuple constant (not a Tuple node).
        self._write(str(t))
+
    #########################################################################
    # These are the methods ported from the _ast module's unparse.py.
    #
    # They are currently disabled (commented out).  As our needs to handle
    # more advanced code grow, some of the methods below may be revived and
    # adapted so that they work with compiler.ast nodes.
    #########################################################################
+
+#    # stmt
+#    def _Expr(self, tree):
+#        self._fill()
+#        self._dispatch(tree.value)
+#
+#    def _Import(self, t):
+#        self._fill("import ")
+#        first = True
+#        for a in t.names:
+#            if first:
+#                first = False
+#            else:
+#                self._write(", ")
+#            self._write(a.name)
+#            if a.asname:
+#                self._write(" as "+a.asname)
+#
+##    def _ImportFrom(self, t):
+##        self._fill("from ")
+##        self._write(t.module)
+##        self._write(" import ")
+##        for i, a in enumerate(t.names):
+##            if i == 0:
+##                self._write(", ")
+##            self._write(a.name)
+##            if a.asname:
+##                self._write(" as "+a.asname)
+##        # XXX(jpe) what is level for?
+##
+#
+#    def _Break(self, t):
+#        self._fill("break")
+#
+#    def _Continue(self, t):
+#        self._fill("continue")
+#
+#    def _Delete(self, t):
+#        self._fill("del ")
+#        self._dispatch(t.targets)
+#
+#    def _Assert(self, t):
+#        self._fill("assert ")
+#        self._dispatch(t.test)
+#        if t.msg:
+#            self._write(", ")
+#            self._dispatch(t.msg)
+#
+#    def _Exec(self, t):
+#        self._fill("exec ")
+#        self._dispatch(t.body)
+#        if t.globals:
+#            self._write(" in ")
+#            self._dispatch(t.globals)
+#        if t.locals:
+#            self._write(", ")
+#            self._dispatch(t.locals)
+#
+#    def _Print(self, t):
+#        self._fill("print ")
+#        do_comma = False
+#        if t.dest:
+#            self._write(">>")
+#            self._dispatch(t.dest)
+#            do_comma = True
+#        for e in t.values:
+#            if do_comma:self._write(", ")
+#            else:do_comma=True
+#            self._dispatch(e)
+#        if not t.nl:
+#            self._write(",")
+#
+#    def _Global(self, t):
+#        self._fill("global")
+#        for i, n in enumerate(t.names):
+#            if i != 0:
+#                self._write(",")
+#            self._write(" " + n)
+#
+#    def _Yield(self, t):
+#        self._fill("yield")
+#        if t.value:
+#            self._write(" (")
+#            self._dispatch(t.value)
+#            self._write(")")
+#
+#    def _Raise(self, t):
+#        self._fill('raise ')
+#        if t.type:
+#            self._dispatch(t.type)
+#        if t.inst:
+#            self._write(", ")
+#            self._dispatch(t.inst)
+#        if t.tback:
+#            self._write(", ")
+#            self._dispatch(t.tback)
+#
+#
+#    def _TryFinally(self, t):
+#        self._fill("try")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#        self._fill("finally")
+#        self._enter()
+#        self._dispatch(t.finalbody)
+#        self._leave()
+#
+#    def _excepthandler(self, t):
+#        self._fill("except ")
+#        if t.type:
+#            self._dispatch(t.type)
+#        if t.name:
+#            self._write(", ")
+#            self._dispatch(t.name)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _ClassDef(self, t):
+#        self._write("\n")
+#        self._fill("class "+t.name)
+#        if t.bases:
+#            self._write("(")
+#            for a in t.bases:
+#                self._dispatch(a)
+#                self._write(", ")
+#            self._write(")")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _FunctionDef(self, t):
+#        self._write("\n")
+#        for deco in t.decorators:
+#            self._fill("@")
+#            self._dispatch(deco)
+#        self._fill("def "+t.name + "(")
+#        self._dispatch(t.args)
+#        self._write(")")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _For(self, t):
+#        self._fill("for ")
+#        self._dispatch(t.target)
+#        self._write(" in ")
+#        self._dispatch(t.iter)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#        if t.orelse:
+#            self._fill("else")
+#            self._enter()
+#            self._dispatch(t.orelse)
+#            self._leave
+#
+#    def _While(self, t):
+#        self._fill("while ")
+#        self._dispatch(t.test)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#        if t.orelse:
+#            self._fill("else")
+#            self._enter()
+#            self._dispatch(t.orelse)
+#            self._leave
+#
+#    # expr
+#    def _Str(self, tree):
+#        self._write(repr(tree.s))
+##
+#    def _Repr(self, t):
+#        self._write("`")
+#        self._dispatch(t.value)
+#        self._write("`")
+#
+#    def _Num(self, t):
+#        self._write(repr(t.n))
+#
+#    def _ListComp(self, t):
+#        self._write("[")
+#        self._dispatch(t.elt)
+#        for gen in t.generators:
+#            self._dispatch(gen)
+#        self._write("]")
+#
+#    def _GeneratorExp(self, t):
+#        self._write("(")
+#        self._dispatch(t.elt)
+#        for gen in t.generators:
+#            self._dispatch(gen)
+#        self._write(")")
+#
+#    def _comprehension(self, t):
+#        self._write(" for ")
+#        self._dispatch(t.target)
+#        self._write(" in ")
+#        self._dispatch(t.iter)
+#        for if_clause in t.ifs:
+#            self._write(" if ")
+#            self._dispatch(if_clause)
+#
+#    def _IfExp(self, t):
+#        self._dispatch(t.body)
+#        self._write(" if ")
+#        self._dispatch(t.test)
+#        if t.orelse:
+#            self._write(" else ")
+#            self._dispatch(t.orelse)
+#
+#    unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
+#    def _UnaryOp(self, t):
+#        self._write(self.unop[t.op.__class__.__name__])
+#        self._write("(")
+#        self._dispatch(t.operand)
+#        self._write(")")
+#
+#    binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
+#                    "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
+#                    "FloorDiv":"//", "Pow": "**"}
+#    def _BinOp(self, t):
+#        self._write("(")
+#        self._dispatch(t.left)
+#        self._write(")" + self.binop[t.op.__class__.__name__] + "(")
+#        self._dispatch(t.right)
+#        self._write(")")
+#
+#    boolops = {_ast.And: 'and', _ast.Or: 'or'}
+#    def _BoolOp(self, t):
+#        self._write("(")
+#        self._dispatch(t.values[0])
+#        for v in t.values[1:]:
+#            self._write(" %s " % self.boolops[t.op.__class__])
+#            self._dispatch(v)
+#        self._write(")")
+#
+#    def _Attribute(self,t):
+#        self._dispatch(t.value)
+#        self._write(".")
+#        self._write(t.attr)
+#
+##    def _Call(self, t):
+##        self._dispatch(t.func)
+##        self._write("(")
+##        comma = False
+##        for e in t.args:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._dispatch(e)
+##        for e in t.keywords:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._dispatch(e)
+##        if t.starargs:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._write("*")
+##            self._dispatch(t.starargs)
+##        if t.kwargs:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._write("**")
+##            self._dispatch(t.kwargs)
+##        self._write(")")
+#
+#    # slice
+#    def _Index(self, t):
+#        self._dispatch(t.value)
+#
+#    def _ExtSlice(self, t):
+#        for i, d in enumerate(t.dims):
+#            if i != 0:
+#                self._write(': ')
+#            self._dispatch(d)
+#
+#    # others
+#    def _arguments(self, t):
+#        first = True
+#        nonDef = len(t.args)-len(t.defaults)
+#        for a in t.args[0:nonDef]:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._dispatch(a)
+#        for a,d in zip(t.args[nonDef:], t.defaults):
+#            if first:first = False
+#            else: self._write(", ")
+#            self._dispatch(a),
+#            self._write("=")
+#            self._dispatch(d)
+#        if t.vararg:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._write("*"+t.vararg)
+#        if t.kwarg:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._write("**"+t.kwarg)
+#
+##    def _keyword(self, t):
+##        self._write(t.arg)
+##        self._write("=")
+##        self._dispatch(t.value)
+#
+#    def _Lambda(self, t):
+#        self._write("lambda ")
+#        self._dispatch(t.args)
+#        self._write(": ")
+#        self._dispatch(t.body)
+
+
+
diff --git a/doc/sphinxext/numpydoc/numpydoc/docscrape.py b/doc/sphinxext/numpydoc/numpydoc/docscrape.py
new file mode 100644
index 0000000..b31d06d
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/docscrape.py
@@ -0,0 +1,525 @@
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import inspect
+import textwrap
+import re
+import pydoc
+from warnings import warn
+import collections
+
+
class Reader(object):
    """A line-based string reader with a movable cursor."""

    def __init__(self, data):
        """
        Parameters
        ----------
        data : str or list of str
           String with lines separated by '\\n', or a pre-split list
           of lines.
        """
        # Normalise to a list of lines; keep the attribute name _str
        # because other code in this module reaches into it.
        self._str = data if isinstance(data, list) else data.split('\n')
        self.reset()

    def __getitem__(self, n):
        return self._str[n]

    def reset(self):
        # Rewind the cursor to the first line.
        self._l = 0

    def read(self):
        """Return the current line and advance the cursor; '' at EOF."""
        if self.eof():
            return ''
        line = self._str[self._l]
        self._l += 1
        return line

    def seek_next_non_empty_line(self):
        """Advance the cursor to the next line with visible content."""
        while not self.eof() and not self._str[self._l].strip():
            self._l += 1

    def eof(self):
        return self._l >= len(self._str)

    def read_to_condition(self, condition_func):
        """Consume and return lines until condition_func(line) is true.

        The line that satisfies the condition is not consumed.  Returns
        [] when the condition never fires before the end of input.
        """
        start = self._l
        for line in self._str[start:]:
            if condition_func(line):
                return self._str[start:self._l]
            self._l += 1
            if self.eof():
                # Preserved quirk: slice end of _l + 1 (harmless, since
                # slicing past the end just returns the remaining lines).
                return self._str[start:self._l + 1]
        return []

    def read_to_next_empty_line(self):
        """Skip blank lines, then read up to the next blank line."""
        self.seek_next_non_empty_line()
        return self.read_to_condition(lambda line: not line.strip())

    def read_to_next_unindented_line(self):
        """Read up to the next non-blank line that starts at column 0."""
        return self.read_to_condition(
            lambda line: bool(line.strip()) and len(line.lstrip()) == len(line))

    def peek(self, n=0):
        """Return the line n ahead of the cursor without advancing.

        Note: a negative offset that lands before line 0 wraps around to
        the end of the list (plain Python negative indexing), matching
        historical behaviour relied on elsewhere in this module.
        """
        idx = self._l + n
        return self._str[idx] if idx < len(self._str) else ''

    def is_empty(self):
        # True when the reader holds no visible content at all.
        return not ''.join(self._str).strip()
+
+
+class NumpyDocString(object):
+    def __init__(self, docstring, config={}):
+        docstring = textwrap.dedent(docstring).split('\n')
+
+        self._doc = Reader(docstring)
+        self._parsed_data = {
+            'Signature': '',
+            'Summary': [''],
+            'Extended Summary': [],
+            'Parameters': [],
+            'Returns': [],
+            'Raises': [],
+            'Warns': [],
+            'Other Parameters': [],
+            'Attributes': [],
+            'Methods': [],
+            'See Also': [],
+            'Notes': [],
+            'Warnings': [],
+            'References': '',
+            'Examples': '',
+            'index': {}
+            }
+
+        self._parse()
+
+    def __getitem__(self,key):
+        return self._parsed_data[key]
+
+    def __setitem__(self,key,val):
+        if key not in self._parsed_data:
+            warn("Unknown section %s" % key)
+        else:
+            self._parsed_data[key] = val
+
+    def _is_at_section(self):
+        self._doc.seek_next_non_empty_line()
+
+        if self._doc.eof():
+            return False
+
+        l1 = self._doc.peek().strip()  # e.g. Parameters
+
+        if l1.startswith('.. index::'):
+            return True
+
+        l2 = self._doc.peek(1).strip() #    ---------- or ==========
+        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+    def _strip(self,doc):
+        i = 0
+        j = 0
+        for i,line in enumerate(doc):
+            if line.strip(): break
+
+        for j,line in enumerate(doc[::-1]):
+            if line.strip(): break
+
+        return doc[i:len(doc)-j]
+
+    def _read_to_next_section(self):
+        section = self._doc.read_to_next_empty_line()
+
+        while not self._is_at_section() and not self._doc.eof():
+            if not self._doc.peek(-1).strip(): # previous line was empty
+                section += ['']
+
+            section += self._doc.read_to_next_empty_line()
+
+        return section
+
+    def _read_sections(self):
+        while not self._doc.eof():
+            data = self._read_to_next_section()
+            name = data[0].strip()
+
+            if name.startswith('..'): # index section
+                yield name, data[1:]
+            elif len(data) < 2:
+                yield StopIteration
+            else:
+                yield name, self._strip(data[2:])
+
+    def _parse_param_list(self,content):
+        r = Reader(content)
+        params = []
+        while not r.eof():
+            header = r.read().strip()
+            if ' : ' in header:
+                arg_name, arg_type = header.split(' : ')[:2]
+            else:
+                arg_name, arg_type = header, ''
+
+            desc = r.read_to_next_unindented_line()
+            desc = dedent_lines(desc)
+
+            params.append((arg_name,arg_type,desc))
+
+        return params
+
+
    # Matches ':role:`name`' or a bare 'name' at the start of a line.
    # Compiled with re.X, so the literal spaces inside the pattern are
    # ignored by the regex engine.
    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
    def _parse_see_also(self, content):
        """Parse a See Also section into (name, description, role) items.

        Accepted input formats::

            func_name : Descriptive text
                continued text
            another_func_name : Descriptive text
            func_name1, func_name2, :meth:`func_name`, func_name3

        """
        items = []

        def parse_item_name(text):
            """Match ':role:`name`' or 'name'"""
            m = self._name_rgx.match(text)
            if m:
                g = m.groups()
                # g[1] (role) is None for the bare-name alternative, whose
                # name is captured in g[3] (name2).
                if g[1] is None:
                    return g[3], None
                else:
                    return g[2], g[1]
            raise ValueError("%s is not a item name" % text)

        def push_item(name, rest):
            # Record `name` with the accumulated description, then clear
            # the shared `rest` accumulator in place.
            if not name:
                return
            name, role = parse_item_name(name)
            items.append((name, list(rest), role))
            del rest[:]

        current_func = None
        rest = []

        for line in content:
            if not line.strip(): continue

            m = self._name_rgx.match(line)
            if m and line[m.end():].strip().startswith(':'):
                # 'name : description' -- flush the previous item and start
                # a new one with the text after the colon.
                push_item(current_func, rest)
                current_func, line = line[:m.end()], line[m.end():]
                rest = [line.split(':', 1)[1].strip()]
                if not rest[0]:
                    rest = []
            elif not line.startswith(' '):
                # Unindented line with no ':': a comma-separated list of
                # names, or a single name carried to the next iteration.
                push_item(current_func, rest)
                current_func = None
                if ',' in line:
                    for func in line.split(','):
                        if func.strip():
                            push_item(func, [])
                elif line.strip():
                    current_func = line
            elif current_func is not None:
                # Indented continuation of the current description.
                rest.append(line.strip())
        push_item(current_func, rest)
        return items
+
+    def _parse_index(self, section, content):
+        """
+        .. index: default
+           :refguide: something, else, and more
+
+        """
+        def strip_each_in(lst):
+            return [s.strip() for s in lst]
+
+        out = {}
+        section = section.split('::')
+        if len(section) > 1:
+            out['default'] = strip_each_in(section[1].split(','))[0]
+        for line in content:
+            line = line.split(':')
+            if len(line) > 2:
+                out[line[1]] = strip_each_in(line[2].split(','))
+        return out
+
+    def _parse_summary(self):
+        """Grab signature (if given) and summary"""
+        if self._is_at_section():
+            return
+
+        # If several signatures present, take the last one
+        while True:
+            summary = self._doc.read_to_next_empty_line()
+            summary_str = " ".join([s.strip() for s in summary]).strip()
+            if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+                self['Signature'] = summary_str
+                if not self._is_at_section():
+                    continue
+            break
+
+        if summary is not None:
+            self['Summary'] = summary
+
+        if not self._is_at_section():
+            self['Extended Summary'] = self._read_to_next_section()
+
+    def _parse(self):
+        self._doc.reset()
+        self._parse_summary()
+
+        for (section,content) in self._read_sections():
+            if not section.startswith('..'):
+                section = ' '.join([s.capitalize() for s in section.split(' ')])
+            if section in ('Parameters', 'Returns', 'Raises', 'Warns',
+                           'Other Parameters', 'Attributes', 'Methods'):
+                self[section] = self._parse_param_list(content)
+            elif section.startswith('.. index::'):
+                self['index'] = self._parse_index(section, content)
+            elif section == 'See Also':
+                self['See Also'] = self._parse_see_also(content)
+            else:
+                self[section] = content
+
+    # string conversion routines
+
+    def _str_header(self, name, symbol='-'):
+        return [name, len(name)*symbol]
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        if self['Signature']:
+            return [self['Signature'].replace('*','\*')] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        if self['Summary']:
+            return self['Summary'] + ['']
+        else:
+            return []
+
+    def _str_extended_summary(self):
+        if self['Extended Summary']:
+            return self['Extended Summary'] + ['']
+        else:
+            return []
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            for param,param_type,desc in self[name]:
+                if param_type:
+                    out += ['%s : %s' % (param, param_type)]
+                else:
+                    out += [param]
+                out += self._str_indent(desc)
+            out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += self[name]
+            out += ['']
+        return out
+
    def _str_see_also(self, func_role):
        """Render the See Also items as reST cross-reference links."""
        if not self['See Also']: return []
        out = []
        out += self._str_header("See Also")
        # Tracks whether the previous item carried a description: runs of
        # description-less items are joined, comma-separated, on one line.
        last_had_desc = True
        for func, desc, role in self['See Also']:
            if role:
                # Item-specific role wins over the caller-supplied default.
                link = ':%s:`%s`' % (role, func)
            elif func_role:
                link = ':%s:`%s`' % (func_role, func)
            else:
                link = "`%s`_" % func
            if desc or last_had_desc:
                out += ['']
                out += [link]
            else:
                # Append to the previous link line instead of starting a new one.
                out[-1] += ", %s" % link
            if desc:
                out += self._str_indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
        out += ['']
        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.items():
+            if section == 'default':
+                continue
+            out += ['   :%s: %s' % (section, ', '.join(references))]
+        return out
+
+    def __str__(self, func_role=''):
+        out = []
+        out += self._str_signature()
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Returns', 'Other Parameters',
+                           'Raises', 'Warns'):
+            out += self._str_param_list(param_list)
+        out += self._str_section('Warnings')
+        out += self._str_see_also(func_role)
+        for s in ('Notes','References','Examples'):
+            out += self._str_section(s)
+        for param_list in ('Attributes', 'Methods'):
+            out += self._str_param_list(param_list)
+        out += self._str_index()
+        return '\n'.join(out)
+
+
def indent(str, indent=4):
    """Prefix each line of *str* with *indent* spaces.

    None yields just the bare indent string.  (The parameter names shadow
    builtins but are kept for interface compatibility.)
    """
    pad = ' ' * indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
+
def dedent_lines(lines):
    """Deindent a list of lines maximally"""
    joined = "\n".join(lines)
    return textwrap.dedent(joined).split("\n")
+
def header(text, style='-'):
    """Return *text* underlined with *style* characters, newline-terminated."""
    underline = style * len(text)
    return '%s\n%s\n' % (text, underline)
+
+
class FunctionDoc(NumpyDocString):
    """Parsed docstring of a function or other callable.

    Falls back to runtime introspection for the signature when the
    docstring itself does not carry one.
    """

    def __init__(self, func, role='func', doc=None, config={}):
        self._f = func
        self._role = role  # e.g. "func" or "meth"

        if doc is None:
            if func is None:
                raise ValueError("No function or docstring given")
            doc = inspect.getdoc(func) or ''
        NumpyDocString.__init__(self, doc)

        if not self['Signature'] and func is not None:
            func, func_name = self.get_func()
            try:
                # Introspect the signature.  inspect.getargspec and
                # inspect.formatargspec were removed in Python 3.11;
                # inspect.signature is the supported replacement.
                argspec = str(inspect.signature(func))
                argspec = argspec.replace('*', r'\*')
                signature = '%s%s' % (func_name, argspec)
            except (TypeError, ValueError):
                # Builtins and some C-level callables cannot be introspected.
                signature = '%s()' % func_name
            self['Signature'] = signature

    def get_func(self):
        """Return (callable, name); classes yield __call__ or __init__."""
        func_name = getattr(self._f, '__name__', self.__class__.__name__)
        if inspect.isclass(self._f):
            func = getattr(self._f, '__call__', self._f.__init__)
        else:
            func = self._f
        return func, func_name

    def __str__(self):
        """Render as reST, wrapped in a Sphinx directive for the role."""
        out = ''

        _, func_name = self.get_func()

        roles = {'func': 'function',
                 'meth': 'method'}

        if self._role:
            if self._role not in roles:
                print("Warning: invalid role %s" % self._role)
            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role, ''),
                                             func_name)

        out += super(FunctionDoc, self).__str__(func_role=self._role)
        return out
+
+
class ClassDoc(NumpyDocString):
    """Parsed docstring of a class; can auto-document its public members."""

    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
                 config={}):
        if not inspect.isclass(cls) and cls is not None:
            raise ValueError("Expected a class or None, but got %r" % cls)
        self._cls = cls

        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename

        if doc is None:
            if cls is None:
                raise ValueError("No class or documentation string given")
            doc = pydoc.getdoc(cls)

        NumpyDocString.__init__(self, doc)

        if config.get('show_class_members', True):
            def splitlines_x(s):
                # Like str.splitlines, but maps None/'' to [].
                if not s:
                    return []
                else:
                    return s.splitlines()

            # Fill in Methods/Attributes from introspection when the
            # docstring did not provide them.
            for field, items in [('Methods', self.methods),
                                 ('Attributes', self.properties)]:
                if not self[field]:
                    doc_list = []
                    for name in sorted(items):
                        try:
                            doc_item = pydoc.getdoc(getattr(self._cls, name))
                            doc_list.append((name, '', splitlines_x(doc_item)))
                        except AttributeError:
                            pass  # method doesn't exist
                    self[field] = doc_list

    @property
    def methods(self):
        """Public callable members (plus any opting in via '.. shownumpydoc')."""
        if self._cls is None:
            return []
        # callable() replaces isinstance(func, collections.Callable):
        # the collections.Callable alias was removed in Python 3.10
        # (it lives in collections.abc since 3.3).
        return [name for name, func in inspect.getmembers(self._cls)
                if ((not name.startswith('_') or
                     '.. shownumpydoc' in pydoc.getdoc(func))
                    and callable(func))]

    @property
    def properties(self):
        """Public properties and get/set descriptors of the class."""
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if not name.startswith('_') and
                (func is None or isinstance(func, property) or
                 inspect.isgetsetdescriptor(func))]
diff --git a/doc/sphinxext/numpydoc/numpydoc/docscrape_sphinx.py b/doc/sphinxext/numpydoc/numpydoc/docscrape_sphinx.py
new file mode 100644
index 0000000..cdc2a37
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/docscrape_sphinx.py
@@ -0,0 +1,274 @@
+from __future__ import division, absolute_import, print_function
+
+import sys, re, inspect, textwrap, pydoc
+import sphinx
+import collections
+from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
if sys.version_info[0] >= 3:
    def sixu(s):
        """Identity on Python 3: str literals are already unicode."""
        return s
else:
    def sixu(s):
        """Decode a byte string to unicode, interpreting escape sequences."""
        return unicode(s, 'unicode_escape')
+
+
class SphinxDocString(NumpyDocString):
    """NumpyDocString subclass that renders Sphinx-flavoured reST.

    Overrides the string-conversion routines to emit field lists,
    rubrics, autosummary tables and admonitions instead of plain
    underlined sections.
    """

    def __init__(self, docstring, config={}):
        NumpyDocString.__init__(self, docstring, config=config)
        self.load_config(config)

    def load_config(self, config):
        """Cache rendering options; also called directly by subclasses."""
        self.use_plots = config.get('use_plots', False)
        self.class_members_toctree = config.get('class_members_toctree', True)

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out

    def _str_signature(self):
        # Signature rendering is disabled for Sphinx output (autodoc
        # supplies the signature itself).  The previous implementation
        # kept the old rendering as unreachable code after this return;
        # that dead code has been removed.
        return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_returns(self):
        """Render Returns as a ':Returns:' field list with bold names."""
        out = []
        if self['Returns']:
            out += self._str_field_list('Returns')
            out += ['']
            for param, param_type, desc in self['Returns']:
                if param_type:
                    out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                              param_type)])
                else:
                    out += self._str_indent([param.strip()])
                if desc:
                    out += ['']
                    out += self._str_indent(desc, 8)
                out += ['']
        return out

    def _str_param_list(self, name):
        """Render a parameter section as a field list with bold names."""
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                if param_type:
                    out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                              param_type)])
                else:
                    out += self._str_indent(['**%s**' % param.strip()])
                if desc:
                    out += ['']
                    out += self._str_indent(desc, 8)
                out += ['']
        return out

    @property
    def _obj(self):
        # The wrapped object: ClassDoc stores it as _cls, FunctionDoc and
        # SphinxObjDoc as _f, depending on which subclass we are mixed into.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None

    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.

        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')

            if prefix:
                prefix = '~%s.' % prefix

            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()

                # Check if the referenced member can have a docstring or not
                param_obj = getattr(self._obj, param, None)
                if not (callable(param_obj)
                        or isinstance(param_obj, property)
                        or inspect.isgetsetdescriptor(param_obj)):
                    param_obj = None

                if param_obj and (pydoc.getdoc(param_obj) or not desc):
                    # Referenced object has a docstring
                    autosum += ["   %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))

            if autosum:
                out += ['.. autosummary::']
                if self.class_members_toctree:
                    out += ['   :toctree:']
                out += [''] + autosum

            if others:
                # Simple reST table for members without usable docstrings.
                maxlen_0 = max(3, max([len(x[0]) for x in others]))
                hdr = sixu("=")*maxlen_0 + sixu("  ") + sixu("=")*10
                fmt = sixu('%%%ds  %%s  ') % (maxlen_0,)
                out += ['', hdr]
                for param, param_type, desc in others:
                    desc = sixu(" ").join(x.strip() for x in desc).strip()
                    if param_type:
                        desc = "(%s) %s" % (param_type, desc)
                    out += [fmt % (param.strip(), desc)]
                out += [hdr]
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # Drop the two header lines emitted by the base implementation.
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out

        out += ['.. index:: %s' % idx.get('default','')]
        for section, references in idx.items():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += ['   single: %s' % (', '.join(references))]
            else:
                out += ['   %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex','']
            else:
                out += ['.. latexonly::','']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out

    def _str_examples(self):
        """Wrap Examples in '.. plot::' when matplotlib is used, if enabled."""
        examples_str = "\n".join(self['Examples'])

        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')

    def __str__(self, indent=0, func_role="obj"):
        """Assemble the full reST output, indented by *indent* spaces."""
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        out += self._str_param_list('Parameters')
        out += self._str_returns()
        for param_list in ('Other Parameters', 'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out,indent)
        return '\n'.join(out)
+
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """FunctionDoc rendered through the Sphinx string-conversion routines."""
    def __init__(self, obj, doc=None, config={}):
        # load_config must run before FunctionDoc triggers parsing.
        self.load_config(config)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
+
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """ClassDoc rendered through the Sphinx string-conversion routines."""
    def __init__(self, obj, doc=None, func_doc=None, config={}):
        self.load_config(config)
        # NOTE(review): the func_doc argument is accepted but never
        # forwarded -- ClassDoc is always called with func_doc=None here.
        # Member rendering is handled by _str_member_list instead.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
+
class SphinxObjDoc(SphinxDocString):
    """SphinxDocString for an arbitrary object with an externally-supplied docstring."""
    def __init__(self, obj, doc=None, config={}):
        # Stored as _f so the _obj property can find the wrapped object.
        self._f = obj
        self.load_config(config)
        SphinxDocString.__init__(self, doc, config=config)
+
def get_doc_object(obj, what=None, doc=None, config={}):
    """Return the appropriate Sphinx*Doc wrapper for *obj*.

    *what* overrides auto-detection ('class', 'module', 'function',
    'object'); *doc* supplies the docstring text explicitly.
    """
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            # callable() replaces isinstance(obj, collections.Callable);
            # the collections.Callable alias was removed in Python 3.10.
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
diff --git a/doc/sphinxext/numpydoc/numpydoc/linkcode.py b/doc/sphinxext/numpydoc/numpydoc/linkcode.py
new file mode 100644
index 0000000..1ad3ab8
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/linkcode.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+"""
+    linkcode
+    ~~~~~~~~
+
+    Add external links to module code in Python object descriptions.
+
+    :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import warnings
+import collections
+
+warnings.warn("This extension has been accepted to Sphinx upstream. "
+              "Use the version from there (Sphinx >= 1.2) "
+              "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode",
+              FutureWarning, stacklevel=1)
+
+
+from docutils import nodes
+
+from sphinx import addnodes
+from sphinx.locale import _
+from sphinx.errors import SphinxError
+
class LinkcodeError(SphinxError):
    """Raised when the linkcode extension is misconfigured in conf.py."""
    category = "linkcode error"
+
def doctree_read(app, doctree):
    """Attach external '[source]' links to documented objects.

    Runs on the 'doctree-read' event: for every object description in the
    doctree, builds a domain-specific info dict, asks the user-supplied
    ``linkcode_resolve`` for a URI, and appends an HTML-only reference.
    """
    env = app.builder.env

    resolve_target = getattr(env.config, 'linkcode_resolve', None)
    # callable() replaces isinstance(..., collections.Callable), which
    # broke when collections.Callable was removed in Python 3.10; it also
    # validates the value we actually call, not a second attribute lookup.
    if not callable(resolve_target):
        raise LinkcodeError(
            "Function `linkcode_resolve` is not given in conf.py")

    domain_keys = dict(
        py=['module', 'fullname'],
        c=['names'],
        cpp=['names'],
        js=['object', 'fullname'],
    )

    for objnode in doctree.traverse(addnodes.desc):
        domain = objnode.get('domain')
        uris = set()
        for signode in objnode:
            if not isinstance(signode, addnodes.desc_signature):
                continue

            # Convert signode to a specified format
            info = {}
            for key in domain_keys.get(domain, []):
                value = signode.get(key)
                if not value:
                    value = ''
                info[key] = value
            if not info:
                continue

            # Call user code to resolve the link
            uri = resolve_target(domain, info)
            if not uri:
                # no source
                continue

            if uri in uris:
                # only one link per name, please
                continue
            uris.add(uri)

            onlynode = addnodes.only(expr='html')
            onlynode += nodes.reference('', '', internal=False, refuri=uri)
            onlynode[0] += nodes.inline('', _('[source]'),
                                        classes=['viewcode-link'])
            signode += onlynode
+
def setup(app):
    """Sphinx entry point: register the doctree-read hook and config value."""
    app.connect('doctree-read', doctree_read)
    app.add_config_value('linkcode_resolve', None, '')
diff --git a/doc/sphinxext/numpydoc/numpydoc/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc/numpydoc.py
new file mode 100644
index 0000000..2bc2d1e
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/numpydoc.py
@@ -0,0 +1,187 @@
+"""
+========
+numpydoc
+========
+
+Sphinx extension that handles docstrings in the Numpy standard format. [1]
+
+It will:
+
+- Convert Parameters etc. sections to field lists.
+- Convert See Also section to a See also entry.
+- Renumber references.
+- Extract the signature from the docstring, if it can't be determined otherwise.
+
+.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import os, sys, re, pydoc
+import sphinx
+import inspect
+import collections
+
+if sphinx.__version__ < '1.0.1':
+    raise RuntimeError("Sphinx 1.0.1 or newer is required")
+
+from .docscrape_sphinx import get_doc_object, SphinxDocString
+from sphinx.util.compat import Directive
+
if sys.version_info[0] >= 3:
    def sixu(s):
        """Identity on Python 3: str literals are already unicode."""
        return s
else:
    def sixu(s):
        """Decode a byte string to unicode, interpreting escape sequences."""
        return unicode(s, 'unicode_escape')
+
+
def mangle_docstrings(app, what, name, obj, options, lines,
                      reference_offset=[0]):
    """autodoc-process-docstring hook: rewrite *lines* in numpydoc style.

    Modules get their top title stripped; everything else is re-rendered
    through get_doc_object.  Reference labels are then renumbered globally
    so identical labels in different docstrings do not collide.

    The mutable default ``reference_offset`` is deliberate: it is a
    module-lifetime counter shared across calls.
    """
    cfg = dict(use_plots=app.config.numpydoc_use_plots,
               show_class_members=app.config.numpydoc_show_class_members,
               class_members_toctree=app.config.numpydoc_class_members_toctree,
               )

    if what == 'module':
        # Strip top title
        title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'),
                              re.I | re.S)
        lines[:] = title_re.sub(sixu(''), sixu("\n").join(lines)).split(sixu("\n"))
    else:
        doc = get_doc_object(obj, what, sixu("\n").join(lines), config=cfg)
        if sys.version_info[0] >= 3:
            doc = str(doc)
        else:
            doc = unicode(doc)
        lines[:] = doc.split(sixu("\n"))

    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
            obj.__name__:
        if hasattr(obj, '__module__'):
            v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__))
        else:
            v = dict(full_name=obj.__name__)
        lines += [sixu(''), sixu('.. htmlonly::'), sixu('')]
        lines += [sixu('    %s') % x for x in
                  (app.config.numpydoc_edit_link % v).split("\n")]

    # replace reference numbers so that there are no duplicates
    references = []
    for line in lines:
        line = line.strip()
        # Escape the leading dots and allow multi-character labels: the
        # previous pattern '^.. \\[([a-z0-9_.-])\\]' let '..' match any two
        # characters and only captured single-character reference names,
        # so longer labels were never renumbered.  This now matches the
        # label pattern used by SphinxDocString._str_references.
        m = re.match(sixu('^\\.\\. \\[([a-z0-9_.-]+)\\]'), line, re.I)
        if m:
            references.append(m.group(1))

    # start renaming from the longest string, to avoid overwriting parts
    references.sort(key=lambda x: -len(x))
    if references:
        for i, line in enumerate(lines):
            for r in references:
                if re.match(sixu('^\\d+$'), r):
                    new_r = sixu("R%d") % (reference_offset[0] + int(r))
                else:
                    new_r = sixu("%s%d") % (r, reference_offset[0])
                lines[i] = lines[i].replace(sixu('[%s]_') % r,
                                            sixu('[%s]_') % new_r)
                lines[i] = lines[i].replace(sixu('.. [%s]') % r,
                                            sixu('.. [%s]') % new_r)

    reference_offset[0] += len(references)
+
def mangle_signature(app, what, name, obj, options, sig, retann):
    """autodoc-process-signature hook: prefer the docstring's signature."""
    # Do not try to inspect classes that don't define `__init__`
    if (inspect.isclass(obj) and
            (not hasattr(obj, '__init__') or
             'initializes x; see ' in pydoc.getdoc(obj.__init__))):
        return '', ''

    # callable() replaces isinstance(obj, collections.Callable); the
    # collections.Callable alias was removed in Python 3.10.
    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')):
        return
    if not hasattr(obj, '__doc__'):
        return

    doc = SphinxDocString(pydoc.getdoc(obj))
    if doc['Signature']:
        # Keep only the argument list: strip everything up to the first '('.
        sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature'])
        return sig, sixu('')
+
def setup(app, get_doc_object_=get_doc_object):
    """Sphinx entry point: register hooks, config values and domains."""
    if not hasattr(app, 'add_config_value'):
        return # probably called by nose, better bail out

    # Allow callers to substitute their own doc-object factory by
    # rebinding the module-level name.
    global get_doc_object
    get_doc_object = get_doc_object_

    app.connect('autodoc-process-docstring', mangle_docstrings)
    app.connect('autodoc-process-signature', mangle_signature)
    app.add_config_value('numpydoc_edit_link', None, False)
    app.add_config_value('numpydoc_use_plots', None, False)
    app.add_config_value('numpydoc_show_class_members', True, True)
    app.add_config_value('numpydoc_class_members_toctree', True, True)

    # Extra mangling domains
    app.add_domain(NumpyPythonDomain)
    app.add_domain(NumpyCDomain)
+
+#------------------------------------------------------------------------------
+# Docstring-mangling domains
+#------------------------------------------------------------------------------
+
+from docutils.statemachine import ViewList
+from sphinx.domains.c import CDomain
+from sphinx.domains.python import PythonDomain
+
class ManglingDomainBase(object):
    """Mixin that wraps a Sphinx domain's directives with docstring mangling.

    Subclasses declare directive name -> numpydoc object type via
    ``directive_mangling_map``.
    """
    directive_mangling_map = {}

    def __init__(self, *a, **kw):
        super(ManglingDomainBase, self).__init__(*a, **kw)
        self.wrap_mangling_directives()

    def wrap_mangling_directives(self):
        # Replace each mapped directive class with a mangling wrapper.
        for name, objtype in list(self.directive_mangling_map.items()):
            self.directives[name] = wrap_mangling_directive(
                self.directives[name], objtype)
+
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
    """Python domain ('np') whose directives mangle numpydoc docstrings."""
    name = 'np'
    directive_mangling_map = {
        'function': 'function',
        'class': 'class',
        'exception': 'class',
        'method': 'function',
        'classmethod': 'function',
        'staticmethod': 'function',
        'attribute': 'attribute',
    }
    # No standalone indices for the mangled domain.
    indices = []
+
class NumpyCDomain(ManglingDomainBase, CDomain):
    """C domain ('np-c') whose directives mangle numpydoc docstrings."""
    name = 'np-c'
    directive_mangling_map = {
        'function': 'function',
        'member': 'attribute',
        'macro': 'function',
        'type': 'class',
        'var': 'object',
    }
+
def wrap_mangling_directive(base_directive, objtype):
    """Return a subclass of *base_directive* whose body is mangled.

    The wrapper extracts the object name from the directive arguments,
    runs mangle_docstrings over the directive content, then delegates to
    the original directive's run().
    """
    class directive(base_directive):
        def run(self):
            env = self.state.document.settings.env

            name = None
            if self.arguments:
                # Drop any leading 'modulepath ' prefix and a trailing
                # '(signature)' to isolate the bare object name.
                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
                name = m.group(2).strip()

            if not name:
                name = self.arguments[0]

            lines = list(self.content)
            mangle_docstrings(env.app, objtype, name, None, None, lines)
            self.content = ViewList(lines, self.content.parent)

            return base_directive.run(self)

    return directive
diff --git a/doc/sphinxext/numpydoc/numpydoc/phantom_import.py b/doc/sphinxext/numpydoc/numpydoc/phantom_import.py
new file mode 100644
index 0000000..9a60b4a
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/phantom_import.py
@@ -0,0 +1,167 @@
+"""
+==============
+phantom_import
+==============
+
+Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
+extensions to use docstrings loaded from an XML file.
+
+This extension loads an XML file in the Pydocweb format [1] and
+creates a dummy module that contains the specified docstrings. This
+can be used to get the current docstrings from a Pydocweb instance
+without needing to rebuild the documented module.
+
+.. [1] http://code.google.com/p/pydocweb
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import imp, sys, compiler, types, os, inspect, re
+
def setup(app):
    """Sphinx entry point: register the phantom-import config value and
    hook module creation into builder initialization."""
    app.add_config_value('phantom_import_file', None, True)
    app.connect('builder-inited', initialize)
+
def initialize(app):
    """If ``phantom_import_file`` names an existing file, phantom-import it."""
    xml_path = app.config.phantom_import_file
    if not xml_path or not os.path.isfile(xml_path):
        return
    print("[numpydoc] Phantom importing modules from", xml_path, "...")
    import_phantom_module(xml_path)
+
+#------------------------------------------------------------------------------
+# Creating 'phantom' modules from an XML description
+#------------------------------------------------------------------------------
def import_phantom_module(xml_file):
    """
    Insert a fake Python module to sys.modules, based on a XML file.

    The XML file is expected to conform to Pydocweb DTD. The fake
    module will contain dummy objects, which guarantee the following:

    - Docstrings are correct.
    - Class inheritance relationships are correct (if present in XML).
    - Function argspec is *NOT* correct (even if present in XML).
      Instead, the function signature is prepended to the function docstring.
    - Class attributes are *NOT* correct; instead, they are dummy objects.

    Parameters
    ----------
    xml_file : str
        Name of an XML file to read

    """
    import lxml.etree as etree
    # cmp= sorting was removed in Python 3; sort via cmp_to_key instead.
    from functools import cmp_to_key

    object_cache = {}

    tree = etree.parse(xml_file)
    root = tree.getroot()

    # Sort items so that
    # - Base classes come before classes inherited from them
    # - Modules come before their contents
    all_nodes = dict([(n.attrib['id'], n) for n in root])

    def _get_bases(node, recurse=False):
        # Return the 'ref' ids of the node's base classes; with
        # recurse=True also walk bases-of-bases.  The list is extended
        # while being walked, hence the manual index loop.
        bases = [x.attrib['ref'] for x in node.findall('base')]
        if recurse:
            j = 0
            while True:
                try:
                    b = bases[j]
                except IndexError:
                    break
                if b in all_nodes:
                    bases.extend(_get_bases(all_nodes[b]))
                j += 1
        return bases

    type_index = ['module', 'class', 'callable', 'object']

    def _cmp(a, b):
        # Three-way comparison; the `cmp` builtin does not exist on
        # Python 3.
        return (a > b) - (a < b)

    def base_cmp(a, b):
        # Modules first, then classes, callables, and plain objects.
        x = _cmp(type_index.index(a.tag), type_index.index(b.tag))
        if x != 0:
            return x

        # Among classes: classes with fewer bases first, and base
        # classes strictly before their subclasses.
        if a.tag == 'class' and b.tag == 'class':
            a_bases = _get_bases(a, recurse=True)
            b_bases = _get_bases(b, recurse=True)
            x = _cmp(len(a_bases), len(b_bases))
            if x != 0:
                return x
            if a.attrib['id'] in b_bases:
                return -1
            if b.attrib['id'] in a_bases:
                return 1

        # Finally, shallower dotted names before deeper ones, so parent
        # namespaces are created before their members.
        return _cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))

    # list(root) replaces the deprecated Element.getchildren();
    # sort(cmp=...) is Python 2 only.
    nodes = sorted(root, key=cmp_to_key(base_cmp))

    # Create phantom items
    for node in nodes:
        name = node.attrib['id']
        # Undo the backslash escaping applied by Pydocweb.  Python 3
        # str has no 'string-escape' codec; round-trip through bytes
        # with 'unicode_escape' instead (close equivalent for the
        # escape sequences Pydocweb emits — TODO confirm for non-ASCII).
        raw = node.text or ''
        if sys.version_info[0] >= 3:
            doc = raw.encode('latin1', 'backslashreplace') \
                     .decode('unicode_escape') + "\n"
        else:
            doc = raw.decode('string-escape') + "\n"
        if doc == "\n":
            doc = ""

        # create parent, if missing
        parent = name
        while True:
            parent = '.'.join(parent.split('.')[:-1])
            if not parent:
                break
            if parent in object_cache:
                break
            obj = imp.new_module(parent)
            object_cache[parent] = obj
            sys.modules[parent] = obj

        # create object
        if node.tag == 'module':
            obj = imp.new_module(name)
            obj.__doc__ = doc
            sys.modules[name] = obj
        elif node.tag == 'class':
            bases = [object_cache[b] for b in _get_bases(node)
                     if b in object_cache]
            bases.append(object)
            init = lambda self: None
            init.__doc__ = doc
            obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
            obj.__name__ = name.split('.')[-1]
        elif node.tag == 'callable':
            funcname = node.attrib['id'].split('.')[-1]
            argspec = node.attrib.get('argspec')
            if argspec:
                # The dummy's real argspec is meaningless; prepend the
                # recorded signature to the docstring instead.
                argspec = re.sub('^[^(]*', '', argspec)
                doc = "%s%s\n\n%s" % (funcname, argspec, doc)
            obj = lambda: 0
            obj.__argspec_is_invalid_ = True
            # The original code assigned funcname to __name__ (or
            # func_name on Python 2) and then unconditionally overwrote
            # it with the fully dotted name, so the net effect on both
            # versions was __name__ == name; keep that behavior.
            obj.__name__ = name
            obj.__doc__ = doc
            if inspect.isclass(object_cache[parent]):
                obj.__objclass__ = object_cache[parent]
        else:
            class Dummy(object):
                pass
            obj = Dummy()
            obj.__name__ = name
            obj.__doc__ = doc
            if inspect.isclass(object_cache[parent]):
                obj.__get__ = lambda: None
        object_cache[name] = obj

        if parent:
            if inspect.ismodule(object_cache[parent]):
                obj.__module__ = parent
                setattr(object_cache[parent], name.split('.')[-1], obj)

    # Populate items
    for node in root:
        obj = object_cache.get(node.attrib['id'])
        if obj is None:
            continue
        for ref in node.findall('ref'):
            if node.tag == 'class':
                # For classes, only attach members nested inside the
                # class's own namespace.
                if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
                    setattr(obj, ref.attrib['name'],
                            object_cache.get(ref.attrib['ref']))
            else:
                setattr(obj, ref.attrib['name'],
                        object_cache.get(ref.attrib['ref']))
diff --git a/doc/sphinxext/numpydoc/numpydoc/plot_directive.py b/doc/sphinxext/numpydoc/numpydoc/plot_directive.py
new file mode 100644
index 0000000..2014f85
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/plot_directive.py
@@ -0,0 +1,642 @@
+"""
+A special directive for generating a matplotlib plot.
+
+.. warning::
+
+   This is a hacked version of plot_directive.py from Matplotlib.
+   It's very much subject to change!
+
+
+Usage
+-----
+
+Can be used like this::
+
+    .. plot:: examples/example.py
+
+    .. plot::
+
+       import matplotlib.pyplot as plt
+       plt.plot([1,2,3], [4,5,6])
+
+    .. plot::
+
+       A plotting example:
+
+       >>> import matplotlib.pyplot as plt
+       >>> plt.plot([1,2,3], [4,5,6])
+
+The content is interpreted as doctest formatted if it has a line starting
+with ``>>>``.
+
+The ``plot`` directive supports the options
+
+    format : {'python', 'doctest'}
+        Specify the format of the input
+
+    include-source : bool
+        Whether to display the source code. Default can be changed in conf.py
+
+and the ``image`` directive options ``alt``, ``height``, ``width``,
+``scale``, ``align``, ``class``.
+
+Configuration options
+---------------------
+
+The plot directive has the following configuration options:
+
+    plot_include_source
+        Default value for the include-source option
+
+    plot_pre_code
+        Code that should be executed before each plot.
+
+    plot_basedir
+        Base directory, to which plot:: file names are relative to.
        (If None or empty, file names are relative to the directory where
+        the file containing the directive is.)
+
+    plot_formats
+        File formats to generate. List of tuples or strings::
+
+            [(suffix, dpi), suffix, ...]
+
+        that determine the file format and the DPI. For entries whose
+        DPI was omitted, sensible defaults are chosen.
+
+    plot_html_show_formats
+        Whether to show links to the files in HTML.
+
+TODO
+----
+
+* Refactor Latex output; now it's plain images, but it would be nice
+  to make them appear side-by-side, or in floats.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
+import sphinx
+
# Pick a StringIO for capturing exec'd stdout.  The original code
# imported io.StringIO on both branches, but on Python 2 io.StringIO
# only accepts unicode and chokes on the byte strings `print` writes;
# use the classic StringIO module there instead.
if sys.version_info[0] >= 3:
    from io import StringIO
else:
    from StringIO import StringIO
+
# NOTE(review): `warnings` is already imported above; this re-import is
# redundant but harmless.
import warnings
# Emit a deprecation heads-up once at import time: matplotlib ships its
# own (maintained) copy of this directive.
warnings.warn("A plot_directive module is also available under "
              "matplotlib.sphinxext; expect this numpydoc.plot_directive "
              "module to be deprecated after relevant features have been "
              "integrated there.",
              FutureWarning, stacklevel=2)
+
+
+#------------------------------------------------------------------------------
+# Registration hook
+#------------------------------------------------------------------------------
+
def setup(app):
    """Sphinx entry point: record the app and register config values
    and the ``plot`` directive."""
    # Stash app/config/confdir on the function object; the helpers in
    # this module read them through the module-level `setup` name.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir

    config_defaults = [
        ('plot_pre_code', ''),
        ('plot_include_source', False),
        ('plot_formats', ['png', 'hires.png', 'pdf']),
        ('plot_basedir', None),
        ('plot_html_show_formats', True),
    ]
    for config_name, default in config_defaults:
        app.add_config_value(config_name, default, True)

    app.add_directive('plot', plot_directive, True, (0, 1, False),
                      **plot_directive_options)
+
+#------------------------------------------------------------------------------
+# plot:: directive
+#------------------------------------------------------------------------------
+from docutils.parsers.rst import directives
+from docutils import nodes
+
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # Old-style (function-based) docutils directive; all the real work
    # happens in run() below.
    return run(arguments, content, options, state_machine, state, lineno)
# Reuse the module docstring (the usage documentation above) as the
# directive's own docstring.
plot_directive.__doc__ = __doc__
+
+def _option_boolean(arg):
+    if not arg or not arg.strip():
+        # no argument given, assume used as a flag
+        return True
+    elif arg.strip().lower() in ('no', '0', 'false'):
+        return False
+    elif arg.strip().lower() in ('yes', '1', 'true'):
+        return True
+    else:
+        raise ValueError('"%s" unknown boolean' % arg)
+
def _option_format(arg):
    """Validate the ``:format:`` option: 'python' or 'doctest'.

    The original offered ('python', 'lisp'), contradicting the module
    docs ({'python', 'doctest'}) and run(), which treats any non-python
    value as doctest input.
    """
    return directives.choice(arg, ('python', 'doctest'))
+
def _option_align(arg):
    # Restrict ``:align:`` to the values accepted by the image directive.
    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
                                   "right"))
+
# Option spec for the plot directive: the standard image-directive
# options plus the plot-specific include-source and format flags.
plot_directive_options = {'alt': directives.unchanged,
                          'height': directives.length_or_unitless,
                          'width': directives.length_or_percentage_or_unitless,
                          'scale': directives.nonnegative_int,
                          'align': _option_align,
                          'class': directives.class_option,
                          'include-source': _option_boolean,
                          'format': _option_format,
                          }
+
+#------------------------------------------------------------------------------
+# Generating output
+#------------------------------------------------------------------------------
+
+from docutils import nodes, utils
+
try:
    # Sphinx depends on either Jinja or Jinja2
    import jinja2
    def format_template(template, **kw):
        # Render *template* with keyword substitutions via Jinja2.
        return jinja2.Template(template).render(**kw)
except ImportError:
    # Fall back to the legacy Jinja 1 API.
    import jinja
    def format_template(template, **kw):
        return jinja.from_string(template, **kw)
+
# Jinja template producing the reST inserted for each code piece: an
# html-only section with source/format links and the PNG figures, and a
# latex-only section referencing the PDF images.
TEMPLATE = """
{{ source_code }}

{{ only_html }}

   {% if source_link or (html_show_formats and not multi_image) %}
   (
   {%- if source_link -%}
   `Source code <{{ source_link }}>`__
   {%- endif -%}
   {%- if html_show_formats and not multi_image -%}
     {%- for img in images -%}
       {%- for fmt in img.formats -%}
         {%- if source_link or not loop.first -%}, {% endif -%}
         `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
       {%- endfor -%}
     {%- endfor -%}
   {%- endif -%}
   )
   {% endif %}

   {% for img in images %}
   .. figure:: {{ build_dir }}/{{ img.basename }}.png
      {%- for option in options %}
      {{ option }}
      {% endfor %}

      {% if html_show_formats and multi_image -%}
        (
        {%- for fmt in img.formats -%}
        {%- if not loop.first -%}, {% endif -%}
        `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
        {%- endfor -%}
        )
      {%- endif -%}
   {% endfor %}

{{ only_latex }}

   {% for img in images %}
   .. image:: {{ build_dir }}/{{ img.basename }}.pdf
   {% endfor %}

"""
+
class ImageFile(object):
    """Record of one generated plot image and the formats written for it."""

    def __init__(self, basename, dirname):
        self.basename = basename
        self.dirname = dirname
        self.formats = []

    def filename(self, format):
        """Full path of this image rendered in the given *format*."""
        return os.path.join(self.dirname, self.basename + '.' + format)

    def filenames(self):
        """Paths for every format generated so far."""
        return [self.filename(fmt) for fmt in self.formats]
+
def run(arguments, content, options, state_machine, state, lineno):
    """Execute the plot directive: run the code, generate image files,
    and insert the resulting reST (figures plus optional source).

    Returns a list of docutils system messages; empty on success.
    """
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config

    options.setdefault('include-source', config.plot_include_source)

    # determine input
    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if arguments:
        # Code comes from an external file, relative either to the
        # current document or to plot_basedir.
        if not config.plot_basedir:
            source_file_name = os.path.join(rst_dir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))
        # Close the file promptly instead of leaking the handle.
        with open(source_file_name, 'r') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        # Inline code: number plots per document to keep output file
        # names unique.
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = makefig(code, source_file_name, build_dir, output_base,
                          config)
        errors = []
    except PlotError as err:
        # Report the failure as a docutils warning and fall back to
        # showing the code without any images.
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s: %s" % (output_base, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                # Doctest blocks are valid reST as-is.
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        opts = [':%s: %s' % (key, val) for key, val in list(options.items())
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"

        # Only the first code piece links back to the source file.
        if j == 0:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats)

        total_lines.extend(result.split("\n"))
        # (the original used extend("\n"), which appends the single
        # newline string; append() states that intent directly)
        total_lines.append("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                shutil.copyfile(fn, os.path.join(dest_dir,
                                                 os.path.basename(fn)))

    # copy script (if necessary)
    if source_file_name == rst_file:
        target_name = os.path.join(dest_dir, output_base + source_ext)
        with open(target_name, 'w') as f:
            f.write(unescape_doctest(code))

    return errors
+
+
+#------------------------------------------------------------------------------
+# Run code and capture figures
+#------------------------------------------------------------------------------
+
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+import matplotlib.image as image
+from matplotlib import _pylab_helpers
+
+import exceptions
+
def contains_doctest(text):
    """Return True if *text* looks like doctest input: it fails to
    compile as plain Python and contains '>>>'-prefixed lines."""
    try:
        compile(text, '<string>', 'exec')
    except SyntaxError:
        pass
    else:
        # Valid Python as-is: not a doctest.
        return False
    return bool(re.search(r'^\s*>>>', text, re.M))
+
def unescape_doctest(text):
    """
    Extract code from a piece of text, which contains either Python code
    or doctests.

    """
    if not contains_doctest(text):
        return text

    # Keep the payload of '>>>'/'...' lines, comment out narrative
    # text, and preserve blank lines.
    prompt_re = re.compile(r'^\s*(>>>|\.\.\.) (.*)$')
    pieces = []
    for line in text.split("\n"):
        match = prompt_re.match(line)
        if match:
            pieces.append(match.group(2))
        elif line.strip():
            pieces.append("# " + line.strip())
        else:
            pieces.append("")
    return "\n".join(pieces) + "\n"
+
def split_code_at_show(text):
    """
    Split code at plt.show()

    """
    # The line that terminates a part; the show call stays with its part.
    if contains_doctest(text):
        terminator = '>>> plt.show()'
    else:
        terminator = 'plt.show()'

    parts = []
    current = []
    for line in text.split("\n"):
        current.append(line)
        if line.strip() == terminator:
            parts.append("\n".join(current))
            current = []
    trailing = "\n".join(current)
    if trailing.strip():
        parts.append(trailing)
    return parts
+
class PlotError(RuntimeError):
    # Raised when executing the plotted code or saving a figure fails.
    pass
+
def run_code(code, code_path, ns=None):
    """Execute *code* in namespace *ns* (created if None) and return ns.

    The working directory, sys.path, sys.argv and sys.stdout are
    temporarily adjusted and always restored; any exception raised by
    the code is re-raised as PlotError carrying the traceback text.
    """
    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.
    pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Redirect stdout
    stdout = sys.stdout
    sys.stdout = StringIO()

    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]

    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            # plot_pre_code runs only when the namespace is fresh, so a
            # shared ns across code pieces executes it exactly once.
            if not ns:
                exec(setup.config.plot_pre_code, ns)
            exec(code, ns)
        except (Exception, SystemExit) as err:
            # Wrap everything (including SystemExit) so the directive
            # reports a build warning instead of aborting Sphinx.
            raise PlotError(traceback.format_exc())
    finally:
        # Always restore process-global state, even on failure.
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
+
+
+#------------------------------------------------------------------------------
+# Generating figures
+#------------------------------------------------------------------------------
+
def out_of_date(original, derived):
    """
    Returns True if derivative is out-of-date wrt original,
    both of which are full file paths.
    """
    if not os.path.exists(derived):
        return True
    return os.stat(derived).st_mtime < os.stat(original).st_mtime
+
+
def makefig(code, code_path, output_dir, output_base, config):
    """
    Run a pyplot script *code* and save the images under *output_dir*
    with file names derived from *output_base*

    Returns a list of (code_piece, [ImageFile, ...]) pairs.  Existing
    up-to-date image files are reused instead of re-running the code.
    """

    # -- Parse format list
    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
    formats = []
    for fmt in config.plot_formats:
        if isinstance(fmt, str):
            formats.append((fmt, default_dpi.get(fmt, 80)))
        elif type(fmt) in (tuple, list) and len(fmt) == 2:
            formats.append((str(fmt[0]), int(fmt[1])))
        else:
            raise PlotError('invalid image format "%r" in plot_formats' % fmt)

    # -- Try to determine if all images already exist

    code_pieces = split_code_at_show(code)

    # Look for single-figure output files first
    all_exists = True
    img = ImageFile(output_base, output_dir)
    for format, dpi in formats:
        if out_of_date(code_path, img.filename(format)):
            all_exists = False
            break
        img.formats.append(format)

    if all_exists:
        return [(code, [img])]

    # Then look for multi-figure output files
    results = []
    all_exists = True
    for i, code_piece in enumerate(code_pieces):
        images = []
        for j in range(1000):
            img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
            for format, dpi in formats:
                if out_of_date(code_path, img.filename(format)):
                    all_exists = False
                    break
                img.formats.append(format)

            # assume that if we have one, we have them all
            if not all_exists:
                all_exists = (j > 0)
                break
            images.append(img)
        if not all_exists:
            break
        results.append((code_piece, images))

    if all_exists:
        return results

    # -- We didn't find the files, so build them

    results = []
    ns = {}

    for i, code_piece in enumerate(code_pieces):
        # Clear between runs
        plt.close('all')

        # Run code
        run_code(code_piece, code_path, ns)

        # Collect images
        images = []
        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
        for j, figman in enumerate(fig_managers):
            if len(fig_managers) == 1 and len(code_pieces) == 1:
                img = ImageFile(output_base, output_dir)
            else:
                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
                                output_dir)
            images.append(img)
            for format, dpi in formats:
                try:
                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except BaseException:
                    # The original used `exceptions.BaseException`; the
                    # py2-only `exceptions` module does not exist on
                    # Python 3, while the builtin BaseException is
                    # equivalent on both versions.
                    raise PlotError(traceback.format_exc())
                img.formats.append(format)

        # Results
        results.append((code_piece, images))

    return results
+
+
+#------------------------------------------------------------------------------
+# Relative pathnames
+#------------------------------------------------------------------------------
+
try:
    from os.path import relpath
except ImportError:
    # Python < 2.6 lacks os.path.relpath; fall back to the 2.7
    # implementation for the current platform.
    # Copied from Python 2.7
    if 'posix' in sys.builtin_module_names:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            from os.path import sep, curdir, join, abspath, commonprefix, \
                 pardir

            if not path:
                raise ValueError("no path specified")

            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)

            # Work out how much of the filepath is shared by start and path.
            i = len(commonprefix([start_list, path_list]))

            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    elif 'nt' in sys.builtin_module_names:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            from os.path import sep, curdir, join, abspath, commonprefix, \
                 pardir, splitunc

            if not path:
                raise ValueError("no path specified")
            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)
            # On Windows, paths on different drives (or mixing UNC and
            # drive-letter paths) cannot be relativized.
            if start_list[0].lower() != path_list[0].lower():
                unc_path, rest = splitunc(path)
                unc_start, rest = splitunc(start)
                if bool(unc_path) ^ bool(unc_start):
                    raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                                                        % (path, start))
                else:
                    raise ValueError("path is on drive %s, start on drive %s"
                                                        % (path_list[0], start_list[0]))
            # Work out how much of the filepath is shared by start and path.
            for i in range(min(len(start_list), len(path_list))):
                if start_list[i].lower() != path_list[i].lower():
                    break
            else:
                i += 1

            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    else:
        raise RuntimeError("Unsupported platform (no relpath available!)")
diff --git a/doc/sphinxext/numpydoc/numpydoc/tests/test_docscrape.py b/doc/sphinxext/numpydoc/numpydoc/tests/test_docscrape.py
new file mode 100644
index 0000000..b682504
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/tests/test_docscrape.py
@@ -0,0 +1,767 @@
+# -*- encoding:utf-8 -*-
+from __future__ import division, absolute_import, print_function
+
+import sys, textwrap
+
+from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
+from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
+from nose.tools import *
+
# Portable unicode-literal helper: a no-op on Python 3, a
# 'unicode_escape' decode on Python 2 (where `unicode` exists).
if sys.version_info[0] >= 3:
    sixu = lambda s: s
else:
    sixu = lambda s: unicode(s, 'unicode_escape')
+
+
# Shared fixture: a fully featured numpydoc-format docstring exercising
# every section the parser understands.  (This text is test data — the
# literals, including the 'is is' typo, are asserted against verbatim.)
doc_txt = '''\
  numpy.multivariate_normal(mean, cov, shape=None, spam=None)

  Draw values from a multivariate normal distribution with specified
  mean and covariance.

  The multivariate normal or Gaussian distribution is a generalisation
  of the one-dimensional normal distribution to higher dimensions.

  Parameters
  ----------
  mean : (N,) ndarray
      Mean of the N-dimensional distribution.

      .. math::

         (1+2+3)/3

  cov : (N, N) ndarray
      Covariance matrix of the distribution.
  shape : tuple of ints
      Given a shape of, for example, (m,n,k), m*n*k samples are
      generated, and packed in an m-by-n-by-k arrangement.  Because
      each sample is N-dimensional, the output shape is (m,n,k,N).

  Returns
  -------
  out : ndarray
      The drawn samples, arranged according to `shape`.  If the
      shape given is (m,n,...), then the shape of `out` is is
      (m,n,...,N).

      In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
      value drawn from the distribution.
  list of str
      This is not a real return value.  It exists to test
      anonymous return values.

  Other Parameters
  ----------------
  spam : parrot
      A parrot off its mortal coil.

  Raises
  ------
  RuntimeError
      Some error

  Warns
  -----
  RuntimeWarning
      Some warning

  Warnings
  --------
  Certain warnings apply.

  Notes
  -----
  Instead of specifying the full covariance matrix, popular
  approximations include:

    - Spherical covariance (`cov` is a multiple of the identity matrix)
    - Diagonal covariance (`cov` has non-negative elements only on the diagonal)

  This geometrical property can be seen in two dimensions by plotting
  generated data-points:

  >>> mean = [0,0]
  >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis

  >>> x,y = multivariate_normal(mean,cov,5000).T
  >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()

  Note that the covariance matrix must be symmetric and non-negative
  definite.

  References
  ----------
  .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
         Processes," 3rd ed., McGraw-Hill Companies, 1991
  .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
         2nd ed., Wiley, 2001.

  See Also
  --------
  some, other, funcs
  otherfunc : relationship

  Examples
  --------
  >>> mean = (1,2)
  >>> cov = [[1,0],[1,0]]
  >>> x = multivariate_normal(mean,cov,(3,3))
  >>> print x.shape
  (3, 3, 2)

  The following is probably true, given that 0.6 is roughly twice the
  standard deviation:

  >>> print list( (x[0,0,:] - mean) < 0.6 )
  [True, True]

  .. index:: random
     :refguide: random;distributions, random;gauss

  '''
# Parsed once at import time; all the tests below read this object.
doc = NumpyDocString(doc_txt)
+
+
def test_signature():
    # The signature line is captured verbatim.
    sig = doc['Signature']
    assert sig.startswith('numpy.multivariate_normal(')
    assert sig.endswith('spam=None)')
+
def test_summary():
    summary = doc['Summary']
    assert summary[0].startswith('Draw values')
    assert summary[-1].endswith('covariance.')
+
def test_extended_summary():
    extended = doc['Extended Summary']
    assert extended[0].startswith('The multivariate normal')
+
def test_parameters():
    params = doc['Parameters']
    assert_equal(len(params), 3)
    assert_equal([name for name, _, _ in params], ['mean', 'cov', 'shape'])

    name, param_type, description = params[1]
    assert_equal(param_type, '(N, N) ndarray')
    assert description[0].startswith('Covariance matrix')
    # The embedded math block keeps its relative indentation.
    assert params[0][-1][-2] == '   (1+2+3)/3'
+
def test_other_parameters():
    other = doc['Other Parameters']
    assert_equal(len(other), 1)
    assert_equal([name for name, _, _ in other], ['spam'])
    name, param_type, description = other[0]
    assert_equal(param_type, 'parrot')
    assert description[0].startswith('A parrot off its mortal coil')
+
+def test_returns():
+    assert_equal(len(doc['Returns']), 2)
+    arg, arg_type, desc = doc['Returns'][0]
+    assert_equal(arg, 'out')
+    assert_equal(arg_type, 'ndarray')
+    assert desc[0].startswith('The drawn samples')
+    assert desc[-1].endswith('distribution.')
+
+    arg, arg_type, desc = doc['Returns'][1]
+    assert_equal(arg, 'list of str')
+    assert_equal(arg_type, '')
+    assert desc[0].startswith('This is not a real')
+    assert desc[-1].endswith('anonymous return values.')
+
+def test_notes():
+    assert doc['Notes'][0].startswith('Instead')
+    assert doc['Notes'][-1].endswith('definite.')
+    assert_equal(len(doc['Notes']), 17)
+
+def test_references():
+    assert doc['References'][0].startswith('..')
+    assert doc['References'][-1].endswith('2001.')
+
+def test_examples():
+    assert doc['Examples'][0].startswith('>>>')
+    assert doc['Examples'][-1].endswith('True]')
+
+def test_index():
+    assert_equal(doc['index']['default'], 'random')
+    assert_equal(len(doc['index']), 2)
+    assert_equal(len(doc['index']['refguide']), 2)
+
+def non_blank_line_by_line_compare(a,b):
+    a = textwrap.dedent(a)
+    b = textwrap.dedent(b)
+    a = [l.rstrip() for l in a.split('\n') if l.strip()]
+    b = [l.rstrip() for l in b.split('\n') if l.strip()]
+    for n,line in enumerate(a):
+        if not line == b[n]:
+            raise AssertionError("Lines %s of a and b differ: "
+                                 "\n>>> %s\n<<< %s\n" %
+                                 (n,line,b[n]))
+def test_str():
+    non_blank_line_by_line_compare(str(doc),
+"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
+
+Draw values from a multivariate normal distribution with specified
+mean and covariance.
+
+The multivariate normal or Gaussian distribution is a generalisation
+of the one-dimensional normal distribution to higher dimensions.
+
+Parameters
+----------
+mean : (N,) ndarray
+    Mean of the N-dimensional distribution.
+
+    .. math::
+
+       (1+2+3)/3
+
+cov : (N, N) ndarray
+    Covariance matrix of the distribution.
+shape : tuple of ints
+    Given a shape of, for example, (m,n,k), m*n*k samples are
+    generated, and packed in an m-by-n-by-k arrangement.  Because
+    each sample is N-dimensional, the output shape is (m,n,k,N).
+
+Returns
+-------
+out : ndarray
+    The drawn samples, arranged according to `shape`.  If the
+    shape given is (m,n,...), then the shape of `out` is is
+    (m,n,...,N).
+
+    In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+    value drawn from the distribution.
+list of str
+    This is not a real return value.  It exists to test
+    anonymous return values.
+
+Other Parameters
+----------------
+spam : parrot
+    A parrot off its mortal coil.
+
+Raises
+------
+RuntimeError
+    Some error
+
+Warns
+-----
+RuntimeWarning
+    Some warning
+
+Warnings
+--------
+Certain warnings apply.
+
+See Also
+--------
+`some`_, `other`_, `funcs`_
+
+`otherfunc`_
+    relationship
+
+Notes
+-----
+Instead of specifying the full covariance matrix, popular
+approximations include:
+
+  - Spherical covariance (`cov` is a multiple of the identity matrix)
+  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
+
+This geometrical property can be seen in two dimensions by plotting
+generated data-points:
+
+>>> mean = [0,0]
+>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
+
+>>> x,y = multivariate_normal(mean,cov,5000).T
+>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
+
+Note that the covariance matrix must be symmetric and non-negative
+definite.
+
+References
+----------
+.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
+       Processes," 3rd ed., McGraw-Hill Companies, 1991
+.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
+       2nd ed., Wiley, 2001.
+
+Examples
+--------
+>>> mean = (1,2)
+>>> cov = [[1,0],[1,0]]
+>>> x = multivariate_normal(mean,cov,(3,3))
+>>> print x.shape
+(3, 3, 2)
+
+The following is probably true, given that 0.6 is roughly twice the
+standard deviation:
+
+>>> print list( (x[0,0,:] - mean) < 0.6 )
+[True, True]
+
+.. index:: random
+   :refguide: random;distributions, random;gauss""")
+
+
+def test_sphinx_str():
+    sphinx_doc = SphinxDocString(doc_txt)
+    non_blank_line_by_line_compare(str(sphinx_doc),
+"""
+.. index:: random
+   single: random;distributions, random;gauss
+
+Draw values from a multivariate normal distribution with specified
+mean and covariance.
+
+The multivariate normal or Gaussian distribution is a generalisation
+of the one-dimensional normal distribution to higher dimensions.
+
+:Parameters:
+
+    **mean** : (N,) ndarray
+
+        Mean of the N-dimensional distribution.
+
+        .. math::
+
+           (1+2+3)/3
+
+    **cov** : (N, N) ndarray
+
+        Covariance matrix of the distribution.
+
+    **shape** : tuple of ints
+
+        Given a shape of, for example, (m,n,k), m*n*k samples are
+        generated, and packed in an m-by-n-by-k arrangement.  Because
+        each sample is N-dimensional, the output shape is (m,n,k,N).
+
+:Returns:
+
+    **out** : ndarray
+
+        The drawn samples, arranged according to `shape`.  If the
+        shape given is (m,n,...), then the shape of `out` is is
+        (m,n,...,N).
+
+        In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+        value drawn from the distribution.
+
+    list of str
+
+        This is not a real return value.  It exists to test
+        anonymous return values.
+
+:Other Parameters:
+
+    **spam** : parrot
+
+        A parrot off its mortal coil.
+
+:Raises:
+
+    **RuntimeError**
+
+        Some error
+
+:Warns:
+
+    **RuntimeWarning**
+
+        Some warning
+
+.. warning::
+
+    Certain warnings apply.
+
+.. seealso::
+
+    :obj:`some`, :obj:`other`, :obj:`funcs`
+
+    :obj:`otherfunc`
+        relationship
+
+.. rubric:: Notes
+
+Instead of specifying the full covariance matrix, popular
+approximations include:
+
+  - Spherical covariance (`cov` is a multiple of the identity matrix)
+  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
+
+This geometrical property can be seen in two dimensions by plotting
+generated data-points:
+
+>>> mean = [0,0]
+>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
+
+>>> x,y = multivariate_normal(mean,cov,5000).T
+>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
+
+Note that the covariance matrix must be symmetric and non-negative
+definite.
+
+.. rubric:: References
+
+.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
+       Processes," 3rd ed., McGraw-Hill Companies, 1991
+.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
+       2nd ed., Wiley, 2001.
+
+.. only:: latex
+
+   [1]_, [2]_
+
+.. rubric:: Examples
+
+>>> mean = (1,2)
+>>> cov = [[1,0],[1,0]]
+>>> x = multivariate_normal(mean,cov,(3,3))
+>>> print x.shape
+(3, 3, 2)
+
+The following is probably true, given that 0.6 is roughly twice the
+standard deviation:
+
+>>> print list( (x[0,0,:] - mean) < 0.6 )
+[True, True]
+""")
+
+
+doc2 = NumpyDocString("""
+    Returns array of indices of the maximum values of along the given axis.
+
+    Parameters
+    ----------
+    a : {array_like}
+        Array to look in.
+    axis : {None, integer}
+        If None, the index is into the flattened array, otherwise along
+        the specified axis""")
+
+def test_parameters_without_extended_description():
+    assert_equal(len(doc2['Parameters']), 2)
+
+doc3 = NumpyDocString("""
+    my_signature(*params, **kwds)
+
+    Return this and that.
+    """)
+
+def test_escape_stars():
+    signature = str(doc3).split('\n')[0]
+    assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
+
+doc4 = NumpyDocString(
+    """a.conj()
+
+    Return an array with all complex-valued elements conjugated.""")
+
+def test_empty_extended_summary():
+    assert_equal(doc4['Extended Summary'], [])
+
+doc5 = NumpyDocString(
+    """
+    a.something()
+
+    Raises
+    ------
+    LinAlgException
+        If array is singular.
+
+    Warns
+    -----
+    SomeWarning
+        If needed
+    """)
+
+def test_raises():
+    assert_equal(len(doc5['Raises']), 1)
+    name,_,desc = doc5['Raises'][0]
+    assert_equal(name,'LinAlgException')
+    assert_equal(desc,['If array is singular.'])
+
+def test_warns():
+    assert_equal(len(doc5['Warns']), 1)
+    name,_,desc = doc5['Warns'][0]
+    assert_equal(name,'SomeWarning')
+    assert_equal(desc,['If needed'])
+
+def test_see_also():
+    doc6 = NumpyDocString(
+    """
+    z(x,theta)
+
+    See Also
+    --------
+    func_a, func_b, func_c
+    func_d : some equivalent func
+    foo.func_e : some other func over
+             multiple lines
+    func_f, func_g, :meth:`func_h`, func_j,
+    func_k
+    :obj:`baz.obj_q`
+    :class:`class_j`: fubar
+        foobar
+    """)
+
+    assert len(doc6['See Also']) == 12
+    for func, desc, role in doc6['See Also']:
+        if func in ('func_a', 'func_b', 'func_c', 'func_f',
+                    'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
+            assert(not desc)
+        else:
+            assert(desc)
+
+        if func == 'func_h':
+            assert role == 'meth'
+        elif func == 'baz.obj_q':
+            assert role == 'obj'
+        elif func == 'class_j':
+            assert role == 'class'
+        else:
+            assert role is None
+
+        if func == 'func_d':
+            assert desc == ['some equivalent func']
+        elif func == 'foo.func_e':
+            assert desc == ['some other func over', 'multiple lines']
+        elif func == 'class_j':
+            assert desc == ['fubar', 'foobar']
+
+def test_see_also_print():
+    class Dummy(object):
+        """
+        See Also
+        --------
+        func_a, func_b
+        func_c : some relationship
+                 goes here
+        func_d
+        """
+        pass
+
+    obj = Dummy()
+    s = str(FunctionDoc(obj, role='func'))
+    assert(':func:`func_a`, :func:`func_b`' in s)
+    assert('    some relationship' in s)
+    assert(':func:`func_d`' in s)
+
+doc7 = NumpyDocString("""
+
+        Doc starts on second line.
+
+        """)
+
+def test_empty_first_line():
+    assert doc7['Summary'][0].startswith('Doc starts')
+
+
+def test_no_summary():
+    str(SphinxDocString("""
+    Parameters
+    ----------"""))
+
+
+def test_unicode():
+    doc = SphinxDocString("""
+    öäöäöäöäöåååå
+
+    öäöäöäööäååå
+
+    Parameters
+    ----------
+    ååå : äää
+        ööö
+
+    Returns
+    -------
+    ååå : ööö
+        äää
+
+    """)
+    assert isinstance(doc['Summary'][0], str)
+    assert doc['Summary'][0] == 'öäöäöäöäöåååå'
+
+def test_plot_examples():
+    cfg = dict(use_plots=True)
+
+    doc = SphinxDocString("""
+    Examples
+    --------
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot([1,2,3],[4,5,6])
+    >>> plt.show()
+    """, config=cfg)
+    assert 'plot::' in str(doc), str(doc)
+
+    doc = SphinxDocString("""
+    Examples
+    --------
+    .. plot::
+
+       import matplotlib.pyplot as plt
+       plt.plot([1,2,3],[4,5,6])
+       plt.show()
+    """, config=cfg)
+    assert str(doc).count('plot::') == 1, str(doc)
+
+def test_class_members():
+
+    class Dummy(object):
+        """
+        Dummy class.
+
+        """
+        def spam(self, a, b):
+            """Spam\n\nSpam spam."""
+            pass
+        def ham(self, c, d):
+            """Cheese\n\nNo cheese."""
+            pass
+        @property
+        def spammity(self):
+            """Spammity index"""
+            return 0.95
+
+        class Ignorable(object):
+            """local class, to be ignored"""
+            pass
+
+    for cls in (ClassDoc, SphinxClassDoc):
+        doc = cls(Dummy, config=dict(show_class_members=False))
+        assert 'Methods' not in str(doc), (cls, str(doc))
+        assert 'spam' not in str(doc), (cls, str(doc))
+        assert 'ham' not in str(doc), (cls, str(doc))
+        assert 'spammity' not in str(doc), (cls, str(doc))
+        assert 'Spammity index' not in str(doc), (cls, str(doc))
+
+        doc = cls(Dummy, config=dict(show_class_members=True))
+        assert 'Methods' in str(doc), (cls, str(doc))
+        assert 'spam' in str(doc), (cls, str(doc))
+        assert 'ham' in str(doc), (cls, str(doc))
+        assert 'spammity' in str(doc), (cls, str(doc))
+
+        if cls is SphinxClassDoc:
+            assert '.. autosummary::' in str(doc), str(doc)
+        else:
+            assert 'Spammity index' in str(doc), str(doc)
+
+def test_duplicate_signature():
+    # Duplicate function signatures occur e.g. in ufuncs, when the
+    # automatic mechanism adds one, and a more detailed comes from the
+    # docstring itself.
+
+    doc = NumpyDocString(
+    """
+    z(x1, x2)
+
+    z(a, theta)
+    """)
+
+    assert doc['Signature'].strip() == 'z(a, theta)'
+
+
+class_doc_txt = """
+    Foo
+
+    Parameters
+    ----------
+    f : callable ``f(t, y, *f_args)``
+        Aaa.
+    jac : callable ``jac(t, y, *jac_args)``
+        Bbb.
+
+    Attributes
+    ----------
+    t : float
+        Current time.
+    y : ndarray
+        Current variable values.
+
+    Methods
+    -------
+    a
+    b
+    c
+
+    Examples
+    --------
+    For usage examples, see `ode`.
+"""
+
+def test_class_members_doc():
+    doc = ClassDoc(None, class_doc_txt)
+    non_blank_line_by_line_compare(str(doc),
+    """
+    Foo
+
+    Parameters
+    ----------
+    f : callable ``f(t, y, *f_args)``
+        Aaa.
+    jac : callable ``jac(t, y, *jac_args)``
+        Bbb.
+
+    Examples
+    --------
+    For usage examples, see `ode`.
+
+    Attributes
+    ----------
+    t : float
+        Current time.
+    y : ndarray
+        Current variable values.
+
+    Methods
+    -------
+    a
+
+    b
+
+    c
+
+    .. index::
+
+    """)
+
+def test_class_members_doc_sphinx():
+    doc = SphinxClassDoc(None, class_doc_txt)
+    non_blank_line_by_line_compare(str(doc),
+    """
+    Foo
+
+    :Parameters:
+
+        **f** : callable ``f(t, y, *f_args)``
+
+            Aaa.
+
+        **jac** : callable ``jac(t, y, *jac_args)``
+
+            Bbb.
+
+    .. rubric:: Examples
+
+    For usage examples, see `ode`.
+
+    .. rubric:: Attributes
+
+    ===  ==========
+      t  (float) Current time.
+      y  (ndarray) Current variable values.
+    ===  ==========
+
+    .. rubric:: Methods
+
+    ===  ==========
+      a
+      b
+      c
+    ===  ==========
+
+    """)
+
+if __name__ == "__main__":
+    import nose
+    nose.run()
diff --git a/doc/sphinxext/numpydoc/numpydoc/tests/test_linkcode.py b/doc/sphinxext/numpydoc/numpydoc/tests/test_linkcode.py
new file mode 100644
index 0000000..340166a
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/tests/test_linkcode.py
@@ -0,0 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
+import numpydoc.linkcode
+
+# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/numpydoc/tests/test_phantom_import.py b/doc/sphinxext/numpydoc/numpydoc/tests/test_phantom_import.py
new file mode 100644
index 0000000..80fae08
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/tests/test_phantom_import.py
@@ -0,0 +1,12 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+from nose import SkipTest
+
+def test_import():
+    if sys.version_info[0] >= 3:
+        raise SkipTest("phantom_import not ported to Py3")
+
+    import numpydoc.phantom_import
+
+# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/numpydoc/tests/test_plot_directive.py b/doc/sphinxext/numpydoc/numpydoc/tests/test_plot_directive.py
new file mode 100644
index 0000000..1ea1076
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/tests/test_plot_directive.py
@@ -0,0 +1,11 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+from nose import SkipTest
+
+def test_import():
+    if sys.version_info[0] >= 3:
+        raise SkipTest("plot_directive not ported to Python 3 (use the one from Matplotlib instead)")
+    import numpydoc.plot_directive
+
+# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/numpydoc/tests/test_traitsdoc.py b/doc/sphinxext/numpydoc/numpydoc/tests/test_traitsdoc.py
new file mode 100644
index 0000000..fe5078c
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/tests/test_traitsdoc.py
@@ -0,0 +1,11 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+from nose import SkipTest
+
+def test_import():
+    if sys.version_info[0] >= 3:
+        raise SkipTest("traitsdoc not ported to Python3")
+    import numpydoc.traitsdoc
+
+# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/numpydoc/traitsdoc.py b/doc/sphinxext/numpydoc/numpydoc/traitsdoc.py
new file mode 100644
index 0000000..596c54e
--- /dev/null
+++ b/doc/sphinxext/numpydoc/numpydoc/traitsdoc.py
@@ -0,0 +1,142 @@
+"""
+=========
+traitsdoc
+=========
+
+Sphinx extension that handles docstrings in the Numpy standard format, [1]
+and support Traits [2].
+
+This extension can be used as a replacement for ``numpydoc`` when support
+for Traits is required.
+
+.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
+.. [2] http://code.enthought.com/projects/traits/
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import inspect
+import os
+import pydoc
+import collections
+
+from . import docscrape
+from . import docscrape_sphinx
+from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString
+
+from . import numpydoc
+
+from . import comment_eater
+
+class SphinxTraitsDoc(SphinxClassDoc):
+    def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc):
+        if not inspect.isclass(cls):
+            raise ValueError("Initialise using a class. Got %r" % cls)
+        self._cls = cls
+
+        if modulename and not modulename.endswith('.'):
+            modulename += '.'
+        self._mod = modulename
+        self._name = cls.__name__
+        self._func_doc = func_doc
+
+        docstring = pydoc.getdoc(cls)
+        docstring = docstring.split('\n')
+
+        # De-indent paragraph
+        try:
+            indent = min(len(s) - len(s.lstrip()) for s in docstring
+                         if s.strip())
+        except ValueError:
+            indent = 0
+
+        for n,line in enumerate(docstring):
+            docstring[n] = docstring[n][indent:]
+
+        self._doc = docscrape.Reader(docstring)
+        self._parsed_data = {
+            'Signature': '',
+            'Summary': '',
+            'Description': [],
+            'Extended Summary': [],
+            'Parameters': [],
+            'Returns': [],
+            'Raises': [],
+            'Warns': [],
+            'Other Parameters': [],
+            'Traits': [],
+            'Methods': [],
+            'See Also': [],
+            'Notes': [],
+            'References': '',
+            'Example': '',
+            'Examples': '',
+            'index': {}
+            }
+
+        self._parse()
+
+    def _str_summary(self):
+        return self['Summary'] + ['']
+
+    def _str_extended_summary(self):
+        return self['Description'] + self['Extended Summary'] + ['']
+
+    def __str__(self, indent=0, func_role="func"):
+        out = []
+        out += self._str_signature()
+        out += self._str_index() + ['']
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Traits', 'Methods',
+                           'Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_see_also("obj")
+        out += self._str_section('Notes')
+        out += self._str_references()
+        out += self._str_section('Example')
+        out += self._str_section('Examples')
+        out = self._str_indent(out,indent)
+        return '\n'.join(out)
+
+def looks_like_issubclass(obj, classname):
+    """ Return True if the object has a class or superclass with the given class
+    name.
+
+    Ignores old-style classes.
+    """
+    t = obj
+    if t.__name__ == classname:
+        return True
+    for klass in t.__mro__:
+        if klass.__name__ == classname:
+            return True
+    return False
+
+def get_doc_object(obj, what=None, config=None):
+    if what is None:
+        if inspect.isclass(obj):
+            what = 'class'
+        elif inspect.ismodule(obj):
+            what = 'module'
+        elif isinstance(obj, collections.Callable):
+            what = 'function'
+        else:
+            what = 'object'
+    if what == 'class':
+        doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config)
+        if looks_like_issubclass(obj, 'HasTraits'):
+            for name, trait, comment in comment_eater.get_class_traits(obj):
+                # Exclude private traits.
+                if not name.startswith('_'):
+                    doc['Traits'].append((name, trait, comment.splitlines()))
+        return doc
+    elif what in ('function', 'method'):
+        return SphinxFunctionDoc(obj, '', config=config)
+    else:
+        return SphinxDocString(pydoc.getdoc(obj), config=config)
+
+def setup(app):
+    # init numpydoc
+    numpydoc.setup(app, get_doc_object)
+
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/README.md b/ipynbs/presentations/2014.05.13-ElBrogrammer/README.md
new file mode 100644
index 0000000..61ed699
--- /dev/null
+++ b/ipynbs/presentations/2014.05.13-ElBrogrammer/README.md
@@ -0,0 +1,9 @@
+Caporaso lab meeting presentation
+=================================
+
+A presentation of scikit-bio, its goals, and a few live demos.
+
+This was presented at the Caporaso lab meeting on 05/13/2014. It was tested
+against pre-0.1.0 scikit-bio so may not work with newer versions of scikit-bio,
+due to inevitable API changes that will happen. Tested using IPython Notebook
+2.0.0 in slideshow mode.
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/dm.txt b/ipynbs/presentations/2014.05.13-ElBrogrammer/dm.txt
new file mode 100644
index 0000000..bfed96e
--- /dev/null
+++ b/ipynbs/presentations/2014.05.13-ElBrogrammer/dm.txt
@@ -0,0 +1,440 @@
+	M12Aptr.140800	M41Kner.140735	F24Plmr.140433	M53Tong.140327	F31Indl.140679	M33Fotl.140701	F21Aptl.140686	F32Mout.140603	M22Mout.140541	F12Navl.140828	F34Tong.140848	M53Fcsw.140426	M33Frhd.140580	M22Fcsp.140527	F11Indr.140809	F14Plmr.140786	M24Plmr.140833	M31Plmr.140505	M23Fotr.140344	F12Forr.140302	M43Ewax.140650	F21Plml.140704	F24Knee.140744	M11Nose.140428	M41Pinl.140284	F21Fotr.140485	F24Plml.140452	M64Fotl.140777	F22Kner.140705	M41Aptl.140841	M12Fcsp.140668	M32Frhd.140776	M43Fcsw.140 [...]
+M12Aptr.140800	0.0	0.826168597185	0.809390567019	0.902414914795	0.804899558217	0.666575546794	0.618341052729	0.87402556035	0.865264057244	0.599927420128	0.882185876595	0.935628455772	0.750639068232	0.937378646066	0.817477980869	0.80737152464	0.815225158464	0.831686933475	0.745397365005	0.819918926307	0.699684165149	0.793059303246	0.805277199815	0.578298724825	0.725211102495	0.692605395656	0.722647776366	0.855259995071	0.816340010446	0.66665280502	0.931545861144	0.664630531143	0.942400217 [...]
+M41Kner.140735	0.826168597185	0.0	0.656337598787	0.756676929321	0.65094543097	0.678895730071	0.725193909843	0.729486678461	0.719639610483	0.775430320712	0.72685242582	0.833908777831	0.653316977754	0.803747359574	0.629913980926	0.63258468531	0.577440287105	0.639256305395	0.75807984077	0.623004904754	0.909140888957	0.656035185615	0.5730879435	0.831998377948	0.643028029417	0.919656915917	0.703710497461	0.672703191968	0.62986211286	0.742992971866	0.804280270572	0.753644788744	0.815854498183	 [...]
+F24Plmr.140433	0.809390567019	0.656337598787	0.0	0.687460870742	0.568259590868	0.637113199986	0.734515317293	0.656178471215	0.700776442256	0.778878486606	0.711132679213	0.857573858975	0.723040901311	0.832189430559	0.578793950315	0.572593083998	0.602073501905	0.569823249295	0.731702832918	0.612748333357	0.90917256997	0.592320712683	0.602765964794	0.83718379918	0.691847839348	0.928665821099	0.610056092802	0.677200068928	0.733830957781	0.792097275421	0.855668488202	0.741012437748	0.87664650 [...]
+M53Tong.140327	0.902414914795	0.756676929321	0.687460870742	0.0	0.778136986263	0.780649867091	0.86054881568	0.357830946284	0.419428592577	0.847998018553	0.322405439346	0.832381668514	0.722306509463	0.785019801624	0.631357548603	0.711416059309	0.738997986983	0.53288545637	0.854809486465	0.647435024634	0.92155897961	0.728652467975	0.717413603331	0.906638983605	0.69865932409	0.922282019353	0.834485707576	0.825807287427	0.782325078484	0.833747918759	0.832077896626	0.781208801809	0.8391336253 [...]
+F31Indl.140679	0.804899558217	0.65094543097	0.568259590868	0.778136986263	0.0	0.709492123391	0.715099023428	0.746838875894	0.729502567033	0.795117766057	0.762782970531	0.862831349663	0.732200481493	0.840555425662	0.650331446449	0.620727515753	0.567577672063	0.675263342445	0.707843230282	0.630931803005	0.900720497833	0.555375503942	0.658277483889	0.842057028189	0.67519967303	0.91993966723	0.603864158732	0.642738787624	0.71852242999	0.807656654947	0.863944286203	0.692592480543	0.8825720059 [...]
+M33Fotl.140701	0.666575546794	0.678895730071	0.637113199986	0.780649867091	0.709492123391	0.0	0.63169781161	0.759209454436	0.764855382818	0.644783397386	0.754166872489	0.845004031722	0.625933220763	0.84171521284	0.681584505095	0.648214746491	0.67347313471	0.674780424785	0.692552525721	0.689300831093	0.844032365726	0.653651638853	0.59474762765	0.706281346305	0.615525095828	0.847096010115	0.645969132921	0.724324782322	0.709267650335	0.715117242697	0.853254323569	0.63996980126	0.87029359047 [...]
+F21Aptl.140686	0.618341052729	0.725193909843	0.734515317293	0.86054881568	0.715099023428	0.63169781161	0.0	0.835531804673	0.83488598456	0.653742334734	0.84543407702	0.921293998095	0.696204468551	0.906403182119	0.753400996076	0.720727528566	0.714862590775	0.753829317011	0.716834044114	0.757436522062	0.796809505385	0.69821187735	0.714927898812	0.546281002456	0.618248436303	0.819288035008	0.679041560924	0.803905725245	0.758143044193	0.667095293856	0.904601563535	0.631425849007	0.91641233967 [...]
+F32Mout.140603	0.87402556035	0.729486678461	0.656178471215	0.357830946284	0.746838875894	0.759209454436	0.835531804673	0.0	0.3335129068	0.822842210591	0.325789814972	0.854647642047	0.710997228256	0.802922862317	0.534494584697	0.681158457428	0.708519661104	0.486380317112	0.833133360702	0.615597286787	0.886752962497	0.701964143288	0.705974857425	0.900029562376	0.681115820112	0.922576929966	0.805796172085	0.81583665377	0.779665782069	0.841743446445	0.850483932977	0.754142068177	0.8570617547 [...]
+M22Mout.140541	0.865264057244	0.719639610483	0.700776442256	0.419428592577	0.729502567033	0.764855382818	0.83488598456	0.3335129068	0.0	0.798653787984	0.445413480132	0.824651303204	0.686124343414	0.751773720556	0.587172520533	0.691171097229	0.691027675524	0.543168373115	0.826266040547	0.624451678028	0.916075534998	0.707369379155	0.730027890133	0.903219155576	0.671783593797	0.916405868479	0.801509420552	0.802954253191	0.754023525823	0.841396715844	0.822317511365	0.75194022718	0.8374609080 [...]
+F12Navl.140828	0.599927420128	0.775430320712	0.778878486606	0.847998018553	0.795117766057	0.644783397386	0.653742334734	0.822842210591	0.798653787984	0.0	0.816565628696	0.876550725627	0.670068031983	0.867142161818	0.807792592911	0.783751021205	0.79276796007	0.773676434265	0.791266758346	0.807950702417	0.746048929432	0.763960890838	0.757118171275	0.62428964297	0.613998696142	0.753569938801	0.750174619013	0.849947737078	0.796454021038	0.483253646054	0.893158649947	0.607556714269	0.89944041 [...]
+F34Tong.140848	0.882185876595	0.72685242582	0.711132679213	0.322405439346	0.762782970531	0.754166872489	0.84543407702	0.325789814972	0.445413480132	0.816565628696	0.0	0.821428218868	0.687625943695	0.757839331548	0.642240200043	0.734133878572	0.712269861258	0.566514722579	0.846436208669	0.648891680271	0.903146011046	0.709143890435	0.713023080085	0.886404924435	0.650718760392	0.902435369407	0.832115811772	0.82616907955	0.789497731168	0.81398369781	0.830853986794	0.739162587688	0.8365399683 [...]
+M53Fcsw.140426	0.935628455772	0.833908777831	0.857573858975	0.832381668514	0.862831349663	0.845004031722	0.921293998095	0.854647642047	0.824651303204	0.876550725627	0.821428218868	0.0	0.812878115422	0.601261632652	0.875548041146	0.867147165973	0.803270381576	0.85224502897	0.911795145972	0.889176078872	0.954707018567	0.878370600639	0.886279436852	0.933343268709	0.865261054538	0.951205774977	0.909464884952	0.846020520564	0.814746490489	0.885208648796	0.681539576475	0.899885946014	0.6204946 [...]
+M33Frhd.140580	0.750639068232	0.653316977754	0.723040901311	0.722306509463	0.732200481493	0.625933220763	0.696204468551	0.710997228256	0.686124343414	0.670068031983	0.687625943695	0.812878115422	0.0	0.825575452157	0.696884831551	0.66379190016	0.676263670899	0.668044468284	0.797212003973	0.729316661277	0.826540294369	0.691747743374	0.617347131822	0.709365432786	0.57631283937	0.861272832636	0.753491109117	0.762491581256	0.633027836191	0.691566759625	0.800772997762	0.636954443568	0.81775481 [...]
+M22Fcsp.140527	0.937378646066	0.803747359574	0.832189430559	0.785019801624	0.840555425662	0.84171521284	0.906403182119	0.802922862317	0.751773720556	0.867142161818	0.757839331548	0.601261632652	0.825575452157	0.0	0.859873897887	0.840367373023	0.782256098124	0.828605048628	0.901800928539	0.843971309322	0.953327881021	0.849146356485	0.86850851583	0.935117227668	0.835420226958	0.949601399347	0.910202787349	0.840240263499	0.786815859489	0.88197814577	0.638227510567	0.904960126506	0.654063992 [...]
+F11Indr.140809	0.817477980869	0.629913980926	0.578793950315	0.631357548603	0.650331446449	0.681584505095	0.753400996076	0.534494584697	0.587172520533	0.807792592911	0.642240200043	0.875548041146	0.696884831551	0.859873897887	0.0	0.588216857477	0.595965350287	0.461368102497	0.732396243138	0.524686361678	0.888786761749	0.576586802058	0.623396693525	0.838522188533	0.65253888325	0.924546332934	0.700858327291	0.685608436405	0.712330000521	0.807222885062	0.870731683697	0.735907409089	0.8876339 [...]
+F14Plmr.140786	0.80737152464	0.63258468531	0.572593083998	0.711416059309	0.620727515753	0.648214746491	0.720727528566	0.681158457428	0.691171097229	0.783751021205	0.734133878572	0.867147165973	0.66379190016	0.840367373023	0.588216857477	0.0	0.569609729284	0.636796780542	0.712152204933	0.590251776201	0.910737118163	0.572261387928	0.623439749177	0.826853783263	0.636549421426	0.927377770859	0.662212958284	0.648018513845	0.686623023592	0.806813333234	0.863466247526	0.743752981217	0.877012110 [...]
+M24Plmr.140833	0.815225158464	0.577440287105	0.602073501905	0.738997986983	0.567577672063	0.67347313471	0.714862590775	0.708519661104	0.691027675524	0.79276796007	0.712269861258	0.803270381576	0.676263670899	0.782256098124	0.595965350287	0.569609729284	0.0	0.609706252617	0.714893153293	0.576623393263	0.893971025017	0.570074969926	0.64600339039	0.837866619893	0.578031144513	0.924577746405	0.667140473738	0.628103375173	0.652651180803	0.802319452637	0.812289118151	0.746295320598	0.842207471 [...]
+M31Plmr.140505	0.831686933475	0.639256305395	0.569823249295	0.53288545637	0.675263342445	0.674780424785	0.753829317011	0.486380317112	0.543168373115	0.773676434265	0.566514722579	0.85224502897	0.668044468284	0.828605048628	0.461368102497	0.636796780542	0.609706252617	0.0	0.805295778683	0.575014443901	0.899848599894	0.611033975797	0.615544352369	0.822280756551	0.625412266844	0.916787588114	0.736442126595	0.75160361947	0.716064749375	0.773666329574	0.833658875872	0.69612666706	0.8539619293 [...]
+M23Fotr.140344	0.745397365005	0.75807984077	0.731702832918	0.854809486465	0.707843230282	0.692552525721	0.716834044114	0.833133360702	0.826266040547	0.791266758346	0.846436208669	0.911795145972	0.797212003973	0.901800928539	0.732396243138	0.712152204933	0.714893153293	0.805295778683	0.0	0.743807717437	0.890238144228	0.677297074456	0.718723817002	0.837805880136	0.782218234931	0.881942237305	0.633077761715	0.696757595186	0.801731416765	0.805854487411	0.923510132906	0.744473911105	0.9278601 [...]
+F12Forr.140302	0.819918926307	0.623004904754	0.612748333357	0.647435024634	0.630931803005	0.689300831093	0.757436522062	0.615597286787	0.624451678028	0.807950702417	0.648891680271	0.889176078872	0.729316661277	0.843971309322	0.524686361678	0.590251776201	0.576623393263	0.575014443901	0.743807717437	0.0	0.921071034215	0.553085186891	0.64259313819	0.856565054273	0.633065844337	0.92878282195	0.663050418947	0.648371613427	0.674813319751	0.821290673932	0.879763384859	0.75317016772	0.887270771 [...]
+M43Ewax.140650	0.699684165149	0.909140888957	0.90917256997	0.92155897961	0.900720497833	0.844032365726	0.796809505385	0.886752962497	0.916075534998	0.746048929432	0.903146011046	0.954707018567	0.826540294369	0.953327881021	0.888786761749	0.910737118163	0.893971025017	0.899848599894	0.890238144228	0.921071034215	0.0	0.901690917332	0.892421444333	0.661220754152	0.782123283725	0.565104447367	0.870798942786	0.936751805854	0.911909622485	0.716772619134	0.950610184479	0.768885058457	0.96561237 [...]
+F21Plml.140704	0.793059303246	0.656035185615	0.592320712683	0.728652467975	0.555375503942	0.653651638853	0.69821187735	0.701964143288	0.707369379155	0.763960890838	0.709143890435	0.878370600639	0.691747743374	0.849146356485	0.576586802058	0.572261387928	0.570074969926	0.611033975797	0.677297074456	0.553085186891	0.901690917332	0.0	0.646082380435	0.817026419337	0.631804176948	0.912038802889	0.675286143833	0.61247998045	0.717210099607	0.782185373444	0.869176469535	0.687470946568	0.88878059 [...]
+F24Knee.140744	0.805277199815	0.5730879435	0.602765964794	0.717413603331	0.658277483889	0.59474762765	0.714927898812	0.705974857425	0.730027890133	0.757118171275	0.713023080085	0.886279436852	0.617347131822	0.86850851583	0.623396693525	0.623439749177	0.64600339039	0.615544352369	0.718723817002	0.64259313819	0.892421444333	0.646082380435	0.0	0.80966574282	0.602402233088	0.903078045826	0.674482341184	0.688996353785	0.645698553007	0.74556692451	0.877857365593	0.689026688505	0.888609252529	0 [...]
+M11Nose.140428	0.578298724825	0.831998377948	0.83718379918	0.906638983605	0.842057028189	0.706281346305	0.546281002456	0.900029562376	0.903219155576	0.62428964297	0.886404924435	0.933343268709	0.709365432786	0.935117227668	0.838522188533	0.826853783263	0.837866619893	0.822280756551	0.837805880136	0.856565054273	0.661220754152	0.817026419337	0.80966574282	0.0	0.713676349085	0.666212438308	0.814217103691	0.883930845126	0.834881970556	0.558397861233	0.911839256645	0.663603067759	0.918506552 [...]
+M41Pinl.140284	0.725211102495	0.643028029417	0.691847839348	0.69865932409	0.67519967303	0.615525095828	0.618248436303	0.681115820112	0.671783593797	0.613998696142	0.650718760392	0.865261054538	0.57631283937	0.835420226958	0.65253888325	0.636549421426	0.578031144513	0.625412266844	0.782218234931	0.633065844337	0.782123283725	0.631804176948	0.602402233088	0.713676349085	0.0	0.850477200424	0.718488175806	0.756608100855	0.660324857377	0.692354305631	0.847844867895	0.574925927671	0.8727000518 [...]
+F21Fotr.140485	0.692605395656	0.919656915917	0.928665821099	0.922282019353	0.91993966723	0.847096010115	0.819288035008	0.922576929966	0.916405868479	0.753569938801	0.902435369407	0.951205774977	0.861272832636	0.949601399347	0.924546332934	0.927377770859	0.924577746405	0.916787588114	0.881942237305	0.92878282195	0.565104447367	0.912038802889	0.903078045826	0.666212438308	0.850477200424	0.0	0.89204941988	0.94272939925	0.92541874389	0.72584180179	0.951342358661	0.799036273459	0.963631044871 [...]
+F24Plml.140452	0.722647776366	0.703710497461	0.610056092802	0.834485707576	0.603864158732	0.645969132921	0.679041560924	0.805796172085	0.801509420552	0.750174619013	0.832115811772	0.909464884952	0.753491109117	0.910202787349	0.700858327291	0.662212958284	0.667140473738	0.736442126595	0.633077761715	0.663050418947	0.870798942786	0.675286143833	0.674482341184	0.814217103691	0.718488175806	0.89204941988	0.0	0.719232549628	0.755827145472	0.758248760531	0.896220990193	0.708038029386	0.9269433 [...]
+M64Fotl.140777	0.855259995071	0.672703191968	0.677200068928	0.825807287427	0.642738787624	0.724324782322	0.803905725245	0.81583665377	0.802954253191	0.849947737078	0.82616907955	0.846020520564	0.762491581256	0.840240263499	0.685608436405	0.648018513845	0.628103375173	0.75160361947	0.696757595186	0.648371613427	0.936751805854	0.61247998045	0.688996353785	0.883930845126	0.756608100855	0.94272939925	0.719232549628	0.0	0.663747420802	0.861144820859	0.835124162412	0.822730500266	0.85065461128 [...]
+F22Kner.140705	0.816340010446	0.62986211286	0.733830957781	0.782325078484	0.71852242999	0.709267650335	0.758143044193	0.779665782069	0.754023525823	0.796454021038	0.789497731168	0.814746490489	0.633027836191	0.786815859489	0.712330000521	0.686623023592	0.652651180803	0.716064749375	0.801731416765	0.674813319751	0.911909622485	0.717210099607	0.645698553007	0.834881970556	0.660324857377	0.92541874389	0.755827145472	0.663747420802	0.0	0.80779693097	0.748882495381	0.774454721432	0.7712047387 [...]
+M41Aptl.140841	0.66665280502	0.742992971866	0.792097275421	0.833747918759	0.807656654947	0.715117242697	0.667095293856	0.841743446445	0.841396715844	0.483253646054	0.81398369781	0.885208648796	0.691566759625	0.88197814577	0.807222885062	0.806813333234	0.802319452637	0.773666329574	0.805854487411	0.821290673932	0.716772619134	0.782185373444	0.74556692451	0.558397861233	0.692354305631	0.72584180179	0.758248760531	0.861144820859	0.80779693097	0.0	0.907141705218	0.663122786305	0.913531156217 [...]
+M12Fcsp.140668	0.931545861144	0.804280270572	0.855668488202	0.832077896626	0.863944286203	0.853254323569	0.904601563535	0.850483932977	0.822317511365	0.893158649947	0.830853986794	0.681539576475	0.800772997762	0.638227510567	0.870731683697	0.863466247526	0.812289118151	0.833658875872	0.923510132906	0.879763384859	0.950610184479	0.869176469535	0.877857365593	0.911839256645	0.847844867895	0.951342358661	0.896220990193	0.835124162412	0.748882495381	0.907141705218	0.0	0.873246604302	0.618953 [...]
+M32Frhd.140776	0.664630531143	0.753644788744	0.741012437748	0.781208801809	0.692592480543	0.63996980126	0.631425849007	0.754142068177	0.75194022718	0.607556714269	0.739162587688	0.899885946014	0.636954443568	0.904960126506	0.735907409089	0.743752981217	0.746295320598	0.69612666706	0.744473911105	0.75317016772	0.768885058457	0.687470946568	0.689026688505	0.663603067759	0.574925927671	0.799036273459	0.708038029386	0.822730500266	0.774454721432	0.663122786305	0.873246604302	0.0	0.8936895981 [...]
+M43Fcsw.140714	0.942400217468	0.815854498183	0.876646504962	0.839133625357	0.88257200593	0.870293590476	0.916412339679	0.857061754755	0.837460908062	0.899440415163	0.836539968349	0.620494689947	0.817754813774	0.654063992645	0.887633924858	0.877012110567	0.842207471949	0.853961929354	0.927860112446	0.88727077144	0.965612371356	0.888780592323	0.888609252529	0.918506552735	0.872700051852	0.963631044871	0.92694336083	0.850654611288	0.771204738756	0.913531156217	0.618953089559	0.893689598197	 [...]
+M43Knee.140412	0.819139711348	0.644055069811	0.64131984153	0.797124854513	0.643224062601	0.693119713783	0.736418830073	0.775740277299	0.765478931352	0.797683905197	0.7946788163	0.875656233769	0.702880798128	0.861031822268	0.660810721122	0.618850033663	0.595165040048	0.707812418744	0.703875168467	0.630081858701	0.903727169824	0.612162022775	0.66035825056	0.843121197327	0.624760777496	0.928363767127	0.696203399597	0.61933593565	0.623648710011	0.814860244298	0.852724561214	0.76636021409	0.8 [...]
+F14Tong.140430	0.858291617851	0.785632632217	0.7404565909	0.424744245207	0.76313590097	0.758695368215	0.83841898009	0.41149171993	0.415669920077	0.799472579822	0.289735923495	0.823859475828	0.688671160512	0.75249872264	0.697824620611	0.737999035167	0.744382177031	0.634437766177	0.853033629914	0.684953945408	0.878630110656	0.722161907834	0.751748631746	0.876486185699	0.640941953406	0.875313996962	0.818035400009	0.845424469943	0.783094242738	0.803901746474	0.846229057641	0.748760295936	0.8 [...]
+F22Fcsp.140783	0.954564876065	0.815724486913	0.88675301716	0.846350989276	0.88966904837	0.892116427769	0.930734972667	0.865570432545	0.829907756905	0.915014521928	0.84221599143	0.708346700471	0.83099943318	0.710584343121	0.890657225177	0.889571129713	0.84849462709	0.861468439625	0.939057301168	0.881469536387	0.964000823547	0.883773233323	0.90366271742	0.954428187651	0.862044744839	0.961823560084	0.934713485088	0.848303230896	0.719284229102	0.922750346744	0.63917234417	0.915967848768	0.69 [...]
+F34Ewax.140285	0.703541355352	0.657682123297	0.617317846979	0.829790551853	0.669958084156	0.62046367514	0.606950566602	0.799293146526	0.80447338637	0.67824803033	0.80677301744	0.870185568685	0.642348340977	0.865463106365	0.684413487296	0.629526812828	0.661301808336	0.723187827673	0.696279858601	0.707117304081	0.853522841412	0.658343314337	0.615215062957	0.721967947313	0.591911802704	0.874706575331	0.66961198333	0.694602449895	0.688531454326	0.705145386941	0.857539412386	0.66224452207	0.8 [...]
+M63Fotr.140467	0.831747680953	0.675994439754	0.673830398179	0.726716997389	0.655085151802	0.699926856107	0.780451416299	0.710828081232	0.708158761425	0.840368527236	0.745353210397	0.863819538999	0.741322907474	0.853868885909	0.629841480884	0.626195138809	0.646233643193	0.645837338973	0.722370651064	0.621614191376	0.927460789523	0.642097361007	0.696138306764	0.866765129438	0.716859490292	0.93660079209	0.724909165837	0.619487769609	0.696206818245	0.861307797693	0.862807947222	0.77801263253 [...]
+F33Tong.140358	0.881684106028	0.739246087589	0.664993126155	0.301322583213	0.752756038895	0.761474233435	0.845829446802	0.313558516648	0.387712924822	0.828736039057	0.243809506839	0.835820829201	0.69600346536	0.803577763232	0.59962374569	0.700352545893	0.71486959562	0.504490895089	0.839708144337	0.643835784323	0.902678642976	0.693729867745	0.716238033536	0.898663522431	0.662949061412	0.901922976292	0.814122535562	0.832741317903	0.809070457692	0.810390826306	0.846237404271	0.752007293412	 [...]
+M54Frhd.140695	0.657418119644	0.74156483829	0.708630517876	0.83428052369	0.716531020257	0.632214550692	0.565841236763	0.794136229898	0.806321528132	0.572038096005	0.805087651781	0.893829059467	0.656367125057	0.885474433033	0.773602701892	0.716874652321	0.738916752072	0.744479458689	0.715952317156	0.783549331866	0.754686955056	0.728341713474	0.682740268752	0.632345708137	0.60961887152	0.795124628908	0.668591185062	0.807997657279	0.788271278747	0.622669895262	0.875038922813	0.5132125882	0. [...]
+M31Knel.140873	0.772260070523	0.55966590348	0.642434319214	0.763836224917	0.622554821486	0.65399199112	0.708504690128	0.728308323566	0.724235933554	0.728263323961	0.728481019485	0.832404211581	0.60059207303	0.829166985998	0.653191064396	0.623859278049	0.591999025404	0.643245817738	0.76726855936	0.663651132533	0.890526707612	0.673306447144	0.560571901379	0.798786530509	0.635715694078	0.906629625006	0.690376392602	0.705728327364	0.65226730533	0.755978651804	0.829086883228	0.704566796381	0. [...]
+F31Ewxr.140289	0.815939929018	0.655053347331	0.673403857923	0.785629482349	0.622045726275	0.694293907672	0.72873716271	0.783403976923	0.765881963027	0.765359279698	0.756079388359	0.754101235141	0.69449151225	0.759945228232	0.728083820838	0.682489345754	0.638180852403	0.73587335312	0.787687736022	0.714903140178	0.861071844125	0.692382279319	0.695044396959	0.829094295805	0.642381432043	0.895150163927	0.709574743922	0.73481969305	0.704438987758	0.76543462311	0.758914392644	0.720430887401	0. [...]
+F32Frhd.140444	0.720278653083	0.716957142763	0.641901049894	0.730362611391	0.62267405819	0.691773960697	0.71359176729	0.717285431714	0.71842787082	0.716893926523	0.763884606334	0.874862592695	0.714312817435	0.855831252398	0.669233783469	0.601342255259	0.683207929854	0.697002974543	0.682049339663	0.671445222737	0.876584810307	0.633894543316	0.671877111166	0.801296734868	0.660835100532	0.907218118651	0.676230247407	0.709731875659	0.728798267773	0.726804415277	0.872734042176	0.671058634284	 [...]
+M34Plmr.140826	0.826652024485	0.623244846961	0.615882285129	0.755962858694	0.635927778948	0.62429671077	0.742585435105	0.737499489698	0.752333972661	0.779789521423	0.729381816392	0.864553022139	0.655842790294	0.834989521054	0.652708967948	0.554288990206	0.603250432444	0.641671696116	0.712076146341	0.633576815481	0.911258292284	0.562055403557	0.573770271161	0.831919162618	0.662370319146	0.924948743722	0.711289753228	0.636223971422	0.683753636097	0.803456049316	0.85962030256	0.723299394086 [...]
+F23Knee.140816	0.847946302612	0.652372752613	0.710584632832	0.776091783628	0.719152760389	0.714360735758	0.768712082577	0.775592654241	0.781104371648	0.828368446572	0.763464308045	0.867037841861	0.659674720658	0.834975925675	0.681104364759	0.649525448123	0.663322770933	0.714317787434	0.799393478405	0.704127359609	0.905146018766	0.703123326007	0.58741112241	0.834970901639	0.705702525981	0.922275518621	0.755662513064	0.678686999328	0.61007074297	0.798898390239	0.820842974585	0.80390912572	 [...]
+M54Plml.140636	0.821410172968	0.620619125224	0.583773215077	0.617500739034	0.610573520776	0.681568922724	0.760695024258	0.654087147033	0.684707872652	0.79790228097	0.676429462421	0.861161892616	0.718050811332	0.841450079855	0.584945812441	0.601497704555	0.563954860747	0.567849559046	0.719036459527	0.602123816979	0.914171795149	0.582061635803	0.644269026482	0.842667718204	0.65921395659	0.927987534284	0.701648696326	0.682547389375	0.696170721872	0.818754357825	0.853129105659	0.721057181751 [...]
+M42Indr.140519	0.807139797176	0.661881218321	0.557374945355	0.617107130328	0.655429414812	0.65651966762	0.702910818237	0.623862904111	0.638295330172	0.765998841041	0.671871263368	0.879958269517	0.712042513153	0.865528901224	0.579177748796	0.620278681133	0.5974632992	0.556906815948	0.719732683588	0.596855641511	0.89939783639	0.613804196275	0.632515524361	0.810840290143	0.590839813983	0.912166084508	0.695095313227	0.71802051445	0.729640947201	0.79107589307	0.881624840732	0.704378127234	0.8 [...]
+F32Pinr.140328	0.657786130075	0.698168980239	0.729167646558	0.799336453456	0.735715293518	0.580178494109	0.595640433365	0.770862906837	0.771075735139	0.633589908266	0.769741378983	0.876760649709	0.648205433759	0.858161296218	0.763784602719	0.701871599397	0.734015596797	0.751140528108	0.710476930191	0.752273845788	0.798429858373	0.732244500421	0.660345785266	0.752598916235	0.548517871899	0.824309855789	0.673020797333	0.795670974712	0.759024100834	0.738219768331	0.874201711739	0.6294597138 [...]
+M41Forl.140496	0.807459896768	0.575093973219	0.620951343153	0.682808176414	0.640759148385	0.671420265115	0.706820805639	0.636193248488	0.644617432577	0.753384825405	0.647689057727	0.885518170157	0.669250553434	0.836955338775	0.564639172982	0.610001184344	0.556292333905	0.583649423242	0.754256276678	0.53428589793	0.903024780719	0.592403368165	0.603087217322	0.821594886761	0.550674552171	0.916708220577	0.691828598651	0.685199768579	0.636694792713	0.754118023643	0.851802339034	0.69499137412 [...]
+M41Plml.140691	0.807727850295	0.575329350206	0.624407009455	0.657203475137	0.647732527048	0.686402677133	0.721594168541	0.587408607871	0.576632918183	0.753114516172	0.664617517086	0.839669447443	0.665364449212	0.81027697751	0.551394217998	0.63187110085	0.571163443481	0.536654648908	0.794342649422	0.578895654841	0.855509473155	0.64236703667	0.64850826855	0.806905211771	0.516845222676	0.913588576604	0.707132116973	0.696735083499	0.666684392545	0.756306164544	0.831384571721	0.713550052536	0 [...]
+F33Ewax.140872	0.73550068454	0.693879069387	0.654462202444	0.792942651463	0.623306523148	0.708964805512	0.629715699696	0.770313840788	0.771580216224	0.734183079018	0.789023415501	0.871448263668	0.722720494825	0.86943348071	0.707784504524	0.642185304983	0.630334226783	0.733244552494	0.681925709196	0.719664785688	0.842122391247	0.674988500967	0.656986073581	0.821645360489	0.62113326221	0.886117273148	0.602313724876	0.74548627985	0.7458844719	0.750645846189	0.876811564706	0.682510925766	0.9 [...]
+M42Pinr.140457	0.711333852033	0.710215228745	0.72843665996	0.787599949499	0.710127572578	0.686942134648	0.602539461537	0.764231596039	0.759286830313	0.530631527103	0.760279767895	0.882267646803	0.647766132449	0.858345670164	0.738607613217	0.717047907065	0.707580231082	0.725342441321	0.761764209727	0.754241938767	0.69960425742	0.712171498208	0.706875628379	0.655895170677	0.458533838601	0.801492115363	0.720796848932	0.812390813489	0.759999877878	0.565691041802	0.863626252297	0.576888677971 [...]
+M23Frhd.140404	0.644726160867	0.775364091642	0.715668076638	0.810749447065	0.706250635227	0.70066555953	0.705127188913	0.800752693031	0.797014755255	0.743062914759	0.773298431261	0.913434304328	0.756903042942	0.919472539359	0.75443203444	0.734502482913	0.733926087861	0.738703270637	0.760708701897	0.760594968828	0.769717577336	0.706206945446	0.721136557987	0.729936810104	0.71154422303	0.78955941744	0.660840759951	0.827504851317	0.834823692557	0.74262371708	0.919426215937	0.598972381176	0. [...]
+M23Plmr.140648	0.756331325428	0.624960580533	0.588103586361	0.68928338821	0.618688563894	0.604457111954	0.667187991025	0.664170943845	0.658950586729	0.731301837491	0.664690982399	0.85450187514	0.628040801957	0.829482150534	0.580431658243	0.545940469023	0.516733612016	0.585104078859	0.673115880139	0.570018041726	0.883049372742	0.560881037708	0.610363439975	0.799803184768	0.568453535839	0.897513699772	0.607732687661	0.677728193502	0.677292781496	0.733273413666	0.859900397518	0.677889907069 [...]
+F33Plml.140709	0.849441659196	0.687854728181	0.66497834707	0.814504996482	0.64510240745	0.717472551576	0.773569607193	0.785851333658	0.792088538721	0.838269446713	0.829871135717	0.922493593967	0.777077308881	0.907124999812	0.659798844141	0.612261190696	0.611966587607	0.707623282039	0.681326848079	0.645449294295	0.931191184516	0.596347776706	0.695147784066	0.859057656819	0.730641092805	0.941078306546	0.689812085986	0.605876884142	0.70846207354	0.836745296302	0.916735446296	0.801186502191	 [...]
+M12Fotl.140774	0.642375147236	0.669575852446	0.639114203488	0.825902589263	0.628664844914	0.548740503344	0.633674975843	0.810199673474	0.804277359152	0.662291503867	0.805384286674	0.877281367893	0.662198748997	0.881346093026	0.689559950675	0.6389608068	0.604924127811	0.724866288792	0.653499080838	0.677640698407	0.851364989761	0.64429222641	0.612765318296	0.728502611042	0.619457559769	0.85974549774	0.610179816699	0.705898817797	0.702906402722	0.701286376627	0.884543697688	0.604581625271	0 [...]
+M41Fotl.140801	0.656372151451	0.871579658437	0.877244639086	0.877609935068	0.871279931114	0.795255004427	0.700143038988	0.882173230581	0.884492667928	0.627297499243	0.847308479906	0.914266453543	0.799099901624	0.902862921469	0.891312509295	0.885262963551	0.881739959724	0.875684073681	0.849043205447	0.890942597066	0.659837116044	0.863813800565	0.845551541871	0.602732779363	0.773068861685	0.47121208605	0.846812063676	0.913157011549	0.876385427849	0.581067094711	0.929320567544	0.71883922545 [...]
+M22Ewxr.140787	0.739041259859	0.64058284868	0.658140179355	0.765052413456	0.645117227775	0.677615367017	0.673685287233	0.715036324112	0.697966178192	0.705222122857	0.716335723515	0.763638747001	0.623538294579	0.72355785127	0.710671319216	0.676516018472	0.614849400211	0.686659850376	0.769253206906	0.72912508471	0.837854422115	0.679460745928	0.710276271745	0.772506257127	0.611421364919	0.87523330272	0.706125621396	0.748002389746	0.701647906166	0.731367589924	0.75957226984	0.687031436724	0. [...]
+M11Pinl.140316	0.686776985237	0.636854092926	0.615764033147	0.73488465973	0.660274727822	0.563961220125	0.623844048478	0.714508149266	0.70433377808	0.622340564199	0.700630920582	0.85640171827	0.566325099974	0.840261793515	0.649551616175	0.634396094822	0.610841961045	0.612858442605	0.740849413981	0.641637834311	0.806285402567	0.622256836246	0.583876906252	0.737792631298	0.481516674801	0.861126565552	0.645930178051	0.745182326347	0.659425244677	0.672403035535	0.848560081568	0.628588552017	 [...]
+M53Fotl.140376	0.869491443278	0.729270358251	0.665707522003	0.812085255988	0.702087521434	0.746403134949	0.817105232824	0.798283159618	0.81575319821	0.871381295803	0.822472020103	0.902269591435	0.810381314462	0.885735201498	0.693128280573	0.651727544056	0.672663735712	0.738205744802	0.749474911703	0.649191694931	0.942557764617	0.639173088931	0.71924363282	0.897619831063	0.782451123569	0.950818276524	0.745385999517	0.604514502027	0.729607791734	0.877056460099	0.879364833686	0.842064903611 [...]
+F12Pinr.140498	0.697116550231	0.713333626971	0.593180292786	0.739600943264	0.623353007711	0.626810345762	0.643043689022	0.700488336592	0.712066822891	0.701736270208	0.751077589456	0.879522526243	0.724003233982	0.86865843134	0.588656013248	0.608343404642	0.683542482323	0.64184377115	0.67081649242	0.640258163756	0.849766466846	0.625692750234	0.659110351798	0.753385796281	0.633838108973	0.877787404874	0.64717298733	0.75144814223	0.75254277843	0.738576780024	0.882615955486	0.605649598426	0.9 [...]
+F12Fcsw.140400	0.914810925254	0.804850217083	0.859076235241	0.840645598944	0.858455849945	0.852340298385	0.900564483983	0.839302885237	0.826775352754	0.87263151716	0.838552790851	0.779492617866	0.79518078068	0.771429511796	0.848427593421	0.848090049956	0.807306420903	0.833446529399	0.919635451134	0.843777221161	0.922051109672	0.862872002963	0.847107519039	0.911886871238	0.817534007707	0.945776649796	0.897730774854	0.814550075388	0.698243872169	0.893994115287	0.645018481954	0.871517499648 [...]
+F34Fotl.140856	0.848449800933	0.690173447032	0.688355992527	0.841054672136	0.652762941009	0.727135014446	0.787974452074	0.824878025024	0.834780565682	0.847517709763	0.828284977109	0.899280316946	0.814904192745	0.891903483778	0.702826632367	0.646466789578	0.644812347342	0.752115197637	0.726760987994	0.643686841559	0.93719552205	0.638245424461	0.728117227092	0.885220541452	0.762457201517	0.942545461173	0.696178685594	0.606320526071	0.737016847382	0.872629301052	0.888006553367	0.80710444055 [...]
+F12Indr.140556	0.822180243951	0.654947316872	0.616737226337	0.759525095035	0.640885800247	0.695336222155	0.739611760998	0.722032938568	0.722757069193	0.804547689227	0.73905457935	0.899284236929	0.715547147941	0.876987759343	0.556900155762	0.614714998521	0.63303331692	0.617026598249	0.703719938569	0.605001291515	0.912186453173	0.539839947535	0.63451939781	0.850252807793	0.660411919761	0.927333547875	0.680217693439	0.65075818313	0.708689575267	0.812927771019	0.890556574959	0.751816262588	0 [...]
+M21Pinl.140313	0.792748335699	0.683315428111	0.733001436454	0.718466631525	0.730098156241	0.706002830155	0.704405519689	0.691775274713	0.636045355301	0.703406808652	0.679342195739	0.727121792711	0.573065918001	0.688539861877	0.672172302712	0.673996909706	0.595691501306	0.651767647077	0.849885355305	0.722811314136	0.865446337065	0.715951444211	0.713209733488	0.782522711426	0.595181014275	0.888400844608	0.772624844388	0.769500529716	0.665177286307	0.714260392757	0.758264579956	0.7367337365 [...]
+F13Plml.140601	0.792269506043	0.703229099353	0.610000068559	0.795273897798	0.614683344245	0.712719850639	0.725993840064	0.768114871841	0.774883258575	0.76803082143	0.818669307666	0.904155558642	0.76066697937	0.900615020376	0.668466271264	0.549849036263	0.664530449945	0.712473384736	0.652825863795	0.667291610312	0.888576777106	0.662143074845	0.645915084413	0.827987150562	0.709237138864	0.904886332265	0.630703093846	0.69170685066	0.761561929342	0.77578877426	0.909378712408	0.701363139807	0 [...]
+M31Indl.140291	0.81639821788	0.641853293406	0.587297186967	0.55251113014	0.689684209439	0.687762037898	0.735837000234	0.479327374724	0.488854454547	0.76498230761	0.557698705393	0.87490767358	0.654249236556	0.824500960537	0.477871831611	0.633316043409	0.613170066678	0.421027462245	0.774017590069	0.537034794848	0.895690628825	0.615587968843	0.593368372127	0.808267590282	0.606269746975	0.918251286901	0.73488546276	0.725483984224	0.664711502828	0.764292432648	0.86210875377	0.716636689748	0.8 [...]
+F32Plml.140737	0.808266234073	0.674715366347	0.608097523087	0.764179786719	0.563180443711	0.692617360054	0.759367705687	0.720740285715	0.701034151523	0.782922095421	0.738754935916	0.828335445308	0.739487447252	0.779324387667	0.647486669222	0.623455823659	0.602804212786	0.679472264357	0.668707387715	0.646394252421	0.901137045932	0.596483580856	0.693874037136	0.84557572727	0.727298042881	0.914093352093	0.680208596431	0.644004873964	0.703343702753	0.787689703766	0.82957590263	0.723783956982 [...]
+M41Frhd.140449	0.71963604879	0.63318518853	0.69760162367	0.686416890773	0.702044031683	0.623216226681	0.615371255488	0.668533985998	0.65862606622	0.636757654423	0.646639067692	0.869969539178	0.588405258457	0.822494260952	0.664658664644	0.647576286622	0.64389128718	0.637265876	0.757923454993	0.64250545364	0.804165342807	0.646444622813	0.625156610288	0.678265858218	0.375337095631	0.857097485815	0.704914955898	0.767422896947	0.65320821095	0.620853756893	0.853270041574	0.643099968129	0.85154 [...]
+M24Plml.140651	0.810437969415	0.585321283004	0.603557943748	0.668180427827	0.597978495424	0.645659280487	0.729931843917	0.64707958292	0.618322978769	0.765449500305	0.658254999477	0.803443693437	0.684536449251	0.756935098045	0.616736033812	0.577328568472	0.44917058713	0.593791232383	0.734226732088	0.57433511217	0.903173712393	0.575592659738	0.620027190558	0.834774341664	0.5806412446	0.912158213726	0.683258883264	0.672388188915	0.638985118204	0.774294636226	0.796664998475	0.706816547222	0. [...]
+M12Plml.140817	0.730421943083	0.553628824141	0.574424815916	0.720429093174	0.618486901268	0.612202647484	0.633442285686	0.698801990403	0.690239203043	0.731719398243	0.695903324518	0.875598072953	0.654770363131	0.852775190791	0.592807873319	0.585562913542	0.534254097395	0.597955790474	0.691264495802	0.581549785588	0.890833269821	0.582283001305	0.575803886004	0.76604370845	0.585225800423	0.905152353496	0.612505443532	0.673992481566	0.680409568517	0.723214200969	0.869975727351	0.70034203980 [...]
+F12Tong.140538	0.855709713185	0.781166066467	0.734560709596	0.46434344516	0.75740412484	0.75199544433	0.832429874547	0.496392188677	0.503766492063	0.791748753843	0.333236892121	0.812493402623	0.687584557685	0.77036606674	0.709368558855	0.740299971665	0.73735121862	0.635825419537	0.857117416208	0.716857938538	0.877743348335	0.716535673101	0.744848306922	0.875494865161	0.623341183046	0.873940433997	0.81522593091	0.847641557712	0.808148783099	0.796240573632	0.838380221476	0.742021674734	0.8 [...]
+M54Fotr.140667	0.852319021371	0.636410798882	0.623511613861	0.810666989735	0.656370644224	0.717804342838	0.798770302085	0.792500389623	0.785954339356	0.837578647113	0.793099296051	0.823975335285	0.74089495352	0.828575209665	0.678842012427	0.663297655822	0.635528342328	0.724245984923	0.716579565334	0.67318685008	0.932582441479	0.640377420992	0.665488515811	0.888901846525	0.744617012092	0.940460248276	0.722117042654	0.582621053346	0.696419874608	0.854863696028	0.824668730684	0.818362555256 [...]
+F11Mout.140583	0.879406270994	0.748174546827	0.689909426092	0.433153188813	0.744965899676	0.767488818538	0.830137673511	0.312096166796	0.365175850159	0.819553815639	0.417029370173	0.858989374503	0.717030980931	0.807624482171	0.521873289134	0.690941290398	0.714764936526	0.492395722931	0.854851572821	0.623979481391	0.857893585651	0.707276197119	0.738849025638	0.868850438829	0.676858046144	0.919037586227	0.82432816545	0.824098329169	0.768965033619	0.814818121882	0.857300650881	0.74597272206 [...]
+M32Forl.140592	0.800124005644	0.657005098056	0.631705771518	0.703817588866	0.686382948345	0.671538093741	0.737328859402	0.679066593848	0.674460665051	0.755734061155	0.741107933732	0.88383494326	0.694106594953	0.834996025982	0.653593029762	0.589460703629	0.624962558202	0.628409107743	0.745372137809	0.596974849265	0.896856731771	0.634594017655	0.602415491974	0.832794847806	0.618948100782	0.913997415874	0.695143212768	0.694639018032	0.654433452549	0.77964240095	0.866927369243	0.706408775174 [...]
+M44Fotr.140394	0.543298212185	0.756715657942	0.776520852844	0.828267548887	0.749695266714	0.698961686236	0.716551764431	0.801380767134	0.787064082996	0.572205959619	0.807658658584	0.899913786681	0.722707077883	0.89128800204	0.789065866171	0.785445658781	0.773136926682	0.779583101229	0.709578515755	0.793631713786	0.778108685122	0.761978476147	0.751386559661	0.730320877382	0.663950452813	0.713901553203	0.672496899226	0.842564240157	0.799816907608	0.544509108769	0.889138865695	0.60185624457 [...]
+F11Frhd.140508	0.770636412212	0.568855651075	0.606829221656	0.591169354507	0.609494357909	0.624921345874	0.645617549101	0.527071880931	0.515391765025	0.728513633523	0.523615710942	0.85290309427	0.592324405515	0.806658511854	0.482239893468	0.557301206625	0.526248304172	0.50083302023	0.736177751951	0.527238956897	0.868684477551	0.555104449992	0.600969118763	0.779566900672	0.455905706483	0.904183281642	0.689042397451	0.713992323424	0.686983683822	0.744677034669	0.838192176897	0.655205632993 [...]
+F13Frhd.140649	0.72031281647	0.671349423159	0.648243592396	0.719128776226	0.66162490824	0.578602742609	0.686667846102	0.701321540006	0.682766449643	0.710757684357	0.688767103385	0.834830734862	0.660215101281	0.820937831839	0.687227614235	0.587983101465	0.587201425363	0.653040280934	0.677551272954	0.649135435871	0.841533262563	0.6274636721	0.655514384222	0.774478470483	0.583386040777	0.867240593704	0.626991022009	0.726097675612	0.727865347385	0.755355184497	0.850400957485	0.679288895115	0 [...]
+M21Fcsw.140493	0.939026075663	0.802247359344	0.843524991772	0.790715069347	0.846870018461	0.84843365128	0.920541906105	0.810695539329	0.770252080486	0.892829032064	0.773445299195	0.594631521574	0.819910014054	0.421993270282	0.856346264072	0.846037881695	0.785974156104	0.832104124976	0.914546507313	0.844813401307	0.95048720575	0.854296211105	0.86853829057	0.937374411046	0.842831657246	0.951214592894	0.900521934337	0.827006428966	0.764734469283	0.899909715775	0.614394359475	0.895372443289	 [...]
+M31Aptr.140474	0.677091629018	0.647565193769	0.712758530632	0.800831390187	0.717623390966	0.566226066554	0.55725999693	0.77742592736	0.743689284206	0.547483921226	0.761845630767	0.775856532529	0.52542705176	0.780047463744	0.74058265593	0.72179640455	0.68320971229	0.719597350446	0.734923039964	0.755829054841	0.797235806243	0.70473935126	0.65874346592	0.593325129049	0.548910812421	0.801906442041	0.720485746371	0.787955638033	0.725923541136	0.592195338856	0.820768660401	0.620805505714	0.826 [...]
+M11Forl.140337	0.757819182458	0.655458083554	0.6157576225	0.783058198637	0.607440977012	0.651193760264	0.696310340378	0.759560245076	0.748595899538	0.764150336409	0.776382093542	0.860745200612	0.646548867385	0.847623691366	0.667990622379	0.655024373632	0.615059151258	0.688698470771	0.696476845741	0.660200688357	0.89789363455	0.640423149489	0.631624546126	0.80128896135	0.743546404918	0.916014807693	0.647861633691	0.666114139159	0.650982186124	0.755993636709	0.870574989188	0.741336727578	0 [...]
+F12Fotl.140294	0.609555980123	0.813677460307	0.793351445841	0.874315493803	0.78541978427	0.695611464325	0.680279677488	0.848016574369	0.837634281147	0.630995980348	0.848815554325	0.915164729838	0.708293367999	0.9163927135	0.810866951586	0.815093373809	0.815361496478	0.819564727766	0.727456949641	0.83273635504	0.693601344329	0.759241704418	0.788702395345	0.693462206274	0.671405319218	0.721283677886	0.740020747772	0.831314938416	0.835721087874	0.688830028024	0.896175871203	0.604241892385	0 [...]
+F24Frhd.140858	0.727146603502	0.767056756651	0.737338010406	0.715667576719	0.783003850004	0.697388343215	0.758509683802	0.746102129932	0.722702040061	0.635094354555	0.663458167284	0.787883581145	0.67933810628	0.747624060428	0.77554060693	0.775379686181	0.764268497504	0.725087298986	0.813977873058	0.80719112986	0.733338552881	0.755730194759	0.742248572856	0.735747253168	0.655205774756	0.765980630595	0.78352216073	0.839743235682	0.802688115112	0.620811185321	0.84957146134	0.731888338586	0. [...]
+M21Plml.140352	0.87830363655	0.652482853909	0.705387366072	0.690498157221	0.714891204954	0.765616025185	0.831155471159	0.650879130498	0.610008874018	0.838452052696	0.666737487865	0.732145982088	0.670260830123	0.643714344086	0.647445232873	0.705228831803	0.625774087472	0.598024778667	0.856077705653	0.67689530154	0.927851172675	0.715928442983	0.701708791245	0.868751899682	0.70535253144	0.938535069126	0.82127645252	0.740727754321	0.629947342618	0.841604482825	0.732166236072	0.775497574306	0 [...]
+F21Fcsp.140536	0.94696990487	0.820091333693	0.863909362454	0.805792588815	0.879560355911	0.864566243194	0.92454938262	0.829341856487	0.802425449661	0.902196492918	0.799651551139	0.696656849615	0.807192819551	0.656315159147	0.87343363431	0.865903220866	0.82464303259	0.846253273463	0.931574845019	0.865351359772	0.951809949961	0.874123199202	0.880805840986	0.938966958736	0.849025277438	0.952587011756	0.919530161354	0.842533580518	0.678203722444	0.909251514991	0.586134145267	0.888999619375	0 [...]
+F14Plml.140525	0.783095621808	0.615873296097	0.596694386961	0.717610738954	0.603406037925	0.636003724328	0.705320524714	0.692520729768	0.701719873309	0.758511860222	0.69936366301	0.868940073652	0.653064731262	0.83253690552	0.62860675338	0.549510565114	0.618141529226	0.615728672002	0.699676900639	0.619172875608	0.894804594022	0.584360859528	0.579136125365	0.817810848561	0.605661994689	0.905187533901	0.641962891587	0.670974728647	0.676287192062	0.770884924568	0.85553278271	0.690685496452	0 [...]
+M41Plmr.140782	0.819873673746	0.591633783277	0.645828007944	0.63852467754	0.643450841622	0.688637945408	0.724874248364	0.644081930328	0.661713221363	0.775586504052	0.642120029824	0.851711999636	0.663722376295	0.820906303389	0.54854809864	0.578718380598	0.588322150852	0.604489513652	0.723287517578	0.542248935148	0.904079146767	0.584422024692	0.591419234713	0.827338565259	0.589827910387	0.920442286219	0.69092643979	0.651471928523	0.65712321248	0.773179837934	0.853148794764	0.724510393125	0 [...]
+M31Kner.140321	0.823609926623	0.618912276256	0.645266802628	0.799594666927	0.679592341538	0.666674626921	0.737266170606	0.785407105919	0.769439830402	0.780041488877	0.792142821242	0.828975386435	0.604339962081	0.779674843927	0.712034252584	0.672209157854	0.637644290145	0.6935311178	0.777975947091	0.728588755429	0.906824150353	0.656017532992	0.628780346603	0.828196447948	0.699283776968	0.921455585278	0.744962365703	0.681541096156	0.622116771561	0.804124646642	0.759772967554	0.757963873023 [...]
+F23Fotr.140680	0.699920224758	0.923508677123	0.926101519812	0.921500913798	0.921704339096	0.862480620898	0.824299822244	0.921875625538	0.91558772203	0.748216597552	0.901201220389	0.950708377207	0.863997950016	0.949070576594	0.928648287936	0.930627125028	0.926839465962	0.917031898166	0.891814008699	0.932662755605	0.543838782413	0.917733439745	0.911733968515	0.667922841083	0.853261243462	0.225777785757	0.886418552836	0.947522173534	0.930202579461	0.725630924055	0.950972207946	0.80181677997 [...]
+M32Tong.140309	0.883344131516	0.75941384704	0.704175308468	0.286747329018	0.759795209209	0.764151680247	0.847729969087	0.378531048148	0.391779388599	0.831107279727	0.227717230741	0.834777109279	0.689589691055	0.769995330722	0.658951581989	0.720063681135	0.727944483944	0.567710421435	0.835797564879	0.655237624163	0.904222843878	0.699097265372	0.710268479993	0.900132574914	0.659142474029	0.903614288853	0.813461406839	0.84046434929	0.786622182807	0.813164082481	0.845318443537	0.746973822417 [...]
+F22Fotl.140421	0.590868797487	0.794191855983	0.793309835512	0.862611209277	0.773422808515	0.665948906156	0.66614318129	0.834495840681	0.833715874685	0.443082606989	0.833985634362	0.887992268352	0.698698583526	0.888609643234	0.80053575205	0.776004719155	0.771208631441	0.789514235233	0.770017587392	0.803934139285	0.727757037797	0.727702894972	0.756821158858	0.605264155665	0.595954956601	0.71750523624	0.769560192821	0.824961425739	0.798935502361	0.511099235486	0.902189132684	0.555581180812	 [...]
+M21Fcsp.140319	0.942694072578	0.802844186287	0.817337851278	0.780983270741	0.8566504898	0.840851936026	0.921142013159	0.798299851715	0.749458527659	0.887356484876	0.802698108273	0.610371547417	0.838045351667	0.394423294734	0.839153610004	0.832932002583	0.796002150159	0.802945767053	0.917891801816	0.849213790192	0.957715263783	0.85850702682	0.881456066331	0.940856896426	0.855293188992	0.954679250241	0.911123793967	0.846961789272	0.809579887443	0.892305731332	0.646364234497	0.912527651375	 [...]
+M42Mout.140653	0.913448223961	0.761675448353	0.705622760647	0.374055601552	0.780059274695	0.794258342286	0.866226449585	0.280546107105	0.413958993328	0.857155960787	0.384030987444	0.84908611366	0.758031742591	0.80522470444	0.58809826659	0.730355374143	0.746281945773	0.530556182706	0.873071046339	0.665845815445	0.897668593142	0.740612229469	0.738281029393	0.909647434705	0.718420878142	0.932479285613	0.85549468988	0.834090039249	0.79885143833	0.856544508746	0.851940107408	0.791023865844	0. [...]
+F14Fcsw.140752	0.950918473322	0.81070526698	0.882909929864	0.839992100028	0.885318416952	0.872116388246	0.940914072607	0.86507193435	0.824989480614	0.913524810077	0.841140829328	0.714756889346	0.845332474377	0.710370921741	0.886093402202	0.887439396832	0.835877684093	0.861951346021	0.92665017684	0.876004869612	0.96635672659	0.887622035101	0.892771085015	0.957255701947	0.876657741331	0.964462609732	0.91675689675	0.836914456448	0.763525179221	0.920803031838	0.606401571795	0.929643796355	0. [...]
+M14Fotl.140639	0.717691723394	0.655218406911	0.7140324552	0.842130844756	0.679747677494	0.620631316756	0.660519017996	0.817551650552	0.801459348873	0.66678341292	0.813479516528	0.851762376203	0.613362507389	0.842198810841	0.748215980608	0.703475824481	0.675970878363	0.750501949329	0.715924877798	0.724175412698	0.877447325829	0.701363256105	0.633889558882	0.776574840343	0.650386418936	0.836081471098	0.686399451473	0.738937115395	0.694320383778	0.697687439777	0.834365825763	0.665583065481	 [...]
+F12Aptr.140530	0.814869716029	0.628842235283	0.658034432617	0.794917931359	0.639533278314	0.674764525636	0.725958446418	0.754678019355	0.774059461133	0.796058817189	0.775121889791	0.885151717464	0.72942187489	0.858865145566	0.611529420016	0.567111036899	0.615585110489	0.670988140595	0.697190386651	0.607595715903	0.907662297091	0.583925430793	0.64583760386	0.840427370863	0.672943183359	0.929171257151	0.674085387243	0.610559881695	0.677952660134	0.798784076728	0.889764094322	0.784590325963 [...]
+M12Fcsw.140326	0.9034465277	0.781637070408	0.845692726892	0.840141696329	0.852570131658	0.825922462809	0.860073448206	0.857752372349	0.826665908856	0.859062494706	0.839755024808	0.680257376785	0.770391222643	0.647848907184	0.860447673558	0.852709556149	0.804813611732	0.814016191261	0.90952377749	0.871071354055	0.923421701118	0.859232379201	0.854329896008	0.860652724934	0.83280096132	0.941342156999	0.887752822311	0.824976201159	0.750887646206	0.857969496618	0.339554284884	0.853892707281	0 [...]
+M13Frhd.140349	0.519933708636	0.783197713539	0.749331189921	0.874654190899	0.753601040089	0.666722625418	0.590563535872	0.878769536637	0.88052833828	0.620122642356	0.849727668712	0.928231935811	0.72600711846	0.917562279527	0.772931727097	0.763673311609	0.768800713059	0.782190667288	0.787192775233	0.771890340779	0.734686541232	0.746380937449	0.744081798627	0.546606255773	0.698712620375	0.769337275626	0.694948328646	0.826240440362	0.795039757435	0.600204532492	0.918594914653	0.650699731116 [...]
+M41Forr.140728	0.791536054384	0.572768568111	0.595333432969	0.711556371759	0.647722770591	0.626056553758	0.685326082664	0.685670114715	0.712631115225	0.767973000725	0.698536485004	0.878885842729	0.67307272779	0.869064735255	0.583952219429	0.607059301371	0.589426197172	0.573514320556	0.762569953025	0.583960967845	0.902751836077	0.61973972169	0.593934697194	0.803141448773	0.571501025381	0.914072218712	0.64775024989	0.707391806683	0.661793622196	0.740811166253	0.855939836605	0.723599223507	 [...]
+M21Aptr.140329	0.780434987966	0.609229223237	0.634658147086	0.773365852867	0.618075045981	0.687478596075	0.687474376048	0.743130235555	0.717922793839	0.723989687161	0.756246306995	0.813075077479	0.645118021931	0.778853031578	0.692688244636	0.641105134188	0.596725891488	0.698929123612	0.75711773898	0.675201617275	0.890716674778	0.66994803562	0.6818018902	0.764471038527	0.655045262617	0.904174858031	0.662917835684	0.724005781925	0.64891536712	0.729835054328	0.760094639277	0.729285076823	0. [...]
+M21Fotl.140499	0.819359012037	0.658597531783	0.674473117311	0.786815374297	0.617074696735	0.712681151799	0.77137751323	0.762536271595	0.746923611124	0.798896858516	0.758340918687	0.831052348202	0.744212304014	0.805741289999	0.677438612716	0.641821013988	0.584174632813	0.697331450105	0.731545028644	0.665222154272	0.918864135448	0.626131612612	0.716325151727	0.851345333364	0.701684719675	0.920966647367	0.729939196711	0.649563584535	0.677068237964	0.832442209166	0.848503369011	0.74706359357 [...]
+M24Fotr.140398	0.755901436418	0.708933060148	0.666319508	0.846568576414	0.666562547861	0.653558965325	0.719679828086	0.824909289976	0.831374323077	0.768569850746	0.825424920213	0.902352457866	0.751540820318	0.902418145962	0.67312718544	0.653727845041	0.639068305339	0.757820749614	0.584249847046	0.681600709058	0.898912867266	0.606672989386	0.687421144658	0.824434611792	0.737570804921	0.900569342475	0.649663950853	0.628545064813	0.732901635597	0.802171385417	0.905039694578	0.731878214699	0 [...]
+F22Mout.140788	0.896930244646	0.765468232201	0.68331294524	0.234460218716	0.770926618193	0.786112962422	0.861823377491	0.367943094799	0.367397410946	0.850547657192	0.33230098617	0.839078886589	0.712000453035	0.788572728988	0.637543995375	0.706389200212	0.738381602937	0.539742948316	0.847032529449	0.635164211555	0.916648053058	0.716356453583	0.727773817609	0.912103686588	0.694615915406	0.917052786297	0.824101081408	0.839262233322	0.786406108067	0.835735858221	0.849209129095	0.78067883653	 [...]
+M23Ewax.140338	0.504490318259	0.803385129879	0.810488835174	0.844516672193	0.796576335422	0.712528319639	0.660533740889	0.815095616546	0.801992243439	0.627896696676	0.815884387416	0.938156597979	0.732471570102	0.91032168181	0.81635542164	0.795759542518	0.792347182985	0.793696810079	0.78578586957	0.793264870132	0.663305221106	0.776120328394	0.791886847864	0.677162026771	0.683474921746	0.717759097842	0.723599027861	0.855896351607	0.799387698345	0.695528524201	0.919842975227	0.598862836389	 [...]
+F13Fotl.140488	0.673246214746	0.78912189665	0.706352172769	0.842616026125	0.71806371384	0.715865124773	0.697701363578	0.818524774114	0.807189730546	0.782949001708	0.847971322561	0.921770154136	0.781561394048	0.929468551628	0.772592370099	0.740831006951	0.764731414259	0.799165764493	0.714573234863	0.785395140538	0.838598703137	0.743082070559	0.760795585634	0.798093533951	0.737065264262	0.810132725623	0.682312631447	0.787361852017	0.818078523317	0.819737886594	0.918504141689	0.717692174352 [...]
+M63Fcsw.140818	0.942237769713	0.772809145203	0.852895809806	0.820125968526	0.855808142598	0.843974442027	0.908121837948	0.842913859162	0.814624661907	0.896069144524	0.812850387925	0.650922839943	0.800044816533	0.666134011748	0.855428253171	0.848448541789	0.807486589282	0.830361307556	0.912209660257	0.855036321584	0.965560164482	0.859219411892	0.855923767769	0.932853879907	0.846135137542	0.963572643523	0.908870537662	0.798665633708	0.759476363865	0.896823930634	0.593360177943	0.8976227673 [...]
+M43Fotl.140659	0.716600225474	0.686785803713	0.703444985276	0.798093997385	0.665750989865	0.639377901588	0.695402344589	0.779075940569	0.7759150843	0.706827279485	0.767261805943	0.876490818501	0.693704608818	0.856850234648	0.702063317523	0.644554725115	0.66155530823	0.768574113053	0.673479169051	0.677678201105	0.861646775504	0.683624053395	0.693633324863	0.795042706287	0.675384238021	0.868761557801	0.676446459162	0.712873518412	0.694687530893	0.751284777929	0.89334356975	0.648731746707	0 [...]
+F21Navl.140840	0.776727095738	0.633330655204	0.565687628179	0.567632215023	0.630186321544	0.652468751992	0.707330628803	0.475360089132	0.50605481726	0.749116509921	0.558465240932	0.85864434312	0.628702502396	0.832376185098	0.470048304872	0.575575329812	0.564621216758	0.437166632176	0.757710318039	0.529625436297	0.872385498561	0.558177563048	0.60660592467	0.789464656475	0.520305144168	0.899571027633	0.711696015481	0.726550208315	0.688563697735	0.753403577179	0.852537703641	0.682228935132	 [...]
+F31Indr.140675	0.79391497789	0.583691084449	0.533072611664	0.688495228703	0.545768700973	0.652386776958	0.702607837031	0.656929986029	0.662244935127	0.772817303227	0.636585295177	0.832371079738	0.697542980278	0.792869388296	0.540074083355	0.58750959687	0.45094996809	0.582089795763	0.691417404603	0.527420432398	0.904334425799	0.558791437092	0.623264568857	0.827494902069	0.595710066402	0.918490132711	0.632968150649	0.63682325356	0.64202533394	0.79813120862	0.842678616169	0.723078154317	0.8 [...]
+F31Fotl.140715	0.788134481703	0.692671597532	0.670168985249	0.80483600508	0.650263396747	0.663345500346	0.735703139765	0.780504298735	0.791591797135	0.802923025525	0.786453885878	0.884244620339	0.785888578716	0.866425621287	0.671150612678	0.640533703436	0.616676267763	0.752658374952	0.637866231075	0.654803458374	0.91	0.621978119946	0.670100189243	0.862046024978	0.71634809956	0.921033230558	0.62455301093	0.631675391205	0.700534910069	0.820367837188	0.87545324898	0.74824170305	0.8863660734 [...]
+M14Ewax.140489	0.68043735668	0.909971855906	0.910112329715	0.917807022401	0.896938494431	0.837392439051	0.779542279159	0.918526115336	0.912163369465	0.719940754817	0.897360920105	0.952524124147	0.823249822458	0.951006621246	0.915446015987	0.910375517619	0.891909314916	0.900452475781	0.890425883048	0.920158035251	0.261713940857	0.899690163407	0.892807415567	0.619209198407	0.775610186618	0.467960380028	0.870459625947	0.936374897868	0.910533616287	0.69910150551	0.948811035465	0.744587457765 [...]
+M32Plml.140507	0.791348981007	0.676036990915	0.603101967342	0.748315206465	0.608284724521	0.671069668748	0.737192550964	0.737198787516	0.736008006988	0.776841286896	0.733902687024	0.885829717001	0.703631257775	0.860608307764	0.65879533249	0.639904051988	0.648219207494	0.66087911354	0.722052942774	0.631244777522	0.900555175597	0.642489401372	0.629904065815	0.815476483299	0.702419076629	0.916589897092	0.617330360136	0.714117345531	0.693728445449	0.809251170571	0.876940780837	0.685293109036 [...]
+M32Fcsw.140646	0.949801551378	0.837693922847	0.875110761738	0.847904538553	0.88267994623	0.865961903566	0.92071591123	0.87243808574	0.836590376816	0.897210176338	0.846056582	0.59467935527	0.815546122913	0.596954641082	0.892209623677	0.882746929652	0.83212909427	0.85866390756	0.935590130643	0.897915801089	0.95982348016	0.89252029321	0.902349039929	0.940471330709	0.871689115164	0.95709243302	0.92175751007	0.850461584585	0.788606614487	0.905405576316	0.563895172935	0.899509661273	0.59805088 [...]
+M63Frhd.140286	0.705741905071	0.725837364868	0.742174539262	0.824464995803	0.720355822682	0.693420774901	0.637278455506	0.815278481437	0.829744984461	0.580595428512	0.771313762291	0.891965913497	0.629100300725	0.879765953041	0.762765800941	0.757374894204	0.739770021502	0.719651038203	0.801124957065	0.764147890047	0.743959629778	0.735974935293	0.695656845209	0.55737744794	0.585061943708	0.770181839708	0.717566465764	0.822846325274	0.773758143733	0.470860796575	0.881064177007	0.60608865167 [...]
+M14Frhd.140825	0.551281465576	0.783006556476	0.729527508198	0.872530929783	0.707050624661	0.614989852558	0.641855456286	0.87657500728	0.87811261497	0.605219228777	0.849030948826	0.902542193076	0.730581386911	0.900136177042	0.774634374319	0.755458043491	0.755639556538	0.775334769569	0.753126338959	0.755401194675	0.778886299692	0.69393645907	0.721953514493	0.6294074787	0.670188191297	0.800461400889	0.703415483877	0.786417293391	0.781363842129	0.583272392888	0.911577405542	0.626057846399	0. [...]
+M22Fcsw.140276	0.906866168489	0.794588960658	0.819848290724	0.780945645673	0.825769757554	0.814016569453	0.890033482646	0.79479717176	0.750719390423	0.85628348769	0.754642496886	0.606711930264	0.802922036145	0.395353940997	0.837848028709	0.823760180608	0.766076674745	0.809067852747	0.896693394884	0.826394234876	0.913077437512	0.830335326791	0.852191472296	0.903819771746	0.814800954071	0.935674862226	0.874348930137	0.819590554236	0.76176963518	0.859968039663	0.659726095179	0.864321012299	 [...]
+F32Indr.140459	0.748756218905	0.671267565421	0.619798965443	0.734571927258	0.57179664468	0.702248968329	0.692918318005	0.684725169237	0.689878012581	0.743213696523	0.664570741909	0.867508428787	0.705170613682	0.849674119187	0.639734870004	0.681619030167	0.648350045955	0.629313783563	0.694910700486	0.641362670728	0.863756413062	0.596516706372	0.707804264494	0.820914025887	0.654832014951	0.882349928366	0.63228349276	0.742202071843	0.776503273773	0.775872406763	0.871172924288	0.607898472486 [...]
+M42Fcsw.140447	0.939707602339	0.77161987575	0.869478544339	0.8376261108	0.878328225765	0.866131431965	0.908402560957	0.854052768277	0.822672286259	0.89474697846	0.832163074867	0.648209525241	0.818466144816	0.628224407756	0.88547006158	0.875767669972	0.833321130248	0.863173410405	0.924779587284	0.865088229522	0.963874858252	0.882124418866	0.889199255668	0.914469564606	0.863905307397	0.961681867033	0.923827001008	0.851298114066	0.78289404425	0.909431710851	0.594711900406	0.88888607674	0.39 [...]
+M63Plmr.140815	0.797861432547	0.633603329761	0.602754370011	0.701208674952	0.666083131705	0.673410000678	0.694930457494	0.650662854713	0.684988510964	0.746926737743	0.682895147745	0.898468093105	0.676643726122	0.857512118537	0.602118658971	0.621137969593	0.617727982808	0.575424793944	0.745886727069	0.632240286947	0.88863022637	0.600712782014	0.609434592156	0.820560252408	0.581023052795	0.910503899721	0.708935879743	0.704886904559	0.739806031513	0.774948975984	0.864038947448	0.69544201565 [...]
+M64Plmr.140356	0.832361646633	0.644174478843	0.591767395442	0.701116653956	0.646801079352	0.689695437005	0.787191969562	0.673714628643	0.702514869608	0.805734877392	0.727173374194	0.860610677284	0.72954347913	0.847933565306	0.641197928015	0.590249452524	0.593879380415	0.631562709264	0.731989005761	0.651172136789	0.903544521913	0.635903880144	0.650054039024	0.863017207887	0.689490232992	0.920759339628	0.696711555962	0.669304600947	0.736140524171	0.827037076482	0.863461740499	0.76562915956 [...]
+M31Nose.140415	0.710087483441	0.863007915835	0.833315194453	0.808453544456	0.857740826228	0.833580627513	0.781863961782	0.819690778157	0.805585686229	0.753062759994	0.826807408941	0.947424649149	0.79722183402	0.954177288455	0.837131850137	0.856445776852	0.86251704968	0.806526898241	0.882179545152	0.845673148912	0.610955222555	0.860211820397	0.84702644041	0.679669117324	0.747620898213	0.64973000352	0.834488873886	0.913020761885	0.884994037227	0.723425601718	0.93376314687	0.685495453429	0. [...]
+F33Plmr.140674	0.862857228644	0.716042461185	0.670674840031	0.74677072448	0.643675477466	0.752576051595	0.791840362199	0.756689614901	0.762161550645	0.860528005178	0.783785139317	0.903732105331	0.798090097593	0.886657823412	0.644727743442	0.633161295195	0.618268304293	0.696114603284	0.687190498276	0.622639460917	0.937394987629	0.609969807467	0.714730355809	0.8946028018	0.728848774341	0.949962224676	0.72064507745	0.627107385937	0.738575061421	0.879124417962	0.900381839501	0.789211008595	0 [...]
+F22Plmr.140357	0.789912197368	0.655173436275	0.552904738044	0.673658780333	0.615866317673	0.650930480363	0.694799628135	0.699211390401	0.700038913855	0.772139888549	0.751528789729	0.885487453889	0.723063745032	0.862993800216	0.541023882248	0.565105721508	0.569413356355	0.61195311795	0.673248792359	0.575420166274	0.904530613762	0.566472910924	0.625659916165	0.817353672312	0.608170071951	0.915454501636	0.623066848381	0.665315116304	0.677360386469	0.780402175375	0.854876302061	0.75711489836 [...]
+M42Frhd.140770	0.762786578395	0.601381474568	0.598550648402	0.63688068957	0.644475899291	0.630609911923	0.642272534838	0.599529321774	0.592057936006	0.673805287808	0.572852043992	0.866646096442	0.595164548255	0.835345972005	0.559323773255	0.635086848451	0.554909476793	0.500160192058	0.744237619985	0.580143822264	0.851038999702	0.566007014028	0.589030674409	0.739718032988	0.436817170794	0.881717273949	0.664684098407	0.742745061842	0.686673328303	0.65940729959	0.861957281216	0.604247899866 [...]
+M32Indl.140814	0.816576910629	0.640940465022	0.584059833013	0.756672245605	0.63224670292	0.649199526216	0.690102827401	0.714543288465	0.728230527776	0.756504705736	0.722385696747	0.879788989076	0.688655329522	0.870266178073	0.618017872824	0.617966517	0.610449442194	0.61801088364	0.71110438648	0.634697635377	0.887511577112	0.611596146564	0.608587539776	0.810541683787	0.625776766583	0.915053183667	0.657073707029	0.704268514	0.73457311283	0.753053311665	0.880894945017	0.664371378775	0.90101 [...]
+F14Fotr.140665	0.653080116185	0.786791640143	0.740977678155	0.868274435561	0.719062309708	0.641936631176	0.630899128827	0.842270455011	0.832613837632	0.690614789437	0.8448634923	0.918721099511	0.742566824502	0.911854246586	0.792985551208	0.734097657473	0.762328422621	0.806569938964	0.676484716628	0.761040521662	0.83110376055	0.701034810445	0.734519743415	0.748616872896	0.669221777785	0.809642019736	0.693966521207	0.783401405806	0.797582114349	0.748177840153	0.908713792442	0.616926814515	 [...]
+F22Pinr.140355	0.525132249932	0.855195606303	0.855180136052	0.897503092382	0.836000416415	0.759733673097	0.64013279038	0.866600857785	0.856327201556	0.562028598475	0.873462048901	0.930455833922	0.763663204089	0.921957937764	0.873271968974	0.854407743937	0.846434237805	0.852671947965	0.81602373519	0.872672600653	0.475630804275	0.84526678503	0.830297732446	0.605773980859	0.674669934623	0.605518345952	0.797562347694	0.899622606352	0.866235597797	0.686441810304	0.930555882369	0.650905983394	 [...]
+M33Tong.140346	0.893440439543	0.763246096724	0.686981031855	0.225276743153	0.781081151836	0.780602025612	0.859260607781	0.370218471066	0.420976709632	0.845548390345	0.276110670948	0.835152674431	0.704911432768	0.788304689068	0.639919223372	0.724538297558	0.749382406082	0.517681816722	0.85507310677	0.649913275892	0.913492197112	0.731797561043	0.741778515342	0.909037539779	0.696273547879	0.913668010362	0.84090004398	0.83027639348	0.777507917508	0.844358161573	0.834074884724	0.775079184014	 [...]
+F12Fotr.140597	0.665141477243	0.843226597921	0.807852388273	0.889429744786	0.797872274428	0.750780347563	0.745594365365	0.861369519801	0.852266597296	0.70520094385	0.86807345995	0.922786366303	0.772368772357	0.924141891422	0.795709133641	0.848167353482	0.836306417031	0.826563678615	0.739936468552	0.819532631784	0.775529937832	0.758248592757	0.799106154451	0.765378082158	0.729675117036	0.750855418011	0.750939699296	0.82174746694	0.842237843871	0.750604682848	0.91186394328	0.684640750049	0 [...]
+M34Frhd.140638	0.671193342611	0.800111094884	0.810054642014	0.856878986122	0.817138932043	0.694288520303	0.593381249599	0.86295331657	0.866983762159	0.647137431119	0.838438981741	0.932654343049	0.62544343773	0.930827077468	0.814961258164	0.789797661798	0.825755354432	0.789416192509	0.825292937003	0.828295475268	0.689587181729	0.785693213293	0.737716958443	0.533301107351	0.673401218284	0.715946767662	0.795730541212	0.874502513646	0.805247884165	0.596704785337	0.913689764874	0.606390155706 [...]
+M42Kner.140502	0.845582992981	0.586704459968	0.632600767407	0.787507521472	0.660395951486	0.710337444837	0.786828964971	0.753103017139	0.747556190189	0.827451798906	0.782083101557	0.851776794265	0.730948191288	0.830176592576	0.624152289761	0.640558398756	0.602349835582	0.675760093091	0.725207349152	0.643159909058	0.930423239207	0.635982410402	0.678681848422	0.848273342078	0.722213267131	0.937877009417	0.731331855732	0.639098952382	0.627883194405	0.824818201577	0.826253589345	0.8009555896 [...]
+M32Pinl.140666	0.739745241038	0.672286987352	0.690852857773	0.785081878435	0.674761915481	0.640895834108	0.594784319977	0.751727188126	0.758874984616	0.617039428211	0.745852384686	0.884380714752	0.61196154312	0.863240441251	0.7016930475	0.689642033034	0.654294819387	0.638739858566	0.742154529883	0.678673770397	0.81959100936	0.630711929104	0.657978196492	0.698598002267	0.521067626331	0.861291878553	0.667357026753	0.753436900335	0.707766901173	0.669421152529	0.865822142132	0.53641304184	0. [...]
+M34Fotl.140468	0.803544417426	0.589943051405	0.685242186497	0.775382365373	0.697707632221	0.605968860437	0.719438260831	0.765010635472	0.734775055832	0.753741967644	0.755360750227	0.793471820796	0.623899194222	0.776325497658	0.701953296509	0.683368922346	0.656537834356	0.695244704644	0.735847546134	0.681303932208	0.898704384825	0.645364572869	0.632415842535	0.813957010288	0.677260237897	0.896194742424	0.736172013373	0.710441048725	0.685640739168	0.773092533225	0.792207397803	0.7332792007 [...]
+M21Mout.140501	0.856461017721	0.731322587894	0.694672382293	0.43756894879	0.744636633902	0.74821444387	0.816428841381	0.40251120056	0.339419004874	0.773255861116	0.477694471777	0.840356542564	0.716544795367	0.808720469931	0.590900522452	0.689757879231	0.702002068292	0.498808970486	0.848413363051	0.669709501415	0.910164643779	0.721153241527	0.716843627797	0.885869527528	0.628484016827	0.910086385525	0.811350014203	0.811948905644	0.771786252857	0.839360502806	0.824249470246	0.743718581563	 [...]
+M42Plmr.140511	0.819962447796	0.630430893616	0.587250684151	0.744308814413	0.604323080688	0.691100469395	0.711498478516	0.708682156506	0.710811122849	0.787985576872	0.717823512745	0.893271141679	0.722085879346	0.876285318462	0.5686029478	0.610228673398	0.552708322592	0.597335372394	0.694656805321	0.611557963038	0.908240818676	0.531259483349	0.622868666156	0.833081960873	0.614978746088	0.923012141988	0.67454507006	0.646083023007	0.710518690392	0.785189652175	0.892368348398	0.712345456869	 [...]
+M11Fcsp.140589	0.927969026206	0.795212769053	0.856726750152	0.828984298091	0.862094146362	0.848890908499	0.901384451715	0.846649350515	0.820011303651	0.887609695657	0.822865162059	0.686363088975	0.793414858621	0.658427586569	0.866215646332	0.8615075786	0.808295820962	0.831842457203	0.919990866147	0.872199998887	0.947809287585	0.869238661779	0.872806647018	0.906978278055	0.842212973324	0.948423822642	0.894402859856	0.835954228878	0.776711663097	0.902242579771	0.335016723473	0.865313136106 [...]
+M41Ewxr.140794	0.777503434689	0.803546209384	0.802780337519	0.799932206595	0.834147031	0.751810367931	0.819010948791	0.795743198168	0.797448637942	0.680144293565	0.754019768914	0.835547321169	0.670958495267	0.819170238728	0.827188203596	0.810894442337	0.782457046128	0.799401639479	0.863794491589	0.841341905648	0.601105618985	0.803484143593	0.810390821145	0.724653565926	0.655457429889	0.72581628439	0.807053101849	0.872667650961	0.816042415877	0.674962508521	0.832733832584	0.695583593135	0 [...]
+M42Nose.140308	0.752546773296	0.615890241377	0.601597850177	0.639046888188	0.637464256423	0.636328760543	0.651733421645	0.592060042313	0.592094402851	0.660499893414	0.576672596034	0.862347666338	0.586349852128	0.824616920613	0.58195727424	0.630295318583	0.604988966944	0.526169973728	0.75304579002	0.606191668239	0.856879906685	0.555467871632	0.590070170103	0.742244070672	0.457550370124	0.885474803616	0.688804019843	0.758746444712	0.711578753906	0.652434543946	0.858235232722	0.597725806754 [...]
+F31Nose.140465	0.737314609552	0.65308650928	0.608567766145	0.686677030256	0.620842513342	0.660530498265	0.680023240514	0.650992276499	0.646139167601	0.699465339031	0.633261678136	0.838511442107	0.606897850292	0.818337002864	0.666713241319	0.65731511328	0.620409170685	0.650162699191	0.785928615373	0.663661563626	0.845992566756	0.625648729397	0.654851597929	0.776461403113	0.481390974025	0.876299912646	0.730503747875	0.727265653206	0.684778368554	0.734254044613	0.810379091165	0.60662171048	 [...]
+M53Ewax.140448	0.704056066904	0.819747486429	0.839625268798	0.860550673254	0.835110820717	0.753607929084	0.606042619378	0.866541055222	0.867770942761	0.629184852537	0.832209747633	0.926493925275	0.67900656871	0.924311656765	0.849564400713	0.833510101775	0.842648610354	0.810904550234	0.866122517963	0.862825118595	0.602394393615	0.825101846085	0.793992272501	0.432175465692	0.678499623455	0.644155258468	0.820228755491	0.890703630112	0.842466299624	0.52288802144	0.908259851429	0.621282223831 [...]
+M14Plmr.140579	0.677734253895	0.698624088322	0.631156068839	0.820433815296	0.627837704243	0.60828190522	0.677415556923	0.8006104347	0.798009784175	0.680685944289	0.793325612103	0.865024536129	0.672657758063	0.871909266303	0.683462045777	0.678247121602	0.639642727396	0.70632163142	0.680067207643	0.707738157007	0.855990545562	0.615061595765	0.650655519418	0.737331300089	0.656564954398	0.880697112539	0.614605523612	0.712388557348	0.731828514	0.707187744466	0.863625200339	0.670126786484	0.89 [...]
+F31Forl.140333	0.797252396225	0.65722324068	0.603980647599	0.761902419626	0.597726037376	0.713513285902	0.712515996659	0.723035121742	0.738482478074	0.779570305658	0.755667967219	0.864509800237	0.735289125377	0.845288441936	0.644687343241	0.639344241733	0.617607270896	0.697889297738	0.674117576072	0.629208253424	0.90334761382	0.640763824251	0.682842133911	0.828631094678	0.725005923481	0.916232051226	0.644142607955	0.664604809607	0.672468497603	0.805049073231	0.86532002534	0.722554628831	 [...]
+F33Knee.140702	0.827760539249	0.657068205531	0.567091845281	0.753265799213	0.602484282214	0.681402229651	0.760518156184	0.744796074078	0.719635633048	0.812870184306	0.784004731774	0.861525937181	0.740306135331	0.820239168046	0.629797975184	0.578165145118	0.571960709347	0.657719195807	0.708086542435	0.61635255296	0.923455868955	0.619068112884	0.657429154988	0.855443213296	0.695372963089	0.931714857179	0.702167987012	0.627926347704	0.68680469795	0.826687099989	0.844015886807	0.80169293343	 [...]
+F24Fotl.140630	0.629763046954	0.861525400257	0.870807095741	0.878027437542	0.839403855834	0.75523079835	0.770094289307	0.848092115912	0.836271062087	0.595938219848	0.849052063157	0.913480585544	0.774604299479	0.914851196741	0.873850369264	0.858742126048	0.857360640879	0.855404291394	0.802404254179	0.864872685628	0.71084836417	0.832621352881	0.828963498587	0.721136550806	0.720911784006	0.574245619138	0.807711699182	0.890904290218	0.853512894001	0.658452928221	0.915323923866	0.573094701436 [...]
+M11Mout.140852	0.881634969432	0.734384757266	0.67939539293	0.284948693821	0.755458399308	0.765500896447	0.839417896502	0.304469218949	0.407560819142	0.82783954733	0.417938350859	0.856521568277	0.709338983632	0.817517723889	0.551090927892	0.703917666444	0.719254155579	0.469239379597	0.845978511225	0.636201835594	0.900242528206	0.715260914109	0.721701058595	0.897587674806	0.698916949	0.925544880483	0.825994114561	0.808442010113	0.76015077438	0.855795407971	0.843053597487	0.75019307093	0.84 [...]
+F31Aptr.140458	0.72800683664	0.653041814015	0.644760072206	0.778033486188	0.641208689139	0.668907030717	0.654304258548	0.744929055283	0.716979102728	0.664788140859	0.743921358679	0.783686392902	0.598551337816	0.74571128938	0.71312969865	0.667158065651	0.643237547656	0.698976925455	0.781588221439	0.706836767671	0.843074302089	0.67307742555	0.699819335673	0.769104976262	0.551554785101	0.869657296263	0.693178381977	0.753182873306	0.646895703451	0.728391182374	0.751489994579	0.669466851031	0 [...]
+M12Navl.140334	0.784033062763	0.65633305173	0.637530938902	0.810787946945	0.659376343209	0.637964744548	0.697463864341	0.794006112935	0.790067939341	0.774943428822	0.806338648106	0.891985775679	0.712870515337	0.882010566182	0.609796626984	0.62727655905	0.596829180954	0.696906377384	0.695519212324	0.620506909954	0.908450053431	0.600429255749	0.625155865878	0.819599916191	0.664794570843	0.923073110845	0.633414388616	0.660765911632	0.692050434318	0.783114584819	0.885716136528	0.770027102247 [...]
+F11Pinl.140315	0.817746097054	0.607353651396	0.61673285027	0.689154321766	0.638250625944	0.741366963122	0.764172126621	0.598530866338	0.613576453697	0.803643625028	0.695840830351	0.849844215289	0.650860706823	0.828429712581	0.462402679315	0.627524752237	0.601512312837	0.584749909787	0.750648980193	0.604662319758	0.864590571042	0.648987127437	0.669764385916	0.857378138611	0.667249898124	0.924189181058	0.708465011895	0.72363684805	0.688762909527	0.803980346419	0.829192360196	0.739301645546 [...]
+M32Mout.140657	0.868061748294	0.714947810414	0.658017890887	0.306877307776	0.745809082981	0.763536345776	0.837451774503	0.284230403119	0.331215609634	0.815049109544	0.329011068086	0.833431530484	0.682998688214	0.797946754496	0.543381282745	0.686957937534	0.69167297512	0.471100779149	0.829824300872	0.614341395169	0.916287385856	0.695007032199	0.703286646215	0.894930543822	0.671520122001	0.917769645013	0.809832210199	0.803931162726	0.76496767156	0.834518673565	0.836278995661	0.739753139997 [...]
+M43Frhd.140486	0.723467689069	0.612369158607	0.642904270867	0.697439496625	0.664412348888	0.535380677734	0.647720733806	0.671431326728	0.663011352155	0.638720941693	0.641914796304	0.84804321222	0.576202663952	0.819553875733	0.613227117771	0.596808229914	0.595396126158	0.585198747658	0.726783652904	0.614460788724	0.8418267896	0.609271739924	0.57580591136	0.731626335622	0.470812865241	0.855389332823	0.663004340629	0.72655774731	0.669927143289	0.676557682024	0.850898329775	0.623351448367	0. [...]
+M44Plml.140614	0.838224521804	0.63841180563	0.598122560053	0.62322845882	0.62513041668	0.6851756326	0.768819724248	0.622390039336	0.651857764696	0.796929767015	0.697304976632	0.888992657532	0.722810791626	0.875088525393	0.542271235745	0.605506494048	0.59159892591	0.54269571347	0.734272547974	0.578914650444	0.902799808492	0.569331908724	0.628711826206	0.862659673749	0.613121752746	0.931681650159	0.723215274853	0.681185113863	0.701634973677	0.824628616224	0.88553885671	0.72525685684	0.9011 [...]
+M12Mout.140350	0.897560207545	0.743442727122	0.696880186777	0.244605196448	0.780237919026	0.790770772702	0.859911895013	0.346648068149	0.446908037746	0.85058799985	0.363130420149	0.852202800741	0.729092901807	0.811743593656	0.586050618244	0.72615020559	0.742503687307	0.538017268245	0.87129556644	0.660592267518	0.882220989139	0.738712889363	0.735229535685	0.901031559017	0.720424295364	0.935923893112	0.841803519232	0.827549908517	0.782355860845	0.849797976089	0.848341183938	0.793053119131	 [...]
+F23Plml.140585	0.800434647938	0.612671666688	0.572278454656	0.715343763979	0.606965399798	0.644046683409	0.754733786006	0.67702068706	0.687273357436	0.802677038427	0.724892478942	0.864317617926	0.707126875301	0.842907788738	0.574851434846	0.554135515688	0.580199529582	0.622791463652	0.699157822515	0.562571265678	0.912704921428	0.585911520596	0.631447097539	0.853816271284	0.646843938493	0.926589897708	0.681010530601	0.644970896477	0.695825034426	0.812682147531	0.849735592654	0.73470578271 [...]
+M31Tong.140808	0.88677986875	0.755337183922	0.682878590098	0.222902352374	0.772079010152	0.779258541551	0.861409754476	0.314489513467	0.340556373795	0.836017838118	0.218005358964	0.838193030542	0.699391545026	0.778352719492	0.628483684021	0.705691376995	0.729731738713	0.528016406436	0.839069741108	0.625089153099	0.90740065571	0.722722377606	0.724170003646	0.903168647336	0.672643288169	0.907079985037	0.8214631869	0.837971898813	0.78322256353	0.818893229283	0.849952915949	0.759046939452	0. [...]
+F32Navl.140293	0.787984711451	0.646720674391	0.603089490473	0.740984853541	0.583303746409	0.667718095267	0.683183643928	0.701030758278	0.721836970917	0.745768266272	0.710460760666	0.890230564448	0.686403282411	0.867380202721	0.64121415677	0.595353535208	0.591565642799	0.650335626998	0.73944103441	0.619964986376	0.880868395632	0.522113786037	0.625491601755	0.828477902516	0.515710156895	0.89956147271	0.673447262345	0.695288187171	0.694904302788	0.763477373947	0.863109457226	0.627496295819	 [...]
+M42Aptr.140377	0.568660946539	0.735526513778	0.757488334004	0.8424126333	0.77452307155	0.653274773768	0.611876873271	0.849305514078	0.849411632859	0.537944552745	0.825514418721	0.891125583638	0.698837893996	0.888223740293	0.775380019592	0.779689222777	0.775430747217	0.765524602259	0.758634109458	0.793182472794	0.751157092961	0.755862250501	0.7313123744	0.496165896251	0.682793759647	0.775652041297	0.706415624685	0.840669467664	0.780910068254	0.211799432965	0.903161489198	0.656291817964	0. [...]
+M21Kner.140721	0.816308761524	0.63562774925	0.612571648166	0.704747193583	0.658514447742	0.712017068285	0.720614114888	0.670020016895	0.672777432166	0.803235483375	0.706728652548	0.830437109532	0.69091825519	0.785979105163	0.57969169048	0.630750880055	0.548060210898	0.612282856813	0.755058233181	0.586751220122	0.91849472044	0.618806190881	0.664766687271	0.833194857046	0.666583722305	0.92657698205	0.722003963646	0.658804167404	0.619914468477	0.809039012235	0.81002088349	0.761033957866	0.8 [...]
+M22Forr.140708	0.833541137571	0.606070803726	0.621090414505	0.743184007555	0.632946697415	0.69050178656	0.730020700794	0.717279612012	0.711749686975	0.802897301341	0.741985528617	0.790062313328	0.668824698694	0.788247043239	0.6140643982	0.592968408948	0.532376292863	0.627262279826	0.76149116448	0.608562413918	0.91889541372	0.635622321474	0.640946342398	0.843261430246	0.670811111571	0.924356687307	0.684669769341	0.67348933134	0.656227730884	0.797508910705	0.798270945976	0.743362708558	0.8 [...]
+F12Fcsp.140694	0.891845777788	0.780863209915	0.836138517898	0.843625267392	0.849556130625	0.822489502213	0.879776807214	0.842920712286	0.830542559139	0.867376679533	0.842324778305	0.751803895123	0.769226459505	0.76037828176	0.836875443607	0.843151755681	0.795093459699	0.811830820084	0.902762029286	0.85162070732	0.920364384815	0.84299516049	0.838207938523	0.900162289184	0.805738231183	0.943559706011	0.874006897689	0.815439144286	0.703901218876	0.896135250218	0.609246507603	0.853280255626	 [...]
+F31Plmr.140385	0.838184290588	0.609763008052	0.579178051826	0.646656963729	0.625203811385	0.66236740169	0.742838378287	0.624390670622	0.672084668188	0.805902592963	0.674149241102	0.856062367386	0.687522415602	0.84000812432	0.547136214647	0.571179454562	0.575063169063	0.541022564317	0.719288937651	0.584859213926	0.913339430634	0.60530826478	0.613278961153	0.84624801474	0.64000258495	0.929477803016	0.680666252156	0.663021133051	0.695466092577	0.807277620254	0.842955151604	0.735864597923	0. [...]
+M22Fotl.140555	0.833418112157	0.711120612867	0.657896436038	0.821379227972	0.654841481071	0.702278678704	0.773119872643	0.804344891561	0.814282475079	0.832131459243	0.808831779819	0.861148051883	0.780684947947	0.867412285351	0.667619660221	0.666125823317	0.644304146915	0.74582111529	0.678301971423	0.673693622402	0.927292815511	0.607892874925	0.735646738163	0.878880181111	0.75954788724	0.931921227953	0.687453402558	0.629293676122	0.721494672973	0.86448555079	0.842339434804	0.781841065793	 [...]
+F32Fotl.140455	0.716204837627	0.695536732839	0.686064154413	0.842925613585	0.619308401949	0.644095536877	0.679056978122	0.823490937301	0.822454835371	0.732858300084	0.838754024009	0.896095607287	0.755134426051	0.902594134291	0.68437013769	0.624420496137	0.642433692246	0.766112142776	0.591973271912	0.686101214798	0.876836348503	0.636863994513	0.660961325837	0.790601316987	0.683385699477	0.892777099402	0.586212009445	0.657178541428	0.728900421252	0.740753086287	0.903263384315	0.67130392179 [...]
+M41Pinr.140509	0.680454810825	0.679459391358	0.703784598546	0.800235737949	0.666951346211	0.628253473465	0.525189222862	0.778452798183	0.774766214649	0.595173951414	0.792756460198	0.892850187525	0.635303100338	0.870886437798	0.698417601645	0.685459101808	0.685957114518	0.69628500969	0.702266727355	0.717093011023	0.759762224999	0.660064640182	0.656390277961	0.684106665827	0.477191562374	0.834489072496	0.680276100015	0.772770264453	0.753693403144	0.582653855233	0.878816174173	0.59279708753 [...]
+M11Plmr.140866	0.739423919204	0.643103303973	0.592031718334	0.74618297542	0.627264212244	0.616256625173	0.676584612318	0.717813266638	0.738478413294	0.735185011584	0.72364656473	0.878259443807	0.687395601336	0.849433475537	0.608629394154	0.585688459897	0.565089103761	0.630973660775	0.693310999494	0.631837326905	0.879420264676	0.58549786742	0.58532667464	0.783087113971	0.571150451027	0.903366760444	0.663290115732	0.680042238648	0.708064771509	0.740151435849	0.874066242953	0.726465447406	0 [...]
+M22Plml.140625	0.843728588558	0.63848383336	0.699808921394	0.725064446008	0.709884507593	0.725311981956	0.789405683873	0.709634877551	0.677831359222	0.829832220678	0.715639180669	0.728860791981	0.686565228464	0.664082656811	0.675482759103	0.697185057578	0.61541063666	0.654400498301	0.826176412986	0.683843373084	0.925842764056	0.724156783082	0.668306020116	0.844052017802	0.70929390814	0.939905797682	0.770702214885	0.722665223743	0.630350593125	0.825564138447	0.713216623981	0.806070239481	 [...]
+F34Plmr.140416	0.697153877171	0.674524762426	0.607136064427	0.813325317192	0.525911129986	0.618200386793	0.638967229394	0.790763248118	0.791428972859	0.713916828507	0.810288528307	0.900223115357	0.727686889934	0.890685309422	0.679247800859	0.613787752462	0.602044170648	0.713700343958	0.61816923101	0.672271588181	0.866514662107	0.586685507929	0.638204732336	0.765860093319	0.65444284779	0.876273202563	0.566626854833	0.680481437077	0.73859889119	0.713312108684	0.89608817883	0.650788496768	0 [...]
+M12Indl.140438	0.649132944192	0.704846838723	0.65287374692	0.77682982012	0.693324993978	0.728298989872	0.583112482613	0.755348777365	0.752658849964	0.660891375769	0.799644002775	0.90336922681	0.642407889731	0.882607994682	0.686501260468	0.679427767362	0.696859872792	0.668263644961	0.805232140813	0.700137715118	0.82359951335	0.680260111402	0.713650204831	0.654718298623	0.633157341064	0.843053073728	0.711737094725	0.789731905035	0.743931753307	0.670860514463	0.890018767613	0.695872490959	0 [...]
+M32Aptr.140847	0.576045849001	0.803454380256	0.824615713623	0.885881212823	0.818681907348	0.707160847501	0.505914613883	0.857166309062	0.857775653991	0.474059992776	0.860774614392	0.918713954556	0.694999074071	0.910272867987	0.841160105078	0.82336777772	0.82715719984	0.799063267524	0.814567672247	0.84012172691	0.67450684083	0.809086413637	0.776424655039	0.428508359033	0.660232442149	0.683750179621	0.804728210967	0.877487663367	0.825130817363	0.500585978529	0.924797734733	0.62049687939	0. [...]
+M21Fotr.140859	0.77762616176	0.742318826718	0.703106287061	0.852126755684	0.67133085536	0.663340060356	0.726634193718	0.834725276641	0.838125129408	0.795307265346	0.843510748978	0.901440156767	0.776316320024	0.895890998489	0.719345219741	0.649388281954	0.667920886835	0.759520322005	0.646364329861	0.701186634407	0.905527656712	0.63367254025	0.70777771038	0.826702025386	0.743411746923	0.915116777115	0.655409266172	0.654389145355	0.747885326586	0.802125945068	0.896293849472	0.739682036201	0 [...]
+M24Fotl.140320	0.811148503354	0.709411352061	0.653773357525	0.821832275694	0.657110013073	0.696794909867	0.737300675368	0.802963278991	0.800723609535	0.79789893178	0.841051585857	0.908784310093	0.756941675374	0.903754966745	0.709450118406	0.612172621491	0.679411512275	0.741361547935	0.62547860433	0.667126719891	0.908374413319	0.645860494916	0.680805857319	0.840330277337	0.739838703022	0.912581429858	0.647432145705	0.654620731372	0.733312085851	0.811345969968	0.910677093915	0.731615941157 [...]
+M11Fotr.140751	0.571963199751	0.695735045508	0.674917414935	0.828727423861	0.663345803513	0.599713879036	0.615799367538	0.806372969376	0.803775071653	0.61570938087	0.812442633547	0.880396229656	0.646971383064	0.882731667218	0.692825609274	0.671272815766	0.682585970764	0.733640358242	0.676850925929	0.707487787671	0.831069965645	0.681019212427	0.67323066045	0.688199602644	0.676715303078	0.82908375618	0.635183314208	0.757884167454	0.733709296474	0.613127593857	0.881097784498	0.629943348264	 [...]
+M21Indr.140425	0.850542926501	0.633240597855	0.649621423612	0.536128355652	0.703780893179	0.712513619173	0.788114595039	0.503379967753	0.456856822077	0.805901464057	0.553767430068	0.7890596854	0.624555677093	0.704296976863	0.579587901717	0.649117627331	0.623328550185	0.501681788947	0.841513956251	0.590983375072	0.911127025227	0.705922506807	0.645632290882	0.83754160735	0.659214363543	0.927630504313	0.791820509357	0.755295353954	0.663442273195	0.814397448375	0.769503168634	0.75428787275	0 [...]
+F23Fcsw.140551	0.937447295365	0.834480524596	0.885285697849	0.859377131748	0.893844799571	0.873688765915	0.931910203029	0.875099374603	0.843813859956	0.903134940601	0.856533498968	0.767084112135	0.817985501785	0.697197030553	0.894635740995	0.890089548805	0.847412847542	0.864558279266	0.926967951852	0.892619968936	0.963442478627	0.887833868393	0.887864518011	0.924736713757	0.862338536953	0.961024888562	0.922040750291	0.866645635785	0.736307767362	0.918905295925	0.579286059794	0.8966317382 [...]
+M33Plmr.140723	0.814681316995	0.617480650687	0.563167778494	0.660095527953	0.644583978234	0.588524245144	0.696859513245	0.660052892576	0.681876817812	0.781249164219	0.70579038818	0.849736444772	0.65467065854	0.831978678336	0.55589559284	0.569302642603	0.577977611997	0.558701822013	0.748083839107	0.540216444219	0.90402578904	0.580930627998	0.598540070622	0.823119028485	0.639683587342	0.920381950344	0.699076026315	0.666515134089	0.684333445524	0.801923557354	0.846811096196	0.723258412536	0 [...]
+F22Indr.140640	0.798925530046	0.645316002232	0.594948131501	0.670635013796	0.640394712094	0.650898882909	0.705570084886	0.662187430459	0.677143380384	0.746926454661	0.667311539465	0.866532258065	0.68008473679	0.858840983505	0.56978666238	0.594588371143	0.560763087941	0.566800406515	0.711618786097	0.5947915966	0.894649522191	0.549345718604	0.572148665653	0.817503272065	0.568391487379	0.909892419585	0.662339853364	0.696467207159	0.702695192777	0.772828763263	0.873796187021	0.711220113341	0 [...]
+M33Plml.140822	0.833476513965	0.641404041327	0.635929798854	0.752455104295	0.621142938564	0.672795347156	0.747718171955	0.72563559629	0.706079681608	0.813711334833	0.721437568447	0.870193333141	0.681452135029	0.841205476907	0.648664385021	0.601838215256	0.593259188414	0.65832290222	0.744685637428	0.565873414754	0.922576683712	0.610063651395	0.618468661549	0.857228198515	0.672038770556	0.933241063094	0.702380026802	0.625179829731	0.62724936554	0.836376615225	0.858001165348	0.757140797911	 [...]
+F34Frhd.140568	0.68952382983	0.619066739231	0.621861135251	0.732649908134	0.664400113863	0.492247305969	0.590074408569	0.714105513364	0.710083882519	0.617155030116	0.688107998131	0.867396897679	0.563324384406	0.840769823472	0.651586054088	0.58429942216	0.589649504	0.624755107454	0.693709400585	0.641601522108	0.837851136649	0.617299090792	0.541461785636	0.695955753621	0.488280620863	0.864018353657	0.655649210904	0.726303527594	0.674316666581	0.636202886075	0.868364953556	0.596263565462	0. [...]
+M12Knel.140275	0.76158115686	0.610598416123	0.633082736642	0.729745313564	0.691166394514	0.659127147699	0.715723472044	0.750016198821	0.738442398081	0.764300210634	0.740578881018	0.825357235949	0.588276070007	0.823265963272	0.659047109889	0.664269189106	0.649305461348	0.653204275406	0.787200459911	0.670878983532	0.88462680269	0.668431012946	0.639011989495	0.783533677191	0.6909249339	0.902532531225	0.727461264818	0.664234103313	0.627842497338	0.740531842009	0.775543678357	0.76592944933	0. [...]
+F12Plmr.140839	0.842105299196	0.625660277031	0.598489634677	0.667495956596	0.61458268708	0.733165308543	0.763137920676	0.620322828518	0.600278588857	0.829289897943	0.669620737177	0.872094684702	0.696742549661	0.830699702839	0.505531236253	0.575732511272	0.569976744796	0.592325355598	0.704460228893	0.521365608759	0.92815714807	0.568565575096	0.645046202862	0.858502575825	0.701231087502	0.936571782613	0.71324881882	0.620949332369	0.668382118384	0.827731098204	0.861689682148	0.770609572085	 [...]
+F22Nose.140758	0.567198317548	0.77790209766	0.756740111485	0.820313817928	0.762237356737	0.676339002645	0.661390369116	0.79507942749	0.788627033879	0.457900703016	0.784262531574	0.882484435413	0.666784908383	0.882987337075	0.782173502444	0.782762932138	0.784313292885	0.768270036014	0.776811111202	0.807420898263	0.69444182788	0.766832890584	0.738210848019	0.631233793127	0.616494763396	0.740169269548	0.718081062649	0.847088192646	0.796996183971	0.483886726445	0.883131464969	0.51340279773	0 [...]
+F22Aptr.140609	0.505698503463	0.737979799171	0.733959238768	0.875712035634	0.738230449027	0.638813333389	0.4444589309	0.843842985482	0.857789108169	0.593500203156	0.851612169122	0.926619885353	0.651165926811	0.919440546337	0.770550111329	0.729985180779	0.747618902291	0.784792726495	0.754692913714	0.774261617419	0.782254446053	0.741615745989	0.701457340109	0.500979366362	0.626895334309	0.793183556676	0.701528562698	0.828772976106	0.744723723426	0.586294546137	0.907897857665	0.636733615222 [...]
+F21Forr.140353	0.822605481503	0.62843241202	0.536358799231	0.699043071615	0.602996414279	0.668779121667	0.704410790419	0.653787027558	0.663774099375	0.806499205019	0.707126969324	0.866635030796	0.721037646358	0.845216801517	0.520222925988	0.547017595789	0.554011373884	0.563662507537	0.715711875572	0.550864328439	0.916073020607	0.529863828721	0.598836758284	0.84403924837	0.635723774729	0.931301276016	0.679236824147	0.649395838942	0.639050972121	0.815975447459	0.856559924626	0.743018647373 [...]
+F31Kner.140761	0.805409063539	0.674650963466	0.637377674105	0.802781611343	0.605744187004	0.685624881368	0.725948966788	0.775073628719	0.75775820396	0.782151689307	0.800689301007	0.855744586761	0.725963595319	0.837582580848	0.657275344457	0.608039623008	0.605978469263	0.717056383525	0.678391424079	0.650854372278	0.899358819877	0.623246850906	0.669974872719	0.837040796213	0.723907474044	0.917651801601	0.628025789346	0.621515914589	0.686454207467	0.791016377265	0.861990739233	0.75001230704 [...]
+M32Aptl.140296	0.556682300495	0.8113674016	0.821145775279	0.904380368969	0.812613631379	0.672026771897	0.526562754649	0.874204954467	0.875903726879	0.533153842671	0.88304357954	0.931712841641	0.705351139335	0.933489460264	0.83047863887	0.815653901061	0.822296805875	0.801048240967	0.806662346173	0.845763992907	0.642824302603	0.796979553543	0.788284244222	0.37797627514	0.669365687131	0.649942646042	0.789787084684	0.872947613155	0.846197409452	0.599214289798	0.921115049436	0.548095644004	0. [...]
+F14Knee.140634	0.808079255863	0.591476113693	0.609017356251	0.746904783588	0.607400083768	0.671800616675	0.742265461751	0.713049465027	0.684190840951	0.794939983032	0.71835431534	0.858660767563	0.646883346453	0.812492867897	0.61278587041	0.543964203285	0.555370144532	0.639433806239	0.699369396478	0.590730031711	0.90825017363	0.612034674122	0.614824691638	0.834031235318	0.673559546667	0.927298953164	0.681374051513	0.636497548452	0.633707450805	0.793278366213	0.837515697178	0.775093367598	 [...]
+F32Indl.140607	0.834060899693	0.608960334763	0.577800088994	0.719354516101	0.592838845374	0.676410716456	0.682767047871	0.644408577026	0.66078498881	0.778863475469	0.694040517848	0.861166080554	0.671249720545	0.836427782571	0.552962227788	0.583857894788	0.545903366435	0.600610265248	0.719926006137	0.578346740232	0.870806066223	0.551975547694	0.603278511751	0.826674595085	0.598899910114	0.925228308351	0.672070139796	0.687725718521	0.703523334925	0.773571680544	0.856560179579	0.71135684903 [...]
+M63Plml.140689	0.800854741459	0.606226023604	0.626739165733	0.767128191167	0.641956012135	0.638669078433	0.663592685109	0.718429771141	0.744566829566	0.751275976797	0.7387292739	0.871587539638	0.676485055945	0.863449312011	0.640805745039	0.583987943189	0.558226481122	0.651478535636	0.704962780273	0.630039002591	0.887453698646	0.62036940299	0.614070141414	0.804050911455	0.61884262101	0.909524152551	0.646447334776	0.676025819919	0.697907723787	0.764057036304	0.878636793	0.735073632903	0.89 [...]
+M42Ewxr.140604	0.547980378074	0.83268372602	0.834549899728	0.88442923373	0.814452764406	0.751994333855	0.668972714709	0.852998020023	0.841933168514	0.577728384772	0.858131954781	0.931194272787	0.734365869244	0.923054552476	0.855607946016	0.837878140302	0.820450719531	0.825998191078	0.818621174042	0.854204284937	0.479730293175	0.828820843052	0.803028576197	0.640042620635	0.626392061173	0.653797299104	0.765670631322	0.886790914903	0.843490587081	0.68417861937	0.921488178457	0.635650511506	 [...]
+F21Indl.140446	0.727347654628	0.698978128792	0.672974329342	0.780482606227	0.600921655971	0.651531128476	0.649833681478	0.756689960389	0.744634048681	0.756694207391	0.779488928214	0.899337562813	0.735109882553	0.877466500176	0.661812561636	0.62666052072	0.656944334268	0.704965168647	0.632091765785	0.659774303916	0.861854906924	0.610226080321	0.655579934017	0.806496784194	0.647522641717	0.884519663573	0.596162354617	0.722187958771	0.746575985142	0.776075919982	0.893179451467	0.65959251408 [...]
+F31Mout.140605	0.857571375188	0.744020918196	0.6693177731	0.326122442547	0.740844957832	0.750332427107	0.826478781917	0.278185555863	0.33088675309	0.811294294056	0.325730207168	0.844946313529	0.68021745033	0.798079813792	0.588375663064	0.686624774827	0.705464339736	0.510161884024	0.829619145009	0.61295061279	0.910913319095	0.692541903393	0.716959673872	0.897661426309	0.654431757242	0.910890184874	0.803330165089	0.821046152855	0.766014483123	0.845859771182	0.849737268726	0.734023322058	0. [...]
+M22Tong.140364	0.849503987086	0.745882498196	0.683041725289	0.327167157049	0.763399950714	0.747999492315	0.821883184452	0.328350654343	0.387503582688	0.785154221818	0.289116565443	0.827942280222	0.712261545564	0.767304501223	0.633297228905	0.704228018711	0.721835829475	0.533281424587	0.847741907967	0.648929582976	0.908729063447	0.724799891779	0.717269328693	0.881930941319	0.636404561825	0.908522841559	0.819571086375	0.81569731725	0.764850894158	0.830629534637	0.821873574567	0.74577999124 [...]
+M31Fotr.140572	0.803270988873	0.628982420791	0.584967907222	0.77150446164	0.629298276412	0.578128070789	0.721621537182	0.748870800673	0.76473237311	0.782005156461	0.774139432623	0.859610436894	0.67597447699	0.849253634254	0.642741323968	0.572387984649	0.596914585255	0.669263744742	0.720353988163	0.637901842466	0.913336617683	0.600938086784	0.596590209186	0.834744081636	0.678987123671	0.919471090193	0.662012634485	0.647960126521	0.682716226349	0.794088021107	0.853258000686	0.742400862494	 [...]
+F21Nose.140573	0.605511328258	0.760250125762	0.769108334416	0.836818379437	0.758624723478	0.660828751587	0.48202406388	0.811637414476	0.809420551741	0.589877893806	0.814240156835	0.876592740698	0.690565580719	0.873771819459	0.789114056095	0.766242433317	0.780792369159	0.751362885964	0.791196759129	0.804761187983	0.725414691136	0.754340677424	0.750212268894	0.47768009727	0.633357971118	0.759561393893	0.762415873686	0.843655003155	0.815890825657	0.569113230563	0.903621175547	0.636405150249 [...]
+F22Knel.140720	0.803360304709	0.613810030681	0.630133616432	0.743031667705	0.661071134862	0.668337467232	0.737792901232	0.717289756907	0.682749249772	0.786169504668	0.767643370785	0.82541393896	0.65992671168	0.782188375732	0.633690385954	0.599346089635	0.618486401456	0.646676523112	0.771652723066	0.651508737397	0.907540397336	0.641756509275	0.616232595521	0.80512596381	0.675057571281	0.913624260857	0.72481424667	0.674747745225	0.585462102447	0.792813173863	0.789888885784	0.776228067169	0 [...]
+F11Forr.140422	0.83513814266	0.664720650783	0.588707240082	0.713113212975	0.648883049208	0.707567159206	0.790916226771	0.639335980702	0.667068238742	0.826185945769	0.713185563079	0.879406627366	0.741296096773	0.851189072974	0.50304030162	0.614340658088	0.629878516064	0.606363891129	0.754383443928	0.558453201005	0.901448341243	0.612580210032	0.670928623065	0.864810910741	0.671121196443	0.934947333405	0.720612042221	0.655323756067	0.681552918397	0.834262453927	0.870595330686	0.76981322979	 [...]
+M42Fotr.140861	0.508704867923	0.725346837011	0.715961894709	0.834723567362	0.723791671627	0.583557202934	0.612133679067	0.809149520475	0.796304124271	0.583676741286	0.816904330608	0.906743202153	0.683088243068	0.90772852433	0.748472043239	0.720632178247	0.72847772106	0.748239199717	0.631267269194	0.750700390935	0.776808936163	0.705776982464	0.694438540007	0.725496635727	0.649086370358	0.771913695157	0.604978975477	0.798772239891	0.774631188922	0.59238920962	0.899866795351	0.547021496628	 [...]
+M32Fotr.140562	0.815113331208	0.662673685748	0.65406902216	0.818700269981	0.665154070466	0.603774074382	0.745196875505	0.798129833623	0.810684994172	0.796182365642	0.799185140308	0.88597253753	0.71779286251	0.876522250155	0.686704210522	0.633219773802	0.626555111817	0.708696964129	0.711219492431	0.667049899883	0.918219928567	0.640644430976	0.612440749863	0.838612865039	0.721709683645	0.926870605648	0.701156240924	0.655848163745	0.670577872379	0.819063165934	0.879036599759	0.753335309813	 [...]
+M13Tong.140491	0.900831968993	0.758381124885	0.68326926601	0.167113417484	0.772981863223	0.793312075582	0.861245569974	0.367363897113	0.428743050173	0.856141689571	0.320998385069	0.84355528662	0.721941207002	0.79969940986	0.635877913113	0.715517235481	0.73635482822	0.539750254056	0.850821604107	0.643721552574	0.920147709463	0.72356425612	0.735084718975	0.915524562624	0.702051681405	0.920783976209	0.8300275465	0.832967663681	0.790532314435	0.842177128981	0.854210202746	0.785078401947	0.86 [...]
+M13Plmr.140301	0.788853251562	0.614339383101	0.531750594721	0.698372629703	0.619695084329	0.656195270113	0.707981298623	0.663210129541	0.689307494499	0.779404640638	0.704150016245	0.877097824662	0.689882133411	0.840248312981	0.582569392312	0.56745420903	0.602536186569	0.606087567074	0.695522990235	0.569752683045	0.909937399176	0.593214008228	0.589674264294	0.814326620421	0.680868551733	0.923765295298	0.665873928789	0.643403533692	0.653574146889	0.778968414336	0.863419998914	0.76423316070 [...]
+M21Indl.140576	0.855590601685	0.599895385235	0.664643260278	0.631483482005	0.686614532182	0.715118031484	0.782637480624	0.573349394717	0.542648176292	0.825838236943	0.585577455749	0.774025489533	0.644673937983	0.704302362681	0.563061272685	0.672039914074	0.591940276364	0.539557113393	0.811237737561	0.604302156005	0.920488511683	0.688269584177	0.6346386905	0.852291277069	0.673456682193	0.930060429374	0.781071273498	0.736605703696	0.660378253496	0.816074620367	0.775674876018	0.761247660749 [...]
+F21Tong.140655	0.904749777683	0.756726377867	0.6806835745	0.268760383221	0.788361035422	0.768261480088	0.863727417322	0.373628550363	0.429069571966	0.851627687844	0.288598847602	0.823128492937	0.728168579268	0.775064102777	0.624926731664	0.719761787795	0.7338977293	0.512751993308	0.865960193857	0.654284307854	0.923631682385	0.736546024792	0.738429531509	0.908954248576	0.707695886706	0.92447533491	0.850724765823	0.832494209385	0.794819273198	0.850766800345	0.831229737666	0.789508388281	0. [...]
+M12Forr.140849	0.673366801927	0.638909957703	0.617360978484	0.833423285635	0.656087849731	0.578590766246	0.616818488922	0.796273107052	0.81605639056	0.684003873154	0.800240162555	0.880754821849	0.673842563201	0.884259130933	0.630389340904	0.647870084522	0.615422214927	0.673507757884	0.623854818749	0.677320806355	0.86539929222	0.622703314974	0.623556115569	0.728457419553	0.676877607191	0.885527269321	0.587728524919	0.702776381827	0.728258699377	0.669484283672	0.890351066203	0.669560930021 [...]
+M43Plmr.140716	0.833597423608	0.644722197231	0.62638349326	0.600880863612	0.674978546651	0.692483013914	0.711169167797	0.611611048619	0.63576386851	0.802451048788	0.682587027243	0.873989009049	0.658969752599	0.858984122582	0.524978094605	0.545416147789	0.58376656842	0.561897886426	0.748944407514	0.565110955699	0.887074487052	0.618101942774	0.656940568876	0.83157183849	0.638438231092	0.930992997622	0.699401909817	0.691814983557	0.676363618967	0.815511056278	0.87482787015	0.742342398816	0. [...]
+F32Kner.140397	0.83326435581	0.651910846462	0.662008397492	0.791402010094	0.6459026545	0.709056207832	0.778381047778	0.759302115911	0.751873469551	0.814101843768	0.801264238406	0.8801417342	0.692743413493	0.870517452106	0.630545397886	0.596313215161	0.6326850243	0.714007488665	0.710907192206	0.640330749548	0.92212237412	0.617189099624	0.672381581872	0.859058117809	0.715035094912	0.930664948442	0.7093545297	0.635281030631	0.684509061379	0.830674557898	0.873966337858	0.779373291999	0.88779 [...]
+F21Knel.140372	0.792450474557	0.604784594404	0.63510383832	0.774141632332	0.690367334168	0.644446622177	0.677287336002	0.72984109615	0.755462452164	0.779571841533	0.768645558875	0.887718223092	0.721925371135	0.856590786396	0.595945562715	0.593127726295	0.629148417746	0.668905183885	0.701863745561	0.610755377338	0.907564835819	0.611083214719	0.602922329539	0.814531019202	0.641996871396	0.91515831933	0.683858674957	0.6542425992	0.665519835956	0.794197282612	0.866327048712	0.760792534487	0. [...]
+M31Plml.140743	0.784121683262	0.61700398321	0.585818038248	0.620235795223	0.656112838525	0.662630955818	0.703710368715	0.580714857943	0.60795519177	0.732808821771	0.59607414219	0.878746872006	0.591970955663	0.848292883232	0.531968964867	0.597123324439	0.628610500877	0.477141669554	0.759204936348	0.631951261568	0.886751471845	0.624011742883	0.532690422093	0.790361071584	0.577882439948	0.908379438601	0.687237596037	0.747601231442	0.698932862682	0.734274604505	0.866917764102	0.66274071045	0 [...]
+F11Pinr.140867	0.718753367306	0.604774343402	0.582719739486	0.701447785334	0.611400720283	0.604522230562	0.62814441711	0.645080944263	0.662039946323	0.691808716578	0.62800744752	0.856957913644	0.588970230116	0.831859663627	0.570088464361	0.582003531693	0.606426372247	0.612498292693	0.7386578047	0.57160874117	0.850658330712	0.586748016141	0.58671723411	0.753289074066	0.480764463714	0.882466642949	0.659089951433	0.713104323569	0.67832392378	0.734593493706	0.841392810145	0.639645214001	0.86 [...]
+M24Frhd.140442	0.66971522392	0.651260622128	0.647333484129	0.782030240565	0.639133514561	0.593970200311	0.598649838819	0.759711575074	0.755078412995	0.61131839916	0.77253592762	0.881177259316	0.674242010131	0.872705015035	0.679535387738	0.657964653158	0.612981078698	0.664015718168	0.659697368354	0.679077445585	0.825430104516	0.664767129983	0.624220037714	0.67158478875	0.608107580386	0.845478496117	0.584706856003	0.744178962886	0.714280959276	0.59054742017	0.884428626547	0.585686882146	0. [...]
+F22Tong.140837	0.908468239749	0.767148557805	0.703362850943	0.149100366023	0.786328632229	0.79231182307	0.867021122423	0.36567562686	0.420788653591	0.857408963001	0.338809516237	0.840265237744	0.734863774857	0.796536186878	0.645222754865	0.724466010801	0.752444678075	0.548510435546	0.861305771105	0.660608031545	0.926910826109	0.743736425995	0.733896733852	0.912632965168	0.715554894204	0.927928799233	0.842923041344	0.82940457238	0.792923570523	0.844496402132	0.837301266894	0.794915124821	 [...]
+M53Plmr.140520	0.766302551006	0.657546171614	0.588588592936	0.625915907602	0.645717303312	0.638407599795	0.708691136329	0.582714058448	0.616516329969	0.721533331667	0.628334033694	0.861613500738	0.668109411006	0.812384549534	0.570776024624	0.594447026764	0.605140408996	0.542018244191	0.755229762238	0.58031628392	0.878927622062	0.573132418989	0.625322170607	0.802387382402	0.5604130894	0.897696752476	0.718249642392	0.719022018011	0.703427087039	0.76034302088	0.860202420825	0.672241556404	0 [...]
+M64Frhd.140834	0.50987863584	0.793160896383	0.791484750668	0.876016028229	0.762301591555	0.696560185729	0.543873020081	0.84649156319	0.835425312524	0.625655309488	0.849235892633	0.936802642533	0.689405621156	0.929316167323	0.818213805746	0.786918463284	0.79568001084	0.804461801201	0.744933491316	0.817839919057	0.678289911528	0.768829727386	0.762869490305	0.624620504911	0.614663580257	0.708161997661	0.711182776479	0.856509390357	0.810010757844	0.711918151359	0.913176809968	0.557737377314	 [...]
+M21Plmr.140842	0.849703756773	0.632057714022	0.669788448005	0.673500793314	0.67929616139	0.712635033859	0.79108043536	0.640301085884	0.61245604474	0.822748049052	0.641662618881	0.762898347982	0.655792175687	0.695205917279	0.601112365064	0.6583447625	0.575786099074	0.602453185636	0.808425411887	0.609847957798	0.924800661149	0.667923920034	0.687888691161	0.845426102719	0.693468717573	0.935820798627	0.776995145737	0.707848490933	0.639512402957	0.834972972554	0.759663287099	0.785484575265	0. [...]
+M21Tong.140323	0.8792216248	0.778542335336	0.711571589237	0.309888983297	0.783690896282	0.787828472129	0.858002258053	0.377155388929	0.374570693816	0.825220179044	0.275056913934	0.83347440401	0.742053195713	0.770482591856	0.664186696858	0.728632519907	0.74517539811	0.566623372566	0.868240465552	0.665399070875	0.90037733035	0.744451071594	0.756565576154	0.896481767902	0.670955780651	0.899393606189	0.834788306408	0.842708974572	0.786190251855	0.830209508461	0.838594063271	0.78630005865	0.8 [...]
+M11Knel.140676	0.753966202802	0.576368712278	0.579432734398	0.742418107921	0.640605770155	0.629556002889	0.71592049063	0.737648816859	0.748620198436	0.74863390501	0.74566440313	0.806311717091	0.62567258152	0.798425765494	0.639324865401	0.617577083091	0.585420121515	0.650276100755	0.739568257247	0.643039660239	0.898907855156	0.618496325195	0.592914129997	0.80076715102	0.671815806717	0.915061464932	0.668935218052	0.670309204927	0.638681297661	0.764966688671	0.799687277051	0.747346134698	0. [...]
+M53Plml.140660	0.740355626237	0.639584330811	0.599438225727	0.614277257374	0.653829557634	0.582079046628	0.72375200536	0.645607806058	0.657586016523	0.748644735801	0.617587912492	0.833404632311	0.637201073866	0.830248504158	0.555430757152	0.62630263146	0.603756515685	0.557152508348	0.725041745237	0.612649950166	0.876036400859	0.602473080242	0.618537367551	0.79315358819	0.595253904792	0.898076406634	0.676880505176	0.713344646276	0.707524177611	0.778689725138	0.835704776724	0.648446689882	 [...]
+M12Pinr.140598	0.571600437075	0.661346873628	0.65733517903	0.804470709392	0.663171052925	0.564862034452	0.616415452743	0.782103645664	0.778577144396	0.54642363659	0.786620398263	0.86208505676	0.637311876756	0.853450390624	0.685662301472	0.662010859819	0.642716637503	0.693705658984	0.711517052312	0.664438508665	0.780129639932	0.632090723838	0.637426160456	0.650467797406	0.567748428153	0.828214081819	0.594163415911	0.752304170658	0.727008132675	0.573018880815	0.855836064117	0.584966941377	 [...]
+M32Pinr.140389	0.698858386443	0.693359469454	0.704844238138	0.815770331539	0.694933177733	0.655842057567	0.586411013144	0.774442805179	0.783038073789	0.576197854898	0.805464560513	0.903786947488	0.597258225894	0.896583147753	0.729176219087	0.692387353234	0.711658531493	0.696924648226	0.720788949423	0.740181020898	0.790783586101	0.682411223484	0.648826568376	0.652591234794	0.580147844747	0.836420736336	0.655095273384	0.778779242232	0.739065837534	0.578201332279	0.883473444496	0.5349269003 [...]
+F21Pinr.140773	0.595204476655	0.8019341224	0.802346632907	0.861808134439	0.79684782972	0.709589786984	0.577635403666	0.834377023719	0.833549230774	0.508965000277	0.835457791174	0.919968510466	0.676978520756	0.91170481778	0.828060313241	0.807896232744	0.789073368544	0.801683726361	0.820366393765	0.828573500496	0.569130319995	0.79062039066	0.761685797227	0.551431712296	0.583080504081	0.708733501425	0.766696857549	0.863188982362	0.808143488064	0.595022584449	0.910984681044	0.586408027994	0. [...]
+F31Tong.140785	0.85587927236	0.7521124635	0.685064264822	0.271666228102	0.760579948348	0.744775032819	0.825038953078	0.300436516955	0.328373852506	0.806589440347	0.299164388154	0.836958812549	0.675044729637	0.790112858458	0.62139661783	0.694423440658	0.725902012409	0.541722135699	0.831571644752	0.63429816501	0.912915997971	0.711252912321	0.716701497599	0.898636977025	0.63959133927	0.913047940247	0.807462999469	0.829006194632	0.764591908351	0.849697018692	0.84331714125	0.72830755365	0.850 [...]
+M22Indr.140471	0.855076221218	0.672760421196	0.694637403794	0.690079648425	0.704174705025	0.695724396188	0.772295033427	0.685136221947	0.616229776232	0.812562923666	0.697423561968	0.730907884084	0.672562753459	0.673233944374	0.654769700379	0.689739611685	0.637081373959	0.619052973204	0.807658866339	0.676748485606	0.913617212147	0.672338174456	0.681540273568	0.839053333042	0.677531836178	0.929516589112	0.773304007519	0.724239476366	0.664523507948	0.817834885656	0.724950348928	0.7693008286 [...]
+M13Ewax.140391	0.566842645264	0.882212761839	0.882471744965	0.896459284968	0.872037822691	0.800682619077	0.747534192945	0.899174862768	0.891464384146	0.649012515867	0.871384142209	0.930272820692	0.808915681886	0.928093396509	0.88888522053	0.882738913443	0.863494438582	0.870817136823	0.867291088545	0.89536048026	0.388898255497	0.869832640332	0.869684087537	0.639837564142	0.739982101319	0.561404654961	0.830696323858	0.921183981357	0.884523312233	0.616509803922	0.939333073948	0.734381838329 [...]
+F24Ewax.140807	0.758949777265	0.784580041519	0.780035552415	0.660314069864	0.802146372952	0.659487176646	0.78791379233	0.70432271358	0.688397490673	0.68823676132	0.608488923771	0.851889707452	0.65985293566	0.824043101218	0.760698603015	0.762620368761	0.741055421703	0.704917264728	0.79079955124	0.767530481511	0.683829558266	0.750082547502	0.718577855509	0.786239297747	0.547778332021	0.792746528231	0.789484843666	0.846681241174	0.798867375401	0.687113553634	0.881292661319	0.672108589475	0. [...]
+M64Plml.140628	0.837589118873	0.637291998474	0.571190033352	0.63744757608	0.640639778848	0.648503433513	0.75171707541	0.585015060815	0.589114714188	0.799838137687	0.647371164245	0.865151673367	0.702865950089	0.815357100274	0.562906718693	0.566377774701	0.546112292871	0.560473127549	0.756374664867	0.556617001458	0.910454225499	0.606933734925	0.634293723996	0.85198844359	0.627491371152	0.926232898913	0.707209484592	0.668755313036	0.698761533485	0.807029401825	0.850679920573	0.749661946758	 [...]
+M54Knee.140481	0.826608853118	0.67419639286	0.676972372194	0.783351931194	0.694570133313	0.705275684001	0.763385711627	0.763745880041	0.769504486191	0.817334295439	0.776979785105	0.81171186179	0.692751351112	0.800835533394	0.688558181319	0.656554675429	0.619664818102	0.699547404571	0.750529041536	0.686464908673	0.919126827746	0.672045044205	0.665702570705	0.853206069607	0.744031015022	0.935235001876	0.739498162286	0.650691579552	0.644445227621	0.826218567724	0.834499093405	0.809570123568 [...]
+M12Nose.140763	0.611390586744	0.830167566312	0.848786614276	0.887726968579	0.858789814741	0.754541190419	0.605357762326	0.891167409947	0.893954166497	0.552413221609	0.861553946931	0.915870762296	0.736261205179	0.913312821611	0.85897851204	0.843796524443	0.849687190886	0.820588308651	0.871035479338	0.872392080543	0.615767135315	0.836967057493	0.81776698194	0.372235111565	0.719930961774	0.638745757028	0.833036871195	0.902928407274	0.850738611477	0.396880376636	0.931650536485	0.700773974383 [...]
+M64Fcsw.140360	0.947771284858	0.792066999794	0.884493697916	0.836748412797	0.885779145173	0.879062933469	0.924670248973	0.860930724241	0.810389730483	0.902392335123	0.830926313596	0.716887245262	0.81972386621	0.686775980104	0.888907296418	0.882385054901	0.844496436053	0.861112954834	0.932722266105	0.870043455309	0.958017116807	0.8809299273	0.892593436978	0.947337903573	0.852736423877	0.955025827381	0.927892470999	0.848086998508	0.784408376306	0.910985902477	0.641298420803	0.904025452725	 [...]
+M63Tong.140865	0.900594651593	0.763994082566	0.700980457754	0.235095946911	0.777002302739	0.792904299248	0.867875377219	0.368388388924	0.407591070216	0.855801298554	0.314922150532	0.845640581018	0.728477319428	0.80191469971	0.639234254039	0.720548588304	0.736992201569	0.538983288748	0.852901758704	0.62917769267	0.919935705964	0.723984312862	0.74369945085	0.915316708379	0.706892154245	0.920558610999	0.832934604926	0.825912793407	0.782922216654	0.841785866183	0.848455078182	0.787548896797	 [...]
+M13Plml.140472	0.801624466413	0.675347733189	0.583911903774	0.755583468532	0.671785010467	0.694689818851	0.732247200502	0.755379110439	0.762172919869	0.78331002455	0.754499734346	0.858077479648	0.71464012294	0.847049086654	0.618020013019	0.649219585814	0.638401255443	0.662024726814	0.72187731491	0.667217970357	0.891376024804	0.626233909431	0.656011084986	0.796373748497	0.74150055916	0.911215641378	0.700440174979	0.678252983565	0.737255302801	0.751505780814	0.851252858147	0.770700095062	0 [...]
+F23Plmr.140435	0.802371903037	0.595044377532	0.55576386807	0.612692065097	0.615443317987	0.656004244458	0.701994420548	0.554183732785	0.574558469691	0.787056828382	0.624769417792	0.878618002004	0.694086421583	0.840752716094	0.507567512118	0.545968360214	0.50736121876	0.49804057275	0.71894103202	0.46755257527	0.907714396578	0.57239229659	0.572348260285	0.835658248779	0.578991198608	0.926376210322	0.658655607466	0.66654837999	0.676091230247	0.792871853795	0.866260174278	0.710432278066	0.89 [...]
+F11Tong.140366	0.863071515101	0.789006567437	0.741758537284	0.430194766112	0.765422582118	0.766481455353	0.83969424142	0.474724219964	0.433102895402	0.80220989456	0.306105854524	0.818798595029	0.721469467759	0.744781809938	0.704783903418	0.744204328591	0.739513428957	0.662597390452	0.855855337853	0.699187700703	0.870689955905	0.727369708346	0.756248451644	0.882096475097	0.627055071422	0.882173310067	0.823875835666	0.847339531326	0.790508489432	0.806909507446	0.842709534659	0.754736158112 [...]
+M14Tong.140420	0.898024507457	0.754860801113	0.67704184895	0.235557407683	0.771712109047	0.788509887391	0.864827366601	0.391629597595	0.451437699019	0.852116039834	0.382857524557	0.849818001058	0.728199238117	0.821463895891	0.625117910938	0.721294121059	0.731668011859	0.491809098046	0.855118285195	0.669842989851	0.917632595282	0.715892401753	0.731577065856	0.91306385672	0.702621415877	0.91810482942	0.830007368243	0.833416290683	0.806945386703	0.837544188698	0.843905302501	0.783010512577	 [...]
+F22Plml.140347	0.739604868119	0.6452984847	0.582233467625	0.769031807332	0.574360634913	0.616191908345	0.709862983229	0.729225970565	0.736758575764	0.736060738629	0.740153035403	0.882750578385	0.680139172363	0.874120354079	0.635883026828	0.58028981744	0.590985652146	0.667462146737	0.66142272292	0.635421348224	0.881560728037	0.574391828013	0.613222695581	0.802104237592	0.598380018202	0.896904865707	0.613205257135	0.658150773355	0.700916550941	0.750337882749	0.888195603412	0.685866381253	0 [...]
+M23Plml.140503	0.761883787589	0.675880679096	0.615566061891	0.732728871255	0.586069676451	0.646388575467	0.692262408832	0.742105983474	0.728285611597	0.736025096056	0.713804741666	0.84251587516	0.685319068	0.812426590564	0.678655312698	0.605546833139	0.570585162089	0.66600377594	0.696818253563	0.673790977421	0.850274533181	0.613654579236	0.671106653946	0.789014700175	0.686715530735	0.872059644904	0.607455434586	0.730601741107	0.759214616507	0.691206326495	0.853192037308	0.670824001853	0. [...]
+F11Aptl.140780	0.826184585174	0.616600749182	0.560739002087	0.664733275276	0.590947039706	0.664300096886	0.763296105316	0.604278690685	0.65485574905	0.817739153301	0.690804435954	0.872152617144	0.717776244168	0.849731448851	0.48203592446	0.542202536501	0.567199600523	0.574912735249	0.697804405896	0.571266207474	0.890562964231	0.583546353155	0.652754688865	0.863520055484	0.673974074613	0.932759403334	0.697989041223	0.641634992728	0.707792638144	0.831038408678	0.869416321261	0.761137775483 [...]
+F11Fotl.140558	0.688097503797	0.727972206411	0.698075017593	0.797320452177	0.691986103205	0.679264020801	0.687347370074	0.776101049967	0.748820308313	0.675881018763	0.771512847236	0.869840739988	0.69215461293	0.825539835767	0.753851966563	0.711694193836	0.713886749934	0.752789957648	0.737251160939	0.716198465046	0.813939739203	0.675067963613	0.725769965764	0.794001316383	0.599387640812	0.838752249763	0.700582198549	0.747618452191	0.733533087519	0.752041700316	0.827897156046	0.62871034452 [...]
+M11Indl.140749	0.738732177315	0.627404850738	0.597218536304	0.748854739616	0.591536516981	0.61762693032	0.659619366945	0.728803273109	0.723489388137	0.741646479682	0.74243900227	0.883665925211	0.684150879058	0.864247594033	0.606605780634	0.573131962515	0.551599449584	0.628573808857	0.684684499705	0.606263556947	0.896488689685	0.532581826251	0.582661026461	0.777109269483	0.57170961715	0.909812791712	0.642101401919	0.668614027837	0.680426969397	0.740432225551	0.876390354468	0.6883121706	0. [...]
+M22Navl.140637	0.746343756507	0.594408821399	0.638644960008	0.674270754061	0.62500302929	0.604829130479	0.706595819598	0.658216489452	0.611732844387	0.647967621999	0.621961475652	0.76159661472	0.59657349944	0.706353565319	0.656668555875	0.627796379473	0.560567476541	0.626817145456	0.772660300319	0.626496142954	0.859856893524	0.659248834755	0.629888413549	0.77621767815	0.551415841984	0.88098799375	0.704593473944	0.702235366226	0.642331984535	0.710186257909	0.788974166111	0.667732964842	0. [...]
+F31Knel.140379	0.733304400575	0.636245499424	0.6025105197	0.767248805383	0.610755530545	0.61151664897	0.641599918198	0.722075132123	0.73884614945	0.714217639568	0.772519829407	0.86002354872	0.696375907065	0.850251242009	0.657587638314	0.569785915866	0.633564381309	0.670803380502	0.69638069591	0.627575513532	0.87243519844	0.645826935603	0.63919566164	0.770900007614	0.621066989934	0.898620622025	0.61918206345	0.693995769413	0.705316213804	0.753670016238	0.862573668472	0.699400191639	0.8881 [...]
+M42Knel.140699	0.787532019433	0.598140494547	0.629902035141	0.810471717951	0.618295445407	0.685532744297	0.709114480319	0.795995004006	0.75972648119	0.766409946108	0.802314694465	0.838318713927	0.6652879592	0.83236799766	0.644846422388	0.621955760267	0.584331662842	0.689025860746	0.670449874135	0.630925145974	0.903345272097	0.599880060252	0.629375341319	0.798901477664	0.692737317199	0.913146815791	0.612220798034	0.638447766853	0.654838637614	0.745905567431	0.845384328844	0.769826454593	0 [...]
+F31Fotr.140874	0.812460920544	0.669962758876	0.678707954628	0.825291901127	0.65386145718	0.700001122372	0.760418739771	0.79846759868	0.814673640131	0.821661503802	0.814727311971	0.876320216198	0.791879955935	0.877763712933	0.681492442046	0.666779217656	0.64747921123	0.740238693663	0.702877616868	0.66634105049	0.919936821481	0.642593543775	0.716025668805	0.867231957444	0.743495005441	0.928134112071	0.662837682329	0.644186639166	0.71441300923	0.834639862482	0.871497475155	0.764203764246	0. [...]
+F31Aptl.140409	0.727023565239	0.683927462141	0.663585345617	0.776345637057	0.678399777899	0.644477883734	0.704425205807	0.745304723964	0.759354842504	0.713817189495	0.761203123601	0.858524510247	0.724912758035	0.827666720109	0.713522774973	0.649936898471	0.659638112576	0.725831100086	0.709375391295	0.673339556315	0.85988267268	0.711930731413	0.675309971402	0.7958612535	0.634262549225	0.883433106881	0.631336366724	0.752745810593	0.711262357226	0.752750398577	0.850081289093	0.685310260329	 [...]
+M22Frhd.140287	0.799652539576	0.656283032437	0.707049633501	0.743199096031	0.697709683696	0.676770827075	0.747518314704	0.722657379157	0.684193990541	0.75234351065	0.719680719771	0.711347934616	0.634824666368	0.641418331338	0.697492125068	0.686204653301	0.613176220798	0.679599514932	0.819667908718	0.695878916795	0.88954295729	0.719549879924	0.68064817548	0.814091394704	0.620097831855	0.90838355142	0.763138529811	0.733802405925	0.597062641891	0.78159892948	0.710642372093	0.751805308492	0. [...]
+F14Ewax.140371	0.640582160235	0.83793029463	0.833929713974	0.861193213683	0.801281586147	0.734410361179	0.661350179429	0.86705529908	0.857204299607	0.72811417734	0.828896327415	0.915113471086	0.785817060936	0.899409803644	0.849397739556	0.820381372922	0.821411175522	0.822403088397	0.769071181946	0.85690124546	0.604800480917	0.814976603219	0.807867902735	0.645325224173	0.739843529538	0.667532798056	0.767744431424	0.876697902536	0.876453031914	0.71202954154	0.929243667067	0.666651214844	0. [...]
+M32Fotl.140843	0.781414650238	0.630934649914	0.615966846059	0.808475483389	0.628067356857	0.635132998792	0.690029196939	0.779851852801	0.800749321465	0.760581642801	0.781813712681	0.853411114673	0.693487781776	0.855191460287	0.649656705356	0.629233813296	0.543798424583	0.672367053118	0.665176425203	0.620984934017	0.904991146823	0.59882626121	0.639389122803	0.805786921013	0.680532689633	0.913036227097	0.618691882872	0.613191492458	0.675167483982	0.781441987544	0.862226303142	0.71920690069 [...]
+M14Fcsw.140443	0.930974123987	0.773844812554	0.85574010621	0.842307213233	0.862509336617	0.850610934666	0.904884207078	0.855867612393	0.82782407488	0.895518156273	0.83849539805	0.648427659909	0.840034642274	0.645971959874	0.865211374693	0.863648623573	0.818433633369	0.839447764122	0.906549484874	0.870413386978	0.957517248413	0.872690129861	0.877013675803	0.919417206179	0.867356927916	0.955839344058	0.900942399651	0.833646263612	0.782705165776	0.90845252016	0.407618181474	0.895876454095	0 [...]
+F14Fotl.140552	0.577493781837	0.786901585355	0.791865918075	0.873006767041	0.76729285798	0.657035933816	0.610641023226	0.844316196474	0.833430679505	0.696547719098	0.846569983952	0.910192651043	0.771456051196	0.92035702285	0.800658298492	0.791400826595	0.79223163467	0.789154720846	0.700107275351	0.803134691355	0.75399055276	0.775927964203	0.753469160165	0.703682032151	0.727500140703	0.708611395082	0.669716829633	0.831854940804	0.841238079309	0.74405194675	0.923915039177	0.638359893877	0. [...]
+F31Fcsw.140492	0.931975455558	0.779482171314	0.841821325168	0.819446688329	0.847053537984	0.841940510875	0.90780431298	0.840436098879	0.802155149033	0.893867932318	0.810202566266	0.647879569178	0.828561928272	0.62686045592	0.860294663886	0.85415959498	0.805274896782	0.825330075661	0.905594584451	0.857175685288	0.951008547315	0.853403212333	0.868251293313	0.930082202141	0.85007711128	0.951755990445	0.887608831352	0.831705707658	0.766250073888	0.900900650188	0.537137687112	0.879558783297	0 [...]
+F22Forl.140279	0.794007879187	0.612643477393	0.576169641203	0.733092850483	0.603667329761	0.633096873403	0.696307558979	0.684577992435	0.706561231327	0.778507597255	0.70717850064	0.878666608085	0.689356014487	0.853329055647	0.579358641891	0.523536389654	0.563640389782	0.626622173504	0.65328853457	0.571162635098	0.908204740586	0.562799746752	0.590710159224	0.811170467024	0.626546725753	0.919605379901	0.647784110483	0.621543345708	0.667292144156	0.779471596511	0.876080583105	0.757251029237 [...]
+F11Nose.140325	0.63885507685	0.672466798808	0.667038924288	0.771652548207	0.649614650604	0.608451434291	0.577335552836	0.717126133035	0.724935843544	0.612938218425	0.735580856513	0.876968934636	0.634656442446	0.842691083885	0.65586109889	0.625449395626	0.670879227839	0.66333167176	0.711686624784	0.673921257571	0.828283261095	0.66506195021	0.636921766627	0.642890796911	0.555527465402	0.855224106549	0.650357888779	0.772304076189	0.694640508646	0.633806028808	0.860029165664	0.593637309712	0 [...]
+M22Fotr.140381	0.844542994618	0.721890841534	0.664686200869	0.840112206343	0.648850093318	0.708750350387	0.796258851149	0.817157391754	0.829126903817	0.852293005846	0.826341483913	0.882522066426	0.808329058115	0.876659326409	0.689536182713	0.659397156748	0.650534938234	0.764521153616	0.651776173849	0.670504946092	0.932369884149	0.602901289157	0.711660737305	0.889359927677	0.776326922996	0.940038900591	0.67476777231	0.569853645192	0.744247828755	0.871982674883	0.879843091673	0.79884258914 [...]
+M44Frhd.140730	0.71754854452	0.614110130713	0.639316645073	0.741636944489	0.6967809859	0.580891164216	0.572905233414	0.755809826892	0.750934180411	0.645418302096	0.697643043658	0.870835141715	0.592164915883	0.83929648373	0.661087068322	0.656586589546	0.644384877199	0.6468345312	0.728736673227	0.670111360898	0.820043689239	0.638844618056	0.596759431947	0.687929416496	0.505599817304	0.836121845383	0.671024609081	0.755210454472	0.684365917553	0.565766751732	0.85614277519	0.624310762427	0.88 [...]
+M31Ewxr.140754	0.664275849779	0.91223253321	0.912421287506	0.921136748703	0.899825834025	0.840215374019	0.773886694028	0.921541131802	0.915330647926	0.707893563253	0.901176316086	0.951639241777	0.821264427678	0.9500637049	0.915472084501	0.910343261023	0.906996476956	0.902830872248	0.888162652278	0.922101557969	0.350778515389	0.902078723229	0.895282134664	0.596587203826	0.802191646599	0.407353029304	0.873204032662	0.938047793519	0.91263809908	0.684751382287	0.951666486731	0.746456877466	0 [...]
+M53Frhd.140768	0.529350011006	0.772165128759	0.757180586366	0.879680618734	0.752711529327	0.588425579684	0.582750426597	0.837402152545	0.851606091816	0.572147057046	0.855475570037	0.892988466177	0.715355917556	0.906536373739	0.780240305473	0.755087564298	0.768231341868	0.768011103129	0.743649848915	0.786106622133	0.727351352471	0.761510097702	0.731079972072	0.545819073665	0.65557705217	0.745574087447	0.695639040518	0.826653601947	0.821710935553	0.642574579878	0.922270214616	0.6110654808	 [...]
+F12Nose.140365	0.631583528298	0.709918572944	0.67586327492	0.790595413463	0.671723876518	0.638655482545	0.678633848663	0.748203201644	0.720669653449	0.593819712875	0.741150115616	0.893266386438	0.65670518201	0.879142449318	0.695845111642	0.702573603708	0.679152279995	0.686705148019	0.676666358941	0.701848556713	0.778312962851	0.663066082744	0.654517998606	0.770778262893	0.583988184659	0.831832212048	0.609704924325	0.787429521332	0.758847879839	0.700486428688	0.887195214482	0.629432438103 [...]
+M14Fotr.140729	0.715248238083	0.622959544445	0.649652526368	0.816077609321	0.631933442805	0.591777463139	0.657866782307	0.801682674652	0.799591402144	0.663995645186	0.80613900226	0.8590626366	0.661689939279	0.863101077893	0.690573228432	0.614486296289	0.647463018302	0.712867201173	0.65761757796	0.643982490362	0.866515395725	0.633885537639	0.589103139204	0.77546258946	0.659018924908	0.857365899078	0.609102805576	0.687250340258	0.695223011346	0.688788509324	0.881278508753	0.657001396911	0. [...]
+F22Ewxr.140747	0.698309128119	0.901040337433	0.907649135449	0.916770288314	0.884490581026	0.840411312152	0.788015116827	0.917574926667	0.911256471164	0.733494093912	0.896552448518	0.953548913024	0.816281044862	0.95209721668	0.904979367307	0.897317789783	0.878976155189	0.895525532643	0.89269752618	0.912118043911	0.311946658424	0.897288205144	0.880126530051	0.634053959142	0.749017701988	0.524290779075	0.867636202753	0.928366164896	0.900240987849	0.715076237693	0.9475698081	0.749003794048	0 [...]
+F12Frhd.140469	0.691674897312	0.660799271411	0.672627029886	0.727255339749	0.693979891717	0.635387617189	0.644629413561	0.705271500913	0.688453825499	0.588753176999	0.691569338088	0.866011805647	0.59303039286	0.837045442676	0.625790478018	0.659746473097	0.626254937257	0.644255492079	0.706262971919	0.670147206609	0.816347344774	0.599562008127	0.639280400998	0.721152373533	0.454508147082	0.85146865394	0.69113605246	0.745819766474	0.710795402002	0.613745717146	0.858720973699	0.636911027644	 [...]
+F13Knee.140670	0.785067388822	0.589824309577	0.647513295504	0.760938963333	0.668058203197	0.646668970638	0.712088829713	0.743571176203	0.722217218733	0.750121499835	0.763996991815	0.876717565385	0.661037271861	0.853424743644	0.622593960942	0.582712263825	0.613323407105	0.648231380297	0.716019990626	0.6353371181	0.898512892056	0.643788988499	0.584568155903	0.815719468412	0.625579629177	0.909762834981	0.672580713412	0.658717809449	0.659193759214	0.755342765191	0.827558137169	0.740621161323 [...]
+F11Navl.140566	0.752654240786	0.705924606344	0.751713604769	0.783475184049	0.777671727541	0.588416977162	0.666838098379	0.794864758006	0.795593756271	0.581759824559	0.756652402558	0.871416231216	0.60923105527	0.850177044563	0.749588548863	0.714657087719	0.739914000203	0.718007856701	0.794006986038	0.743576203146	0.799721142139	0.726994584111	0.656755911596	0.661059820659	0.589019145521	0.828601642173	0.739293586567	0.802440571076	0.748644531393	0.522848276341	0.855749638289	0.65238908625 [...]
+F21Aptr.140500	0.654313427267	0.655242788198	0.65708087191	0.787641794383	0.67225749381	0.595896449664	0.589106730708	0.75909213423	0.729387229285	0.607366037001	0.75294070376	0.805299387661	0.545053936957	0.788230548272	0.713185386687	0.672547241783	0.645671650711	0.704922766829	0.764532330138	0.722463978679	0.84045157505	0.691978584325	0.662066816312	0.643195468268	0.582303716078	0.856904323986	0.695204238861	0.761677423995	0.647594205987	0.618242890928	0.799303155137	0.639725941065	0. [...]
+F32Aptr.140339	0.786848427646	0.640919322842	0.634219178025	0.731133174847	0.633355867059	0.658010129446	0.678352015077	0.687325321463	0.705100361753	0.746751316348	0.686276186482	0.868376745425	0.678499840852	0.842366165637	0.604357950518	0.624198996246	0.623337398444	0.615867095744	0.736058363422	0.619932742517	0.886029460204	0.564060502884	0.649173297471	0.813050963925	0.631097020867	0.905337747508	0.685511630587	0.697711891036	0.723171044136	0.767005323592	0.86050989224	0.69835066604 [...]
+M34Fotr.140476	0.801982320704	0.629880828452	0.695778407544	0.790100526935	0.72500897962	0.625410769481	0.783783281754	0.771183614903	0.762651692229	0.7766137882	0.774409319885	0.793629942286	0.647318790081	0.767161045763	0.717346777652	0.689159721192	0.684947020192	0.711797220748	0.76392488394	0.711211709492	0.906626545998	0.697554835094	0.655979229175	0.829977003781	0.71569123353	0.906110864431	0.741925189963	0.721754139731	0.688787592468	0.807339944892	0.75846313703	0.746217903575	0.7 [...]
+M44Knee.140736	0.836502082857	0.543451069977	0.627758028121	0.748013606372	0.616159187531	0.714464253476	0.768276346007	0.733915372197	0.72339335391	0.821886531158	0.7587119777	0.811320845743	0.728646668055	0.801632375985	0.624761479375	0.610318307186	0.560820792572	0.656379427221	0.732175395385	0.604643808714	0.93050040379	0.603985138098	0.64255304085	0.857600571706	0.689305880354	0.938249469304	0.717792865562	0.590855877995	0.626461378371	0.813582328361	0.807577982634	0.781328842863	0. [...]
+F13Tong.140677	0.898790430807	0.779548512129	0.715760816109	0.431464917659	0.794748524593	0.790520511757	0.867177191583	0.387819340253	0.44326584024	0.842364261651	0.383264849171	0.82885522419	0.756917750207	0.766997052469	0.608173294276	0.732463095007	0.764492044056	0.592562767754	0.881502161572	0.704424608999	0.873770598248	0.761907464745	0.746215885436	0.903036702343	0.717440739911	0.918838590478	0.853334451846	0.847929819083	0.799189014781	0.847108425141	0.840802668816	0.809123946176 [...]
+F32Tong.140410	0.822588850934	0.718543576987	0.675416174136	0.344547877336	0.719408186421	0.714493628571	0.793306511325	0.337420103398	0.341947619461	0.769009708333	0.247857215617	0.838877245496	0.660384824761	0.772899002974	0.619253518778	0.684977199893	0.684346926992	0.549298062973	0.798513839963	0.622013544345	0.882651235797	0.659868768878	0.67498561255	0.869323193633	0.590770625019	0.905133884064	0.767332393526	0.813005115602	0.759031874344	0.7927727947	0.840760566345	0.68182192188	0 [...]
+M13Knee.140288	0.747778388502	0.692681606278	0.639087533062	0.810189288834	0.710128599959	0.630932174483	0.715726598437	0.822674885889	0.829203994878	0.73030989106	0.788188356171	0.852531789084	0.653657121695	0.84176851315	0.707227313596	0.688015368701	0.694368323181	0.722728850508	0.769739659669	0.731147174119	0.873522271714	0.67659661309	0.644529341354	0.771719183387	0.722849348034	0.898425168628	0.712371832451	0.750702353398	0.728275045229	0.715673697767	0.842789824213	0.735626890749	 [...]
+M64Knee.140570	0.825233202876	0.615457337748	0.66054004183	0.81164658402	0.628677372252	0.683114998728	0.734062610156	0.792770686997	0.764338743971	0.800913161435	0.786655490188	0.826811773694	0.676093702068	0.815377390177	0.673989949833	0.60731647468	0.617623128416	0.720004615943	0.750573924775	0.658907801705	0.920235395682	0.622559991455	0.657084613373	0.833708499817	0.699123395865	0.932446176672	0.705336996808	0.561952252407	0.628145618712	0.811569327725	0.802733203027	0.784843753843	 [...]
+M44Fcsw.140418	0.920272438403	0.762128463434	0.850614219403	0.808108715617	0.852894108298	0.835173199564	0.889813292897	0.827331050972	0.796687005803	0.877021549099	0.810783033571	0.623782561681	0.756860934812	0.677764871462	0.853726762561	0.843959683017	0.802757406459	0.817419644228	0.896036939463	0.844474089158	0.950311311827	0.855690134186	0.82362191851	0.890041455963	0.835517087422	0.944174395372	0.896564626011	0.820357181854	0.707342817118	0.871896058713	0.643304881992	0.88223950008 [...]
+M21Aptl.140554	0.78038591567	0.647740666847	0.601462698904	0.710159522086	0.634544445308	0.646878275507	0.673797931696	0.673774471837	0.684565462082	0.758311734929	0.665029437128	0.862927947878	0.667552504429	0.789126459103	0.623259996913	0.607442613203	0.618668843656	0.634839132498	0.734405100152	0.564870199068	0.898921690688	0.570051683395	0.650668840764	0.795615048129	0.621141875113	0.912723825448	0.680637924975	0.675089891467	0.643532948896	0.766441770934	0.844456767857	0.73055338310 [...]
+M21Nose.140682	0.835968250156	0.633999019922	0.681157742197	0.654128456125	0.714775816018	0.701002228001	0.730863082157	0.630423678114	0.570684471423	0.804890494209	0.667216468304	0.739119179301	0.665671875264	0.673793761425	0.615941271672	0.679792532718	0.594114931861	0.556261952654	0.823730382991	0.616895976563	0.910978651941	0.699562605492	0.666950017763	0.81269534649	0.657098482603	0.923573609754	0.774849448236	0.735049167354	0.620933060978	0.811586213809	0.763655297008	0.74813869619 [...]
+M12Aptl.140844	0.370209560115	0.783549589375	0.770233777599	0.883661851503	0.771277341042	0.639461482599	0.585487031636	0.855337638639	0.85579436672	0.517299516037	0.86046666289	0.924106116099	0.687565329984	0.925521135597	0.789684218414	0.771390283805	0.774053267678	0.789034363929	0.777703210119	0.804118502697	0.739625232005	0.757622173262	0.742199612261	0.543019915399	0.648155933732	0.744867578328	0.725235666106	0.836586047887	0.798284287094	0.579119996027	0.922537579923	0.628163502764 [...]
+M41Fcsw.140359	0.94225361058	0.755193981852	0.870879291145	0.836005683219	0.881344307627	0.86785828055	0.906655158103	0.851448474285	0.827915933685	0.89331429495	0.837432648556	0.681763720328	0.779522587464	0.702333777689	0.87469235337	0.857285589021	0.83185072409	0.857328950012	0.928436025299	0.857102904354	0.962622540324	0.874232381437	0.844987563783	0.907976717076	0.850606868577	0.96074502316	0.927598720771	0.852783715241	0.738706466185	0.89823854377	0.668461700002	0.897082594537	0.48 [...]
+F22Indl.140644	0.747066151523	0.618650512735	0.516816693976	0.660782020105	0.570505310609	0.613459468953	0.661400694197	0.63184615042	0.641198485908	0.722969573584	0.639822866563	0.846467688071	0.674545557653	0.826141734492	0.573406731044	0.582089834598	0.554134204648	0.587471194691	0.705241131354	0.56442574736	0.880614892046	0.513522313904	0.590853376688	0.778954259462	0.506373499533	0.904107191241	0.619695892292	0.67928519313	0.671661311228	0.742435724934	0.849753946805	0.689811811225	 [...]
+M11Plml.140620	0.745469661008	0.683716109656	0.582118413699	0.778657546755	0.582269685923	0.721431066082	0.668536031152	0.753360565542	0.758906586723	0.758848180416	0.803303529684	0.901540839023	0.722924222455	0.888952389297	0.653589840389	0.600426888273	0.632982009148	0.658564267284	0.719080887606	0.669638733845	0.897883042335	0.568586930104	0.694862233131	0.780531172022	0.687263987839	0.911499997483	0.686001549117	0.682287865695	0.730697563609	0.748968380671	0.882938592276	0.7165708421 [...]
+M11Aptr.140378	0.456988814863	0.781274945649	0.720860700141	0.858214708308	0.763089643702	0.647437461294	0.594751704921	0.832061054312	0.831194690045	0.601985578712	0.879638786623	0.928436072861	0.733013322316	0.929862257217	0.75353263184	0.751959164482	0.768242034741	0.752648773896	0.739662935496	0.774983421908	0.773684478045	0.774670823381	0.7372035196	0.6240416019	0.694717262623	0.788787378265	0.712137223082	0.823873579363	0.80267707216	0.639082231576	0.926083580557	0.637872145636	0.9 [...]
+F11Ewxr.140797	0.722691664755	0.674198497101	0.709767723569	0.753888841424	0.722695264937	0.620209696227	0.685555253448	0.745306690411	0.737969230527	0.641403777771	0.714446259285	0.817267487413	0.574866234899	0.802987326705	0.720701049385	0.676100936687	0.658330734446	0.715123459898	0.768184928681	0.741244118817	0.758151629202	0.704399670003	0.663772118565	0.727289653567	0.507283607778	0.839154705866	0.716370761963	0.775864389474	0.703777832999	0.655274570011	0.834563601859	0.6839674533 [...]
+F34Knee.140341	0.802958000163	0.610241079793	0.551907933035	0.720158738341	0.600205404487	0.647606933297	0.704072744754	0.672285972599	0.708974756666	0.786767142044	0.695056245968	0.890262192876	0.697869359323	0.876206349649	0.549982205135	0.556674985326	0.553605841044	0.586931497952	0.690991096278	0.57636567888	0.912321023596	0.566728949605	0.53837047609	0.814862649096	0.60515979864	0.918210173404	0.648308838291	0.63102898026	0.677352690378	0.787513081038	0.890117313174	0.716097435645	0 [...]
+M44Tong.140642	0.890187822172	0.77300695863	0.686386045882	0.272154008317	0.76710624106	0.775352635917	0.853811871709	0.327097166245	0.350637477677	0.840892455337	0.262690898112	0.844161026042	0.71070197622	0.780899930309	0.630901416501	0.698493238067	0.736584863234	0.532271887092	0.842379524115	0.62892849904	0.910528747949	0.707899819518	0.729782225117	0.90617430732	0.680814684105	0.910472065854	0.820753532887	0.840237636563	0.793746718582	0.824561926073	0.851098039134	0.765255194327	0. [...]
+M21Frhd.140641	0.812936862343	0.610078720172	0.681305441896	0.647339173433	0.682129683319	0.682948914471	0.732808922274	0.613235529056	0.56719062271	0.768042008133	0.597989265472	0.76398910357	0.609892286719	0.716675866662	0.559431241701	0.632996905273	0.555232045174	0.564387054867	0.782109043963	0.629584874325	0.897172474388	0.65325595778	0.675514727064	0.807689849762	0.592152143521	0.913313412224	0.75352286109	0.722214505086	0.651513448658	0.773013366313	0.784395867572	0.726458711789	0 [...]
+F13Ewax.140760	0.676200274554	0.870204384339	0.871586275679	0.871726264766	0.853031272522	0.810750708366	0.719763821793	0.87686698237	0.867257390135	0.713419962814	0.844655429423	0.940053037024	0.764862764295	0.938199626	0.881637283697	0.86643799187	0.866351197018	0.86630039585	0.864554991804	0.892095111911	0.492309541531	0.865278428597	0.849664264027	0.651589956473	0.724944983313	0.585646544835	0.813709520274	0.908538713269	0.87573531791	0.693603143173	0.91822561314	0.662899125433	0.945 [...]
+M41Aptr.140772	0.673081888243	0.660224733379	0.692810024144	0.801831349241	0.726106817345	0.596253120787	0.653117173171	0.779103490501	0.775328101435	0.500095160261	0.779249731949	0.839502839705	0.632927264181	0.849821060494	0.714462535311	0.730688704538	0.702552689033	0.686525457664	0.735520857552	0.738791749525	0.80087176305	0.705143091852	0.654652517514	0.639910324221	0.615210745766	0.832442826617	0.665990599654	0.790160110829	0.761686613045	0.41761841541	0.874423994944	0.637153996039 [...]
+M54Plmr.140626	0.841086671138	0.655844548491	0.584095929913	0.525642605744	0.673796336541	0.685775169793	0.763532339132	0.508521530611	0.580222354332	0.800912635818	0.588318469277	0.858910352208	0.670405028931	0.840575156974	0.486418700308	0.572084808318	0.604569825246	0.451734475314	0.791181000624	0.595866033182	0.883693163063	0.62733960959	0.661608804493	0.857615588445	0.626937697994	0.930108651313	0.752750531764	0.753343463873	0.730949111678	0.830248125968	0.859494884825	0.73503762632 [...]
+F13Fotr.140798	0.645151972072	0.721314148324	0.736446522243	0.782793878983	0.764099101717	0.55115674299	0.655202685086	0.759115479909	0.758493536582	0.569111744983	0.737222578618	0.867932768688	0.577069321022	0.858478923295	0.733371232305	0.734272115794	0.731565268373	0.716908500281	0.764001434668	0.731260987815	0.758666572492	0.717451263143	0.648583390894	0.666409342792	0.53150111874	0.772062777085	0.712769850801	0.788323312318	0.739969574624	0.613720823331	0.856218667702	0.557306368298 [...]
+M44Plmr.140868	0.815524862984	0.616091739838	0.558801363907	0.651426690917	0.564749830904	0.660116875474	0.739307805192	0.624567108829	0.64680317417	0.773845840431	0.618787511737	0.859074869247	0.6797716192	0.824042810686	0.581480613221	0.61582369311	0.560186507294	0.569415875036	0.716351589628	0.570952193361	0.898060638154	0.563143559287	0.590281683418	0.838476337662	0.606036079479	0.920455768329	0.695987005919	0.665381287544	0.680228239719	0.778147685804	0.864014269635	0.713154691546	0 [...]
+F23Tong.140417	0.88529611599	0.760577724219	0.683424766419	0.278303363821	0.770515704883	0.776954537195	0.859824433686	0.411911554147	0.428120146751	0.833896703332	0.340787797238	0.826263557355	0.689050551266	0.805153412523	0.633984370341	0.706107958533	0.732105924145	0.549650161108	0.842990035334	0.669682405051	0.90603128961	0.710605402274	0.725564428206	0.90185822206	0.662136899791	0.905589009731	0.819861656538	0.836192281598	0.805476302613	0.816420804378	0.843075043044	0.755884354379	 [...]
+M32Fcsp.140470	0.927046877073	0.814970778057	0.845058036456	0.792470210548	0.841670400934	0.854861148044	0.901778186193	0.824377301994	0.773918962831	0.871810053527	0.778972000569	0.633940667564	0.805468710359	0.575211751065	0.865850314476	0.854958655208	0.803466261459	0.824797996104	0.89523164964	0.870674589927	0.947894029967	0.849956388196	0.870174031633	0.935632653932	0.837164615187	0.943205734601	0.898268483034	0.845742772929	0.797834116945	0.881408655346	0.63443963444	0.885530188976 [...]
+F22Aptl.140370	0.568131096511	0.741712106344	0.747862711713	0.87311048509	0.747117083198	0.633851612813	0.553568760908	0.83205793946	0.845692597167	0.614110991928	0.848294599303	0.900539445009	0.652730421418	0.909657301566	0.77037773205	0.749282684214	0.759805131089	0.77674510145	0.770275093972	0.781946542961	0.748149599807	0.764095865546	0.698228629708	0.520738692475	0.629764326091	0.746236454349	0.718175677593	0.830381546576	0.781183564698	0.599004542627	0.915518139434	0.640197697799	0 [...]
+F22Forr.140732	0.79477829651	0.628429668656	0.584594690892	0.732530970842	0.554781075029	0.672569172646	0.685864443391	0.69420370033	0.695782705195	0.795261851247	0.705461702552	0.884980055035	0.749232842153	0.854030134348	0.594839708303	0.563982319101	0.563819757472	0.67075954942	0.690770926867	0.562674454357	0.90857834343	0.57575349505	0.637417983367	0.847659237995	0.657667013088	0.927134418661	0.580775232848	0.659706212233	0.699869713168	0.810019051507	0.874653874382	0.728222868131	0. [...]
+M53Knee.140318	0.837202303227	0.627810969219	0.614867185806	0.777884117922	0.664865920515	0.706792429196	0.787506691225	0.757063448858	0.759548798868	0.809650616703	0.76698543211	0.832971118719	0.70907153057	0.830629124742	0.655729164784	0.655824112118	0.60519587443	0.659289983626	0.745963888866	0.683698167616	0.922919212369	0.663432076095	0.583990965157	0.86242213641	0.726743144579	0.93388671924	0.723747825551	0.659923968523	0.695972769434	0.808888224974	0.845873543601	0.78589548348	0.8 [...]
+F21Plmr.140745	0.739924065243	0.657788907241	0.590916478746	0.75189063457	0.620756591181	0.647998591707	0.672249230632	0.724118987823	0.736620435974	0.715564673406	0.737455127588	0.876498813724	0.665569579121	0.861711006741	0.628183424113	0.632691870514	0.621254049262	0.618797667566	0.644412225086	0.660934314566	0.872377326876	0.580947433586	0.636943642595	0.767406518201	0.65208353019	0.89211246739	0.650217023832	0.719482550603	0.709009490214	0.686097995307	0.876870757445	0.666771196922	 [...]
+M41Fcsp.140643	0.910857114835	0.803643155031	0.867020367643	0.852114398309	0.863215741091	0.856925470105	0.892369218403	0.85869002807	0.817017631711	0.864529221395	0.830912710041	0.701544145079	0.795239093209	0.687440459439	0.884268358944	0.874695679492	0.840016515366	0.867334569209	0.906760029051	0.883179949561	0.944438640479	0.871995504801	0.885922947043	0.891209592549	0.837059580822	0.939075924394	0.900526885863	0.88208263936	0.818546876402	0.885304274442	0.66154175901	0.862728177417	 [...]
+F33Fcsw.140662	0.928427832713	0.786758151606	0.860678414627	0.834674119623	0.85796695395	0.865768233523	0.90809896858	0.85421935068	0.805061092056	0.895425169885	0.827289465258	0.681167400433	0.841059567202	0.645323657987	0.879130815981	0.870640146028	0.828265874023	0.846850581816	0.901255602668	0.866801965502	0.949015934186	0.862577627366	0.879099811908	0.934864074758	0.864185313082	0.946445398168	0.901528506066	0.843581216082	0.789531215432	0.900053784857	0.584907657537	0.899595318729	 [...]
+M11Fcsw.140610	0.943929186013	0.821603438057	0.874334764493	0.866273858921	0.887138110745	0.869237022173	0.918402065228	0.882155297051	0.848468229423	0.906609581605	0.866929349982	0.675565699996	0.839091531141	0.633317779632	0.893152294949	0.885141399434	0.837782374898	0.860754787341	0.93217266441	0.893513773697	0.966593441874	0.892957833399	0.900733767825	0.925467014527	0.876389116597	0.964726624895	0.919768655235	0.865007497571	0.787545111947	0.920368934122	0.38707617603	0.900816574304 [...]
+M14Plml.140544	0.645543408378	0.701823901806	0.646365992557	0.833696609528	0.641322590675	0.659553279845	0.645334994692	0.810898552404	0.82764051024	0.724307701316	0.814606517092	0.918725450829	0.696097672677	0.912733834178	0.6973446909	0.671069581379	0.672919462753	0.716086233536	0.714344831894	0.717067905009	0.859452307059	0.648178326472	0.680693489936	0.747218790256	0.712672151127	0.882662824745	0.65668776002	0.729974719642	0.755113987572	0.766934577613	0.896716323894	0.679308597285	0 [...]
+F14Frhd.140741	0.754183414755	0.638575968759	0.591138590019	0.55297351861	0.642938816796	0.59449073987	0.671187045954	0.511096637214	0.521387399844	0.705552315153	0.469621578016	0.839821791795	0.593296053073	0.79216616858	0.559471835287	0.588082169896	0.588510671526	0.546508332693	0.74260016312	0.548734541654	0.864753267905	0.584950864021	0.649586430959	0.761444179938	0.562017607197	0.89146287886	0.681498558671	0.733837155777	0.713701018315	0.74533295125	0.844822827868	0.631207301096	0.8 [...]
+M34Tong.140540	0.905388674775	0.779995964618	0.703059415683	0.184579081061	0.800624677097	0.77681728366	0.864555981533	0.407808329822	0.479142545752	0.852620948913	0.350007965207	0.827993192908	0.724077610753	0.784051075998	0.647149729218	0.725476153056	0.760433884231	0.529240298779	0.870365915086	0.680765762588	0.924196996795	0.758297268701	0.738221537904	0.909587061825	0.714297915791	0.92507214037	0.855473096143	0.833785656346	0.783972856291	0.857347785349	0.822834195662	0.786576741477 [...]
+F11Fotr.140431	0.686143134754	0.736915597044	0.713296284041	0.826059191528	0.666968027466	0.712695696988	0.682787982344	0.781467728125	0.790707673967	0.700183561406	0.775592000235	0.910366092721	0.720753119394	0.897210235023	0.734937985161	0.725917281395	0.704337450616	0.737476856571	0.754481662	0.731358002906	0.78525311091	0.668694026507	0.723764500623	0.778535826587	0.547797300597	0.820192217971	0.714989558862	0.770366492453	0.768165117756	0.726596193001	0.881992489673	0.599227866082	0 [...]
+M24Ewax.140460	0.582163524102	0.866938643086	0.869924842383	0.907433589677	0.850971033815	0.772079233195	0.679399565899	0.874352551328	0.864710789766	0.65204151959	0.89857384592	0.950184117117	0.766097466758	0.952486613211	0.879026579369	0.855567014426	0.853568830954	0.851709410206	0.841696785656	0.888539049001	0.463927596482	0.848043232369	0.847709885783	0.596407802379	0.723308127233	0.594768490084	0.812664092857	0.90849806995	0.878977844053	0.723093646423	0.937792080038	0.651086417565	 [...]
+M42Forl.140600	0.78119685078	0.597141499194	0.587521020988	0.743436490048	0.580993064913	0.645596785866	0.699711901182	0.703613299644	0.710433112344	0.753432343602	0.713463189224	0.884108124835	0.694956944631	0.872172010766	0.554878361076	0.545019754627	0.536160370502	0.602530902503	0.682649091106	0.586307562785	0.898621920756	0.542299874547	0.579215803228	0.803290118916	0.546181178063	0.913679459737	0.663303103077	0.650449883674	0.684777063625	0.765909584155	0.884160590959	0.66166424216 [...]
+F21Frhd.140623	0.574707006619	0.817140194893	0.805600772767	0.872681550287	0.794735036031	0.701009452875	0.605052460405	0.846691097681	0.835190417881	0.682225353235	0.84367435629	0.940893957134	0.734125917595	0.942864138711	0.829620876463	0.816900261815	0.815577098706	0.803598716019	0.777441806066	0.827415811462	0.670151066009	0.794335982997	0.784364774754	0.640751262718	0.697488151974	0.717535570698	0.718191047768	0.866634337389	0.851185760146	0.755708319036	0.898495544684	0.54230112992 [...]
+F24Fotr.140273	0.590204451151	0.849166157872	0.851723672815	0.893656098586	0.848725968038	0.722920607315	0.634078617916	0.896568342601	0.888850568654	0.702490686403	0.868693342304	0.941768789349	0.78769424802	0.940021499823	0.866865624957	0.856341637355	0.846471417821	0.844442422312	0.788017505082	0.865811537721	0.605898933358	0.840133080376	0.823190155905	0.605108408804	0.772688874892	0.594301668395	0.776754559378	0.888666980676	0.881903221497	0.664067778847	0.926070268246	0.62856716223 [...]
+M11Kner.140517	0.776999879456	0.621892953382	0.599762523832	0.768309295526	0.606844155683	0.68713767871	0.691771482682	0.755647950155	0.737873084473	0.770935474581	0.776481505279	0.845685015117	0.631682138692	0.834514728011	0.624639498176	0.636500397739	0.61062601296	0.661459677874	0.732337220327	0.631496355768	0.908404482323	0.568378170025	0.617698639137	0.816283724489	0.671241462169	0.923355586561	0.680326410692	0.627934496737	0.617461082648	0.770788448059	0.80610103214	0.741521044915	 [...]
+M34Knee.140362	0.818819926597	0.59255591167	0.705635977845	0.795425275202	0.730624974507	0.63811953716	0.779479625338	0.78505664526	0.759378705648	0.767789816212	0.77965588184	0.761215485482	0.604544404247	0.730920056542	0.743022206863	0.696090068855	0.680492285887	0.706329627337	0.818164849096	0.725655250208	0.898945970543	0.724131308319	0.620297808648	0.826736758857	0.707295165868	0.915881987308	0.761646720747	0.732438116	0.63501319274	0.794172322462	0.734625152819	0.790453699469	0.757 [...]
+F12Plml.140300	0.842916537292	0.636937450734	0.560258370399	0.750956663033	0.589351851852	0.710975583028	0.735049213586	0.7287539182	0.74518132144	0.824073337848	0.768670450465	0.849750483249	0.736765946193	0.854245134987	0.560830072272	0.581458499544	0.588582212392	0.629117808742	0.696826232233	0.550795135844	0.919402709579	0.563191799007	0.628521989788	0.85085000989	0.68735872682	0.935852442486	0.691107033862	0.578637037178	0.687962837717	0.820433301701	0.857201936854	0.759997750945	0. [...]
+M63Ewax.140384	0.6641481764	0.827754022168	0.835431014846	0.857621554683	0.846124990053	0.713294849352	0.586249880558	0.863853491294	0.868393654559	0.594053796071	0.824178901352	0.917794748863	0.68539229956	0.915354164612	0.841143559106	0.826258117727	0.847608000027	0.801093641887	0.847941639918	0.857911018498	0.631574408761	0.82111964169	0.790196706685	0.414669404974	0.696446347644	0.659476749298	0.830236662808	0.895059605443	0.848116254981	0.477629558626	0.923214751165	0.633915959648	0 [...]
+M54Tong.140619	0.903347255721	0.765028689032	0.687611586081	0.159568590102	0.777484734744	0.797655143125	0.871158236451	0.383135247279	0.422010129827	0.859750560532	0.3406209494	0.846490894028	0.732480620868	0.798886530292	0.647879388924	0.727345075051	0.739136098271	0.553253896838	0.855794690746	0.6426894634	0.922387910561	0.730856460991	0.740232045873	0.917725856462	0.715785712965	0.923160159715	0.837422047199	0.837402971264	0.797184366316	0.846319932161	0.855019331753	0.796756049733	0 [...]
+F22Pinl.140717	0.724683026298	0.836912967822	0.812558303752	0.845683300223	0.787805587949	0.801007536153	0.656488388229	0.852766781105	0.853094811538	0.671581338296	0.815531957756	0.907271398972	0.757709761132	0.884992316727	0.857703739801	0.841278533834	0.825639551881	0.827783716284	0.870187453404	0.851825739636	0.579342259558	0.800287711946	0.829395224468	0.639457324181	0.655350058907	0.716346572864	0.826292225368	0.871975588859	0.847567176084	0.656516111777	0.90408630748	0.69611251930 [...]
+F12Kner.140545	0.820483745416	0.657857247555	0.599923843738	0.746352135359	0.666557528812	0.697510378484	0.760907204673	0.734275203062	0.75116440422	0.814623207421	0.76946293968	0.876933318601	0.717883532688	0.873996813581	0.596330366814	0.619294454983	0.649275284497	0.640202489122	0.706349704362	0.61477860708	0.911204857226	0.600811430693	0.613836646242	0.842728210591	0.69881033905	0.929776062523	0.697895735166	0.635538154843	0.642292429898	0.811275250054	0.854774732429	0.775594154861	0 [...]
+F12Mout.140450	0.858578376648	0.732729915857	0.677447408019	0.362714155874	0.742791537579	0.748077925744	0.826091605438	0.279693198692	0.277808133086	0.810167126206	0.311708313647	0.848793591577	0.694301506388	0.789990067843	0.580391455361	0.680879612752	0.692821171941	0.501507103797	0.827230904687	0.600934214492	0.914679381883	0.689256192013	0.728785025616	0.900546845565	0.64665931912	0.914943562494	0.80235328979	0.824944557702	0.770696258281	0.846319889256	0.850140676902	0.735244018357 [...]
+F23Ewax.140304	0.700743016604	0.899457497635	0.901225893192	0.901064607484	0.885090862867	0.841955609667	0.789972490642	0.903388200062	0.89585436327	0.735742598339	0.876500134197	0.953388556508	0.803374433217	0.951926662172	0.90351594704	0.89762137234	0.881431277356	0.888951908531	0.894025910833	0.910761600368	0.312933957694	0.888103620961	0.882613897631	0.646239444683	0.749049279037	0.527400673319	0.855275495287	0.93003962017	0.900759994177	0.717461594712	0.937908963919	0.71353193252	0. [...]
+F23Frhd.140622	0.661553272614	0.819978477694	0.802913737161	0.838099451507	0.781753346949	0.74356641652	0.674099018525	0.850658339031	0.838782893922	0.734350085599	0.804502581795	0.917630270956	0.800895305414	0.892034947275	0.819669030961	0.817372469836	0.800421121495	0.813689333343	0.755390581826	0.840663969769	0.690821808933	0.793234116821	0.788132755634	0.69974430423	0.749187837882	0.740212680463	0.736040382049	0.861229250268	0.867083082192	0.73036069206	0.919460344837	0.646402459182	 [...]
+F12Aptl.140591	0.805699246108	0.61336124891	0.602277846986	0.776951281769	0.596271992158	0.649401629656	0.741612648341	0.739585489008	0.735876506237	0.790352151593	0.750968334223	0.830636833273	0.679592526067	0.827870293086	0.586359189066	0.607451620962	0.606298279583	0.65572352684	0.657089363677	0.606989741574	0.897001460281	0.593661600422	0.60184034823	0.820546617773	0.668259542816	0.918563466362	0.664926216556	0.63975487296	0.702149513183	0.793639071681	0.856408581898	0.715799314984	0 [...]
+F32Fcsw.140706	0.956173386145	0.828713233691	0.88067234542	0.867398333096	0.892632321037	0.885881100548	0.928468730402	0.884158090136	0.850273444614	0.907496824497	0.86180851225	0.64920447434	0.849951849347	0.646123096056	0.900838097049	0.890069731664	0.840738508523	0.869388418335	0.940926758314	0.906241007936	0.962986448083	0.898859259715	0.91135016135	0.947846326545	0.885490536877	0.960680825896	0.936303968206	0.863989083749	0.809818814846	0.912350619158	0.535267178388	0.912710264954	0 [...]
+M22Indl.140813	0.869972366129	0.691515582131	0.709809336549	0.758810131972	0.727525563088	0.739014432059	0.809054336168	0.743853149306	0.70604386839	0.82955462989	0.749762404009	0.723328854405	0.697590814224	0.655526795678	0.722434309549	0.708591137343	0.633601537234	0.693910149013	0.842106077288	0.693723125718	0.923180451742	0.711963306855	0.675662475892	0.858632714735	0.710592321314	0.926172802215	0.794487915776	0.727771624316	0.629897515695	0.833085239202	0.741365608913	0.810868108166 [...]
+M31Aptl.140756	0.605924016181	0.826051246051	0.850256075104	0.907363590655	0.851633459915	0.73341795718	0.565721273588	0.876734285586	0.878611260528	0.529661248091	0.886453000033	0.934990631114	0.693156465534	0.936869619603	0.857365925992	0.847806970509	0.853003297822	0.814813857093	0.84982043979	0.870822877564	0.620524551194	0.834403484905	0.800562939668	0.364812409627	0.701986562891	0.621746028199	0.836023098131	0.897192981817	0.845937109593	0.537880616372	0.93188095107	0.637924522618	 [...]
+F24Tong.140829	0.901106011587	0.767533657759	0.69140886224	0.211325113814	0.782137452368	0.76074595694	0.859021861566	0.415115270759	0.432673264274	0.845963412476	0.325847429473	0.827713500742	0.724999248267	0.770297420949	0.647900532409	0.706683949124	0.729386190476	0.530143348035	0.866272474183	0.658184999264	0.920392383068	0.744892361565	0.73382312398	0.905339253442	0.681117291862	0.921043964461	0.846349570358	0.833138947899	0.774555040797	0.850706255756	0.825786698005	0.778242736915	 [...]
+M42Tong.140857	0.890446712318	0.776156762567	0.705346172845	0.298207965084	0.783083946981	0.775211097475	0.855936231634	0.324896616898	0.352648194744	0.841262916269	0.278880265002	0.844420882782	0.718260748106	0.781292376389	0.638860344205	0.716357349365	0.747767860241	0.550764903438	0.854394282633	0.647273172759	0.910765408019	0.727715210218	0.741605947897	0.906402398597	0.674257309389	0.910727920131	0.834829564188	0.847876376705	0.791135803931	0.846319538567	0.854257988179	0.7668621893 [...]
+M64Fotr.140624	0.773317084517	0.731810652686	0.70972312308	0.851520126446	0.648896743728	0.705740811194	0.715687036677	0.841463320538	0.809442099512	0.781286499891	0.857513035631	0.885017157096	0.769383938014	0.881404002775	0.738114212296	0.679865759951	0.676286594684	0.794254320886	0.660882893549	0.711110537146	0.89571860941	0.645206981601	0.744451511002	0.84345287667	0.737407646594	0.895642769819	0.678821308287	0.594457176615	0.715997094413	0.818463118992	0.874674227041	0.752487401956	 [...]
+M31Frhd.140336	0.610404988126	0.726659697619	0.746992921184	0.775820936481	0.764469210237	0.558727811971	0.641581197838	0.752647680828	0.751628755937	0.507843273957	0.726898470547	0.856615960414	0.548229370209	0.836309159839	0.762150896798	0.746674388965	0.7362593595	0.707982108182	0.768312767601	0.756172605834	0.74176568052	0.724282793728	0.65300863198	0.600772832336	0.510840785004	0.772002584643	0.723411723412	0.812074909215	0.732359145796	0.58186823158	0.847598278277	0.546837117911	0. [...]
+F22Fotr.140698	0.655457911114	0.703974011999	0.666068092501	0.736332324174	0.658884744416	0.566467969597	0.637810210722	0.715253219687	0.71703330523	0.620935745912	0.708161656849	0.848850084388	0.688723715496	0.818575401534	0.68720176832	0.646164318683	0.655531823936	0.646825404035	0.67492844364	0.693163742485	0.795663521537	0.633288715419	0.655198869794	0.71717675923	0.571189156858	0.804139593936	0.638992002113	0.769476569337	0.774296150956	0.62794650128	0.84808743429	0.510990316324	0.8 [...]
+M11Tong.140453	0.885912013678	0.725335341318	0.640078609401	0.240963612087	0.757627138384	0.735791424609	0.818686380438	0.352836960339	0.43628128449	0.805970802235	0.337620689044	0.840718705054	0.697570621881	0.788953318895	0.556723863878	0.680683263428	0.700304131869	0.449344413604	0.852407039163	0.616279322093	0.902544502088	0.692461242642	0.697661771102	0.85993783736	0.668132236168	0.927553139528	0.827421565873	0.815375117437	0.771620609339	0.78778178943	0.849049932817	0.759764453384	 [...]
+F31Frhd.140832	0.727422036516	0.66632027426	0.657424720708	0.763528931147	0.684430425326	0.592596879767	0.572375336476	0.729058091225	0.755211447173	0.630712269972	0.724107692047	0.853398812335	0.617453297247	0.845325705272	0.678947419898	0.65314203278	0.646285960534	0.694485460186	0.716028305816	0.701193380131	0.826650864436	0.651090482629	0.618704337731	0.734737085728	0.523812225753	0.857152036784	0.625806885988	0.743301064248	0.713212629626	0.700076780394	0.864307614205	0.602410700446 [...]
+M41Fotr.140652	0.507282820575	0.829614081828	0.853118201437	0.886402729868	0.830224574884	0.763194493624	0.702615158016	0.85648813099	0.845543260465	0.575273084427	0.860075187755	0.929906850686	0.799390071109	0.921160565274	0.846673492103	0.841116098789	0.837862116198	0.845189176453	0.813902425049	0.846555416225	0.706361301611	0.833355674773	0.814796917573	0.708126937925	0.692271351031	0.578447903457	0.789228887389	0.884843063663	0.842039136447	0.670542697441	0.929669430183	0.69044652663 [...]
+F31Plml.140322	0.794424020982	0.714240627145	0.655002571777	0.806572641165	0.588334010348	0.710741665817	0.699740939617	0.772973767652	0.794006230635	0.80788679226	0.77978141513	0.890204131911	0.743386324611	0.876424196586	0.681532055426	0.666220259106	0.643173189775	0.697747171924	0.713797499349	0.693139247685	0.905174329018	0.586317071899	0.692650577619	0.825601054635	0.708485118293	0.921478826681	0.676745450646	0.670638242452	0.753430203641	0.807929841897	0.888401439585	0.724420980566 [...]
+M21Ewxr.140654	0.788352824373	0.732934005435	0.740637338523	0.737338534571	0.736852351468	0.712200915131	0.748601175277	0.750804179642	0.71035100516	0.667912344714	0.698638673952	0.742391080741	0.634116331945	0.670390594401	0.763431291132	0.750110907099	0.689231997913	0.728849942409	0.86305993055	0.774056443505	0.764831065826	0.737626271186	0.769450016565	0.75309727935	0.586696532028	0.832972717755	0.803564644311	0.801444116257	0.722813047926	0.665945622674	0.756903997512	0.707243742987	 [...]
+F21Forl.140295	0.812226930294	0.640414251683	0.554852474404	0.724154348519	0.629211270181	0.65845721674	0.72831357101	0.691383168767	0.708012342949	0.806602706612	0.735601175457	0.884270038291	0.724593169078	0.859692509615	0.574115504214	0.568362518047	0.605344281574	0.607872477975	0.679566037947	0.579231439702	0.918407672064	0.555279287651	0.608460368033	0.841686630398	0.671703838365	0.931504822889	0.641426231488	0.628981775218	0.689227450093	0.811898003364	0.873775633439	0.768953836026 [...]
+F24Fcsw.140765	0.923957390414	0.793073612828	0.855373824095	0.792947539386	0.861051980611	0.84685768395	0.901159697394	0.808582813245	0.793049733339	0.881390162481	0.774971891163	0.728681775876	0.790320044227	0.675548797839	0.858165493019	0.851403431979	0.811338958086	0.823713141658	0.913592238772	0.851539404694	0.944639539798	0.856666366068	0.853472027376	0.901494575242	0.819571787355	0.945099072739	0.900043353113	0.831204245432	0.697601683889	0.896742289026	0.566933202078	0.86346359907 [...]
+F13Plmr.140454	0.835754599596	0.664161997218	0.591673777607	0.75397696349	0.6306986285	0.679583457824	0.75001384858	0.717613454267	0.728061298164	0.804389550976	0.743989527997	0.862256054146	0.743566494066	0.838500688602	0.62881955441	0.516786238154	0.574811412607	0.655990354063	0.724962116243	0.617450774858	0.920461447652	0.597598078557	0.6615104836	0.852707818074	0.678949960804	0.934341169312	0.688342727683	0.639683294022	0.693968845261	0.834478745043	0.858284821125	0.771304312032	0.87 [...]
+M31Indr.140588	0.824969907927	0.586609188252	0.573871848238	0.570404934966	0.608697473564	0.659295433495	0.718773366502	0.500381078611	0.54067062766	0.772084026576	0.543045761054	0.862920529049	0.661000787661	0.806552347056	0.519921014993	0.612958528852	0.567265636013	0.445227325803	0.761799131683	0.530758208548	0.872856390347	0.606073492327	0.549172041474	0.811865733666	0.585002670452	0.916790588841	0.699928381042	0.710294410589	0.686434327399	0.773208904165	0.853675867188	0.66752961161 [...]
+F33Fotl.140406	0.842923480185	0.709948724455	0.653266350037	0.814033343298	0.646558026671	0.742012181495	0.82081288147	0.816968900945	0.82670457356	0.839053057211	0.830296613652	0.897834676862	0.791498521248	0.889162974629	0.694595320122	0.63657829723	0.665507837359	0.745918266104	0.676416392395	0.687877881016	0.930962254141	0.646069931209	0.698932136397	0.890965812925	0.781091315586	0.93904048999	0.6941829996	0.622234185108	0.757080827323	0.855775951786	0.897613336371	0.794673197513	0.9 [...]
+M11Fotl.140711	0.646753366202	0.738230500752	0.713951718914	0.837775236986	0.74486161749	0.634955486044	0.684441547752	0.850552684837	0.850718711406	0.627590392786	0.814921520753	0.881953762501	0.671432164108	0.879006222851	0.754307592679	0.764700584753	0.7503403482	0.773102498366	0.741204816233	0.772296135882	0.836213225137	0.749507622792	0.691023464368	0.722729437612	0.680683293768	0.773918444154	0.686249311548	0.808611972843	0.768100338928	0.607469871633	0.887333599106	0.683118710766	 [...]
+M64Tong.140427	0.895435592221	0.75699629726	0.698330454391	0.33806573443	0.787668995866	0.753446811913	0.851765858038	0.433472386146	0.459728991741	0.837150386922	0.390852385645	0.810307979118	0.717175575517	0.785944575036	0.647247139932	0.713467820485	0.741537149485	0.520407865487	0.86721693909	0.704281385921	0.915299413536	0.738078462982	0.737647071433	0.899693472368	0.687104513247	0.915608665219	0.843199785864	0.829744619216	0.796697164412	0.841887306728	0.806920230943	0.768748653196	 [...]
+M13Fcsw.140390	0.941038666992	0.792167750594	0.861435461304	0.841809818619	0.869360843466	0.858928003047	0.918232717804	0.857115893567	0.834695118169	0.905660391803	0.83883628234	0.664538813108	0.836198931235	0.647810349838	0.874402897025	0.870389645859	0.827579230901	0.844130709212	0.912943969151	0.876106590445	0.968643828381	0.876674380071	0.880111590071	0.929945312613	0.876931510651	0.967004759028	0.908252283096	0.836588316961	0.776920805012	0.918686667991	0.380758617862	0.90631667885 [...]
+M33Ewax.140613	0.710097597773	0.802858066922	0.788820957166	0.754871631745	0.761366813882	0.716090498575	0.754630063538	0.76390648177	0.741121374681	0.652366728608	0.728458715083	0.865153097686	0.735144064914	0.861694053983	0.791452565521	0.795682581932	0.775725856955	0.767111631072	0.749191695354	0.810918181009	0.655771767606	0.772641900303	0.764646624244	0.740132205326	0.697788500201	0.731187800909	0.721670772307	0.854723123863	0.849574059694	0.570640312743	0.892878943347	0.62641910372 [...]
+F22Navl.140838	0.681657100216	0.682390251082	0.589330182283	0.763957485201	0.615634187105	0.654455777833	0.562618129884	0.72848940417	0.742118972007	0.714639020473	0.783404673281	0.896327091078	0.717628173256	0.875391841664	0.647438886974	0.633366375862	0.664090580183	0.626170683415	0.755840206787	0.665829147456	0.85003409763	0.655528843063	0.625408108337	0.709993870573	0.613433818569	0.870466095586	0.642780031124	0.762777900343	0.721337290993	0.718745219921	0.88810807708	0.648567741597	 [...]
+F34Plml.140846	0.81410627701	0.619528872876	0.593078272376	0.717640754402	0.519813253819	0.661917114708	0.739802917144	0.677605776615	0.676593585748	0.793901914308	0.699619030524	0.842232762579	0.676093594687	0.814638984862	0.626211015618	0.548574261365	0.515009120943	0.617451494275	0.677308881307	0.603036313397	0.908028842881	0.539234411697	0.616541307596	0.854669454882	0.637562320254	0.920141972246	0.655909587831	0.626025425407	0.6778480174	0.804740463793	0.840362697269	0.727579159493	 [...]
+M24Tong.140439	0.852879222114	0.725444108813	0.660003045067	0.312363317831	0.746210099932	0.754001374393	0.829900325074	0.287623541007	0.323386523353	0.80261481681	0.269642269628	0.844623440675	0.698508391134	0.784744579149	0.58758082297	0.690377780703	0.702032898954	0.517134460732	0.812365114893	0.605524215405	0.910949562927	0.68902476197	0.713077939867	0.896513641198	0.642977118092	0.910926935346	0.804013575924	0.816118987798	0.77519375806	0.825326744688	0.847461733812	0.726367168045	0 [...]
+M22Nose.140594	0.813851492657	0.625275736356	0.678539060745	0.735648739708	0.701903085486	0.697740088137	0.766081281666	0.693139704915	0.673584359878	0.780392954089	0.702145691376	0.705534766098	0.655800959167	0.631588828085	0.685432385374	0.686068357297	0.620079518804	0.651361776229	0.824198486645	0.686267683406	0.902989301095	0.702682881255	0.66838552638	0.81984423005	0.680867431628	0.919903513172	0.759885268984	0.734977846849	0.632790383202	0.779486796539	0.717969251912	0.786545197888 [...]
+M54Fcsw.140330	0.934197464853	0.842655009703	0.855154050623	0.839194399007	0.86263061991	0.825112186538	0.919100689094	0.852903804584	0.826053379413	0.870620926393	0.81728686637	0.399655231173	0.827091191048	0.605767069787	0.876074285804	0.865473346973	0.816741921134	0.851215612708	0.914310782574	0.890150376713	0.950659598649	0.877838213775	0.886071608213	0.931660528834	0.863740121095	0.94647574691	0.898080772565	0.866075538107	0.832136696436	0.875768826238	0.696087149844	0.900472319602	 [...]
+F32Forr.140528	0.758892358035	0.580427647707	0.624498540565	0.689951698615	0.608251202328	0.607667775066	0.636340799126	0.670180865578	0.674828439898	0.725190931178	0.644166024938	0.842901819955	0.649051038895	0.814927987152	0.608974706096	0.574104973317	0.534836307069	0.596294153883	0.722284709738	0.588089763	0.884493333794	0.608794224421	0.589219007735	0.778118499044	0.504645053853	0.89917974197	0.675903244803	0.689192221892	0.652021329543	0.753602094805	0.840086782165	0.690590065381	0 [...]
+M11Pinr.140546	0.635112538116	0.711487038577	0.682678462148	0.834408916232	0.675377063496	0.661135397937	0.642711539931	0.807461320799	0.805090196779	0.679171877918	0.81626518074	0.904600355325	0.66754676372	0.892487134094	0.69901882882	0.704335816432	0.69731149143	0.733841366313	0.693736310258	0.731529739563	0.834828509652	0.674585280517	0.68545524019	0.725975366703	0.66020414971	0.864832248984	0.633463483338	0.762685877625	0.711531002497	0.695896180162	0.90023466027	0.66908958656	0.916 [...]
+M53Fotr.140451	0.862315755179	0.672180308882	0.631797137976	0.770162761986	0.664776199958	0.729982774235	0.796770764597	0.751330496738	0.747303526352	0.851286902543	0.785683944211	0.861188756281	0.763901685766	0.835492399175	0.644544428859	0.607411028625	0.636516976051	0.695277736019	0.733468126733	0.624514063675	0.936586186553	0.645413452776	0.666831610387	0.890394121039	0.743134643268	0.947124477602	0.724524836835	0.626840383363	0.677779838738	0.854782986053	0.847389635873	0.8187842929 [...]
+F21Fcsw.140506	0.942523647967	0.823840286251	0.863617239345	0.777890216244	0.86730321922	0.871394729069	0.928958812722	0.797453841058	0.756710404731	0.906392900801	0.764075410244	0.714307905623	0.806735602419	0.665026327966	0.854153424778	0.846658297372	0.824725087402	0.840882425854	0.911704490928	0.853341181995	0.953563695624	0.860585760564	0.869188186729	0.948431948042	0.854467826159	0.954400470317	0.906280283587	0.840543168529	0.696862775832	0.913285766615	0.628078722808	0.89987220796 [...]
+M33Fcsw.140557	0.948113251229	0.831219024994	0.897668274808	0.862678380986	0.894586061773	0.893324008147	0.931869739711	0.884049791818	0.850535379109	0.91720274002	0.862554359932	0.676382281914	0.838086253844	0.676572762265	0.909684926558	0.903500340099	0.868052125395	0.883859429182	0.931301598616	0.895129010678	0.965019754722	0.902831082297	0.913591623604	0.947757268804	0.888328533411	0.962967511951	0.922189263628	0.86306892314	0.781169615174	0.924782039975	0.538149917789	0.910921218752 [...]
+M42Aptl.140537	0.639884047076	0.695249072178	0.741502265557	0.837841610082	0.754973797542	0.668121882392	0.615354199652	0.844911283163	0.844768773211	0.565232568113	0.820716766683	0.895371469831	0.65355457382	0.892694328167	0.765744722266	0.760217021152	0.750622904701	0.758918629325	0.752596373498	0.782478731744	0.772866088643	0.754355953335	0.684896340908	0.600205271166	0.631086801565	0.785848290051	0.690663031793	0.82567701658	0.757679501586	0.253815061857	0.898141634167	0.656814978297 [...]
+M41Mout.140367	0.908154841466	0.748254604538	0.694997689778	0.319196330515	0.775486212479	0.806065074815	0.87388656932	0.363761522099	0.3218303685	0.844290120659	0.371830659908	0.852213918195	0.738193553631	0.794746631888	0.624157116241	0.719929830915	0.736988583739	0.546099184634	0.857343674681	0.641971900216	0.926635481685	0.726678676598	0.748692831011	0.921924576148	0.720476135289	0.927639591325	0.83823640604	0.834393088594	0.795199854548	0.851887140494	0.850061974392	0.801016091456	0 [...]
+M42Forr.140521	0.831933948062	0.609568104483	0.634740033972	0.715048180706	0.613558905902	0.683307151643	0.746297485749	0.674097976709	0.667112781772	0.793009792405	0.6909930343	0.894721649198	0.701664489359	0.866378820589	0.541152779292	0.584823322994	0.571616781435	0.592728626758	0.737845934306	0.581672796765	0.913381630337	0.574236801828	0.600724386933	0.837962612886	0.60457981094	0.927564164231	0.696036415116	0.665322165636	0.707130560715	0.791852112951	0.879637245673	0.705077711674	 [...]
+F21Fotl.140348	0.68975507596	0.91714850313	0.921018888308	0.911955955378	0.913007237101	0.839316323656	0.814248063566	0.913260428278	0.906352081343	0.753127984785	0.889494543592	0.951752882832	0.851680179902	0.95018486188	0.92256210277	0.917349662836	0.916394461325	0.914506662971	0.88674078163	0.921736090032	0.560059242822	0.904412608887	0.906969607699	0.688962535161	0.845633087996	0.297017674803	0.881582791569	0.935357653694	0.915720131814	0.743527416098	0.947314257505	0.788873685656	0. [...]
+M43Plml.140405	0.824826716137	0.611085595384	0.544501409522	0.674336354112	0.594733315998	0.693598275889	0.730543075092	0.64298062212	0.627242709214	0.807881186004	0.685568071916	0.865010787587	0.661132943894	0.823686744284	0.569068734123	0.548394908995	0.530451236792	0.586554037152	0.69950334826	0.542446766924	0.902507284245	0.569791247344	0.617705470816	0.834493064393	0.638090782227	0.929604611766	0.703244358702	0.647401490606	0.663442433912	0.807195849585	0.861697359389	0.754762155609 [...]
+M21Forr.140361	0.850729952376	0.609898738532	0.625491097481	0.688916660769	0.610999759622	0.714355942787	0.746333730797	0.6735739792	0.626524277703	0.818065495023	0.714921310698	0.817626001044	0.651873492097	0.785769099919	0.609024749358	0.629051096094	0.573005984959	0.631210265902	0.776618438084	0.618544155234	0.924842831786	0.609037355642	0.661116997299	0.851515633513	0.686983099358	0.935814456018	0.703361073377	0.677478813635	0.631490337512	0.818335151446	0.80708311881	0.785645757282	 [...]
+M64Ewax.140550	0.590001276161	0.822464070948	0.845958716999	0.907423933247	0.849379194359	0.723028820517	0.55955187219	0.87680956304	0.878690142043	0.51857891096	0.886543645901	0.935033450583	0.677880440474	0.936912278385	0.849997376419	0.841665948239	0.843895337459	0.813899304671	0.84869250046	0.865911901298	0.607892982635	0.828994566149	0.796786160607	0.327058850067	0.685478399138	0.605537628343	0.825572809195	0.89267186422	0.844156325903	0.526409929888	0.931915596145	0.625295251446	0. [...]
+M34Ewax.140864	0.703021868787	0.875094964884	0.873404443945	0.877518955801	0.855310477453	0.803580116556	0.721367904566	0.877804702238	0.879806805257	0.683184748886	0.847838614989	0.938402143604	0.748304765642	0.93652539789	0.87837146854	0.875943258238	0.864872065727	0.857028157266	0.889948106546	0.887063958557	0.514572052402	0.860594867863	0.840985215631	0.583631591506	0.747245994797	0.605176087792	0.843392875055	0.912592168983	0.871559972252	0.642314436002	0.926283265006	0.653465418093 [...]
+M12Pinl.140722	0.625926568818	0.67202051097	0.67814463036	0.76792852954	0.702361059524	0.555230607078	0.659121879882	0.733790929311	0.737888177969	0.63842176259	0.722137249239	0.855405269269	0.598322587792	0.858710555019	0.644990483645	0.671498727656	0.622126865374	0.696415287052	0.710263673083	0.652603257519	0.79337723455	0.61921355369	0.639364880853	0.725424291594	0.533784546406	0.857658365245	0.636863194628	0.734211376727	0.685068872373	0.677171540679	0.864554654525	0.649717514124	0.8 [...]
+M63Fotl.140731	0.832433169729	0.658422014044	0.662945367235	0.796413053043	0.670957747452	0.675675106625	0.76303925122	0.762328136857	0.783621421962	0.809189773094	0.776010678395	0.884655379343	0.742301732431	0.876277627351	0.646570315016	0.624553527036	0.623378225219	0.695767838977	0.702041330774	0.630383241495	0.918420063377	0.617388877032	0.676378240668	0.853700598766	0.68975288464	0.927081158638	0.677992574508	0.604448314426	0.719477066253	0.821220462132	0.880281968464	0.77650138788	 [...]
+F32Fotr.140582	0.778042601146	0.706397011001	0.674031335207	0.82450220207	0.709498856952	0.641569394039	0.695364978759	0.816730047758	0.817666964985	0.748426633505	0.81716651305	0.871861614589	0.697831072288	0.875727665302	0.689089014065	0.654991463342	0.659053971583	0.744116550551	0.700807421246	0.68374239167	0.886194101578	0.629332187068	0.662329056918	0.809309025695	0.641028783545	0.908209039823	0.677999646887	0.684972252426	0.685034182408	0.797600882416	0.862073392622	0.699222265558	 [...]
+M23Knee.140621	0.770028096004	0.663740445076	0.631344594837	0.80893282691	0.553132939981	0.683333744185	0.719888928472	0.788364859362	0.754126994845	0.75100317552	0.786966750973	0.836031881516	0.696852769184	0.820917413316	0.661774281271	0.642970216095	0.584004306876	0.715909541691	0.628405811097	0.670401754304	0.885704013841	0.620562450452	0.627549921311	0.82976023439	0.680359772049	0.894307540716	0.641992361936	0.654888446159	0.664144212061	0.782857634095	0.843331830264	0.65355635571	0 [...]
+F21Ewxr.140299	0.735728771905	0.868718643672	0.88540940496	0.881744683483	0.855964688005	0.839931668105	0.796461106604	0.885800747641	0.877171057517	0.747345815749	0.853537213901	0.906899978147	0.783190655557	0.909747144885	0.874521656568	0.871793996835	0.853015649531	0.871189157301	0.898844887343	0.883372077723	0.451940714816	0.874782097209	0.865141752065	0.672446838496	0.743704010427	0.626021964176	0.868737613987	0.904386602313	0.868831174032	0.732005058592	0.912035315814	0.76017868148 [...]
+F22Fcsw.140281	0.946913091634	0.824817023307	0.864557049032	0.800232765113	0.865487363113	0.879523154206	0.927841711878	0.820765007549	0.784596584768	0.913506390278	0.791913525347	0.712751135708	0.83385823946	0.670933175109	0.868434178049	0.863793384967	0.825835063134	0.83934096213	0.916168730482	0.847944850697	0.957380871014	0.860005739554	0.87952967841	0.952463603787	0.855159127136	0.958323568248	0.911163675435	0.832506149874	0.704390938056	0.919997766573	0.624518343056	0.908065218307	 [...]
+M22Plmr.140574	0.840826302978	0.633843500641	0.667727661581	0.750215726581	0.615857818689	0.688023021808	0.749611849224	0.722080898971	0.696021001273	0.837864011546	0.741575391253	0.822377177059	0.730276071289	0.787404611961	0.630349013812	0.635175936472	0.530800974466	0.640508973258	0.758896050718	0.608664851791	0.919009361893	0.623619356678	0.661219803838	0.854163487496	0.68981534505	0.940226693326	0.712967110195	0.635593834488	0.649456148636	0.826198193306	0.815651175635	0.7862124184	 [...]
+M43Tong.140396	0.891102807765	0.775544073364	0.697939567538	0.276782096285	0.784364542792	0.777924692932	0.856525427194	0.361819527916	0.384781967171	0.842201858457	0.283649515566	0.842579823197	0.711768836309	0.779456523309	0.652478075454	0.715014419107	0.749394227991	0.541152446446	0.85711653424	0.647507363134	0.911364556173	0.728206071089	0.740774292315	0.906980291287	0.677038699214	0.911375174274	0.837891744162	0.850203223589	0.790359880173	0.847259020315	0.849768160527	0.77156573125 [...]
+M41Tong.140317	0.905237734761	0.78033445225	0.713023675199	0.328460734987	0.79488702277	0.779787699315	0.864360138078	0.385773666751	0.393982542006	0.852386287711	0.381692874028	0.830136274176	0.746799809881	0.781495170567	0.627657079695	0.720998800518	0.748665296945	0.533775950505	0.869567496609	0.674815320184	0.924063512129	0.749481454516	0.755193897394	0.909437587052	0.701409328175	0.924931274084	0.848744963421	0.837959479311	0.78348259405	0.857113987159	0.828369859534	0.787602645368	 [...]
+M42Pinl.140354	0.766138726705	0.680316543624	0.653276171126	0.677394722153	0.648924292362	0.675589769595	0.68161242355	0.624075516224	0.627941188063	0.667425652789	0.632708500036	0.882913470598	0.656904881003	0.861651719564	0.614017373137	0.691951018306	0.638024137649	0.562954210266	0.765016883363	0.666202757361	0.800620318534	0.626623270186	0.665676240502	0.755160108944	0.561446526903	0.861445022899	0.704736422806	0.799038528621	0.779611115171	0.665651210751	0.878522301279	0.57586923102 [...]
+F32Aptl.140820	0.843131549459	0.70049238672	0.678421079012	0.762033742288	0.668164822735	0.70589694755	0.711865336286	0.735694482588	0.73880953006	0.808134654229	0.7435700714	0.895194217468	0.761504268428	0.863829096332	0.676330076124	0.669928727882	0.620179908789	0.706695334201	0.699221889469	0.65837833215	0.906855428786	0.649620570137	0.689832219523	0.857363108651	0.676573868776	0.934119499974	0.682708303306	0.709339053616	0.731300513942	0.82101042451	0.890149684793	0.786518131093	0.89 [...]
+F11Plml.140803	0.834178602367	0.631181419539	0.595647432975	0.711173732886	0.651420573282	0.704396859254	0.768861753441	0.648992681199	0.674201994051	0.831066465053	0.714138676046	0.88303458522	0.719436835996	0.866491041178	0.45579653564	0.550930432483	0.567568447563	0.593060359778	0.726726048984	0.508256675833	0.900009130369	0.581281926545	0.638109604553	0.862721672799	0.689702296056	0.937139915632	0.690351481177	0.638313219965	0.687708413814	0.833950150539	0.884775598651	0.77993926091	 [...]
+M33Knee.140298	0.79188542592	0.621780331121	0.706692872604	0.761797888122	0.735309064334	0.629226402861	0.7229411604	0.750335128139	0.720347431004	0.712681627068	0.746428842291	0.809743609844	0.473104204343	0.790019883684	0.718013196504	0.667658867319	0.668998290754	0.663128235089	0.820621475181	0.712071008074	0.868706268783	0.703811801653	0.566839334982	0.77982078263	0.591192650462	0.888058382726	0.770589240439	0.744405779793	0.592825492141	0.739531019223	0.796028703495	0.719862542955	0 [...]
+M13Fotr.140755	0.69243382434	0.652581996394	0.650387426976	0.837423786643	0.627104900548	0.613877694818	0.654360989443	0.815327798721	0.812400742173	0.682475749545	0.814090381472	0.886633784388	0.666110566824	0.882230658558	0.691242247218	0.584837955939	0.648876674929	0.728434417074	0.652394214114	0.671186248854	0.86482797086	0.630957190672	0.62160161133	0.748038490774	0.663713339852	0.866360465687	0.59434763173	0.684369225057	0.720935511833	0.72714010258	0.890071923561	0.677180402796	0. [...]
+M31Mout.140342	0.899072141751	0.739410806326	0.68258170798	0.411596990606	0.763536187165	0.77839314901	0.855629345673	0.353588569687	0.421468454712	0.846214213128	0.460681815044	0.849465099397	0.732445257726	0.801707191939	0.517439198982	0.705699671734	0.716859318075	0.496651314137	0.86062129578	0.645478811745	0.887782041941	0.724256569217	0.718960339099	0.89497010935	0.718966239508	0.930378931023	0.835793887367	0.820055163185	0.788938659569	0.834296720919	0.846746124074	0.777004260886	0 [...]
+F11Forl.140437	0.801530748421	0.570518023572	0.584544281496	0.747630617792	0.619425257086	0.679203365389	0.717625195228	0.704255868468	0.689556899104	0.795383921191	0.745759417113	0.850403115004	0.670778398809	0.80044039696	0.544169540375	0.564947719103	0.565357448637	0.609584703991	0.684311911894	0.552931209083	0.904242898618	0.601085014659	0.617559225617	0.810701786856	0.652961278021	0.923352279191	0.677834018907	0.61850644681	0.641139030333	0.790374547746	0.824185996658	0.76191468198	 [...]
+F34Fcsw.140733	0.940516261302	0.799631063466	0.864899405491	0.841087992352	0.863857828917	0.883142341778	0.919747032418	0.862716797707	0.814170296781	0.909049556408	0.836386503992	0.690362990349	0.85114311509	0.627704798265	0.883206488265	0.878696322755	0.830559661269	0.852929391746	0.911358868097	0.871810808854	0.964452151698	0.870232913963	0.891334700112	0.946949780387	0.871348458061	0.962330748073	0.913557455793	0.849830036237	0.788245113484	0.916614631574	0.581301135508	0.90964883320 [...]
+M22Forl.140587	0.841041437866	0.600973572163	0.674192167588	0.754231325589	0.666990115391	0.707278069573	0.7603544528	0.732822831472	0.694985496865	0.799783550627	0.731148026351	0.754153616415	0.660897209207	0.679856369741	0.643457016338	0.648092414349	0.567725197253	0.648974855353	0.775675740966	0.651006044637	0.914919089521	0.659528570811	0.644154607232	0.842193776548	0.672206745901	0.924308950301	0.745156309643	0.688454524042	0.613033620537	0.800304047614	0.749571813135	0.757430441745 [...]
+F33Frhd.140757	0.766804120664	0.632953571501	0.586376256426	0.749130946982	0.610316779193	0.638620850352	0.713329249522	0.713064152508	0.726680797317	0.753106330369	0.719625824064	0.86579203175	0.64407185383	0.851987503891	0.598172238691	0.558181905192	0.563373308841	0.646562777394	0.712230322241	0.595411361863	0.896693138408	0.582297078689	0.602410686332	0.802190168191	0.627902169859	0.905862536369	0.669689798402	0.636795560511	0.650325641923	0.777036652909	0.87262434383	0.709417108756	 [...]
+M32Indr.140748	0.7452181987	0.637994866208	0.531179163724	0.725124294185	0.572423797072	0.619114680061	0.682553901842	0.709271624686	0.718431555482	0.712971320525	0.699021606426	0.846811530096	0.668413003864	0.835312016975	0.602164255341	0.617774734494	0.56460598983	0.62268520788	0.648180095835	0.634178410396	0.876384196697	0.582738164367	0.576256346261	0.768711828252	0.626908645472	0.898814981191	0.610491995047	0.669680652468	0.709986356774	0.717432913052	0.85403114074	0.651374327622	0. [...]
+F21Indr.140440	0.82932272346	0.698073230281	0.640355949669	0.47153580645	0.702249433444	0.714525996458	0.770242815892	0.523383151889	0.530803753909	0.803457623614	0.573766138036	0.855850962167	0.73907647844	0.808498046687	0.546736051774	0.658463832489	0.632520505091	0.499627088472	0.794763476982	0.577951288718	0.905752677064	0.62284397842	0.677311188697	0.859348556835	0.655557242238	0.922978598791	0.733166455723	0.755388458924	0.731251347298	0.813175031913	0.83860967816	0.72962223871	0.8 [...]
+M12Tong.140559	0.881522513623	0.728956857888	0.674470559087	0.215871107398	0.757219212444	0.76348459018	0.841888865494	0.319777203729	0.429804347344	0.827243795645	0.241086474045	0.846671665057	0.716414177768	0.794235766789	0.602771820363	0.698639805477	0.712866959943	0.498954952562	0.848412080079	0.6207383673	0.898393741393	0.702390150521	0.705351827799	0.884958930378	0.68433319271	0.924332306096	0.820459269471	0.81867992381	0.788163056456	0.81204039948	0.849127540469	0.760312328146	0.8 [...]
+M31Pinl.140703	0.747451421995	0.646904613471	0.596423942878	0.729406217966	0.587929714973	0.589918226194	0.612630690374	0.720850889945	0.717731304515	0.693034567853	0.708611437016	0.866453549265	0.597718940512	0.829352188979	0.660594315867	0.591129217828	0.60572589019	0.600136657066	0.721589257415	0.65171999349	0.855953678178	0.594485999433	0.563546967498	0.737922104314	0.537108599124	0.885818180845	0.625963289127	0.706534418857	0.677974905855	0.734093225425	0.851643916677	0.640978144126 [...]
+F32Forl.140724	0.762266202817	0.670069137615	0.605589825768	0.758047562225	0.549315270885	0.680381005662	0.67974919008	0.730325491995	0.75491209291	0.736743077177	0.726378925106	0.856585740157	0.70424204293	0.851361846682	0.616919175254	0.631552406419	0.600227006124	0.667029059736	0.708959959975	0.642075907892	0.882907899372	0.585134449248	0.697107307949	0.781272158167	0.664504276443	0.904492847034	0.64133967969	0.691821766907	0.72398956217	0.762262996274	0.853564332651	0.700453476744	0. [...]
+M33Fotr.140533	0.73608275549	0.610766400144	0.660147957409	0.794931848929	0.651785001257	0.57354502595	0.72506015806	0.787934223617	0.765569005029	0.720682408947	0.790247199807	0.799835714769	0.617566535388	0.821597534372	0.696376140021	0.661003587217	0.642879325887	0.69281600891	0.675325874629	0.68712906457	0.877109095029	0.670078159436	0.618333632261	0.81684715062	0.691116511126	0.886083613393	0.65719685943	0.670889636718	0.67260091141	0.749072255698	0.828420888631	0.709161950841	0.832 [...]
+F32Plmr.140529	0.823254004982	0.65424621477	0.587531673405	0.691154743484	0.574554179186	0.695056679121	0.721368968144	0.660840264854	0.669582032444	0.796917672234	0.694027273651	0.895017039416	0.728200090893	0.856441508398	0.615056379555	0.597525770167	0.587724255227	0.61516440746	0.726060606863	0.529373386202	0.914704067857	0.570483845668	0.639774328588	0.827663923906	0.623620303718	0.928846314448	0.666251205116	0.66805760629	0.69500954907	0.806576572502	0.887505882231	0.714735773973	0 [...]
+M31Pinr.140627	0.663506662292	0.677293787154	0.728267683309	0.785630569624	0.700360911075	0.688386371762	0.577472187097	0.762859634654	0.775592911038	0.654252983878	0.741258213307	0.893082931506	0.628846803328	0.889370230479	0.723465714971	0.728226128008	0.685968801421	0.704043308231	0.800758251038	0.732178586756	0.778489381043	0.715841982432	0.650635493563	0.673078462541	0.528828635108	0.817181891127	0.725809652965	0.801399103971	0.73693362156	0.694585107358	0.87319428617	0.621860119267 [...]
+F31Pinr.140799	0.656932430944	0.740884040388	0.721901292481	0.855202601692	0.695835513275	0.627836251405	0.566751263464	0.82326160406	0.836003234202	0.656491700314	0.831635621131	0.904477053209	0.704643754859	0.906075024135	0.766218170695	0.731311147083	0.738739238832	0.778017505267	0.732530450429	0.757843927157	0.804488225742	0.714371093792	0.686106160878	0.685682418786	0.617348683461	0.821381194979	0.668857545998	0.801752049144	0.791473811707	0.74155511039	0.905832670952	0.654938011301 [...]
+M21Knel.140490	0.855674490544	0.64176701215	0.646819791022	0.715605366247	0.680592093075	0.71584379788	0.759227387335	0.70204285196	0.675303663531	0.82784227833	0.742770640089	0.822050413725	0.739385742328	0.754789636293	0.628090974479	0.645364158062	0.583299494616	0.642922972031	0.760551609088	0.623649592158	0.931750425894	0.645404967105	0.691150438694	0.868127304795	0.707889783877	0.938889613397	0.731532685027	0.673382965878	0.642885163624	0.827273305267	0.802831296241	0.804902155206	0 [...]
+M41Indl.140854	0.847415381805	0.668261294914	0.639878875494	0.418895167277	0.729525961888	0.733955689034	0.792375698593	0.402012725544	0.497257096513	0.788899922268	0.527438681605	0.855390626149	0.669192252308	0.817409184832	0.499280367036	0.627619830272	0.662282406833	0.445654158999	0.837710030618	0.593830432145	0.880933398276	0.671950394345	0.683073644959	0.841551785318	0.640050699206	0.920236143722	0.791938968484	0.786411673934	0.729479680561	0.780911577288	0.853459625228	0.7431083029 [...]
+F21Kner.140678	0.757026412525	0.583827322995	0.673907844059	0.772025742494	0.678155544971	0.616549304907	0.663086114168	0.752861226858	0.739100689022	0.720303892628	0.744493427231	0.842442029263	0.588279728962	0.82860787352	0.621813565788	0.629401650354	0.613198598923	0.681481839588	0.675875385429	0.678738246209	0.880356048048	0.610986622136	0.545725187159	0.760470441518	0.610766187493	0.897688888465	0.686952891538	0.661976547387	0.586584825169	0.744443049243	0.826740205401	0.7083804991	 [...]
+M41Knel.140855	0.836655919043	0.56383591946	0.610032826263	0.741743345007	0.665042828482	0.689888947143	0.766721162985	0.717686945408	0.693734592274	0.812936605251	0.761312735866	0.848433510181	0.713806994381	0.820193543797	0.59098643101	0.597397015691	0.587407025378	0.609328335783	0.723903522805	0.577595122918	0.928031300829	0.613261782679	0.640090265887	0.852570484095	0.702319325811	0.936913822752	0.697169311967	0.631099395869	0.648735454719	0.806491104686	0.832557948239	0.78660183505	 [...]
+F31Forr.140586	0.820844091288	0.643101357683	0.581661462121	0.695174898682	0.591951297366	0.665173100772	0.736370677114	0.662445854732	0.684112561337	0.790789749732	0.701474256029	0.872648035867	0.705881745696	0.838738431709	0.608063947235	0.551632586739	0.586793006337	0.612330280702	0.70220815189	0.593362071226	0.902986526243	0.60499581322	0.613899388662	0.844270672534	0.613997509428	0.921007044136	0.672620956431	0.667136176686	0.686455848653	0.811714045392	0.867550540932	0.715390908673 [...]
+M14Knee.140681	0.778383626464	0.615028958483	0.685571724565	0.781671652422	0.716815233148	0.664801771424	0.748693056536	0.76736096396	0.743125349346	0.743290682655	0.763463706599	0.791435119414	0.590515074475	0.777336876344	0.687802016198	0.674521225293	0.670129398251	0.678666354647	0.817307224311	0.710522721743	0.892117903757	0.691124235315	0.61173944509	0.791849111872	0.708502695693	0.90466851978	0.767693059698	0.71325906988	0.636882131652	0.735020694014	0.76630658303	0.781439937335	0. [...]
+F32Ewxr.140535	0.769120150664	0.656711327044	0.680641625027	0.806658274844	0.670849601166	0.666199753083	0.705803857134	0.775490946669	0.756623623583	0.692012254991	0.781349897536	0.782919584907	0.613755553574	0.759534397788	0.724997632555	0.675327100522	0.650709110501	0.722799796099	0.756146727937	0.725969173373	0.864957840077	0.69196942056	0.69243953847	0.77569315531	0.623569608133	0.881111517974	0.709365335908	0.747753511093	0.700272970296	0.745125165431	0.730316509455	0.704557510651	 [...]
+M12Fotr.140278	0.629499239477	0.720672207073	0.697801962969	0.861832905299	0.662211602452	0.631847507529	0.622475757673	0.839413933397	0.838574319532	0.65479954954	0.851645409686	0.911060502919	0.698401482285	0.911956926392	0.721241926157	0.696172038684	0.686584003658	0.752158797545	0.629336659862	0.707117656712	0.840106365672	0.675561034591	0.654950033403	0.704331132809	0.671359957286	0.844833173495	0.607454406112	0.740262397716	0.758071457856	0.645740160917	0.903728753043	0.65254696767 [...]
+M43Fotr.140475	0.675167838019	0.705191183341	0.682301505555	0.870478401268	0.659631933808	0.624139854548	0.64920140544	0.839363518949	0.849368311199	0.660568690935	0.839964931211	0.9176904327	0.729576234163	0.907280298476	0.707642919745	0.665310737258	0.649155185195	0.753105241788	0.660028741907	0.693679655613	0.858804931775	0.638473472559	0.67575364281	0.737885917037	0.655058420713	0.866782842773	0.649604326157	0.684980184225	0.700648929705	0.698329324988	0.906387087945	0.638999403536	0 [...]
+M12Plmr.140584	0.715710745245	0.61542861189	0.583589447001	0.692769996997	0.627642962743	0.574409563732	0.62951410268	0.675936792578	0.677982525814	0.726218824545	0.672819598195	0.873099217355	0.643839676329	0.854156207702	0.576079469337	0.593630620271	0.5327868616	0.565697801654	0.664674653048	0.581908285375	0.870995823419	0.566024532621	0.578063218946	0.762671106587	0.543840378888	0.901956651679	0.633254617386	0.692822968515	0.691897185764	0.72157628973	0.874792056654	0.658956538898	0. [...]
+M54Ewax.140661	0.725038624564	0.847835250405	0.841775009911	0.850965098902	0.836954187458	0.775407016377	0.604020374508	0.865985888334	0.867197292627	0.648320828867	0.819809634897	0.910489387451	0.759375344344	0.888745677695	0.851867934222	0.828628320372	0.835458776194	0.828503645875	0.877549644784	0.86508562553	0.648941964906	0.827185996549	0.840116644587	0.534130388632	0.667202931661	0.679802890411	0.854662051972	0.895603032122	0.865097324416	0.605682601766	0.907123422408	0.69112875122 [...]
+F12Indl.140710	0.767276822497	0.63025975123	0.573633431877	0.717401062346	0.641333534234	0.570698414212	0.641067726941	0.683723830082	0.693456566771	0.748356034044	0.698154625727	0.862918162167	0.667215363512	0.83576014073	0.552627089209	0.5613319558	0.562518286108	0.583683975716	0.691308850784	0.581310022178	0.873385189038	0.545514683441	0.617206701925	0.795948159225	0.580975821266	0.911328229453	0.621847202007	0.696005253931	0.685996088925	0.761494223416	0.858508529857	0.708911019593	0 [...]
+M41Nose.140779	0.68745974274	0.650753965878	0.707108384472	0.731448341685	0.708866958261	0.601674423877	0.586799998119	0.709610942458	0.698587380404	0.564465017296	0.68186652486	0.859909899104	0.568671091557	0.824727622508	0.683576874255	0.648284276577	0.67019704292	0.63921320626	0.787006932812	0.680537754322	0.799645224891	0.674967935395	0.614497900504	0.622763733308	0.364265569241	0.827159656543	0.706553150543	0.776145116599	0.688250320965	0.60356247844	0.850597672076	0.597205675116	0. [...]
+F12Pinl.140479	0.668818206601	0.696729624371	0.652212515866	0.796258794965	0.632820335495	0.629903249148	0.605167617211	0.753431888396	0.759964948763	0.660815334232	0.766780127894	0.886350029522	0.690375903244	0.860110079291	0.659886416908	0.628133609511	0.640720706418	0.683481436957	0.680172359262	0.647356695348	0.803993009137	0.607657425721	0.675869604078	0.714611198653	0.527508034245	0.865503981826	0.596599909794	0.745302019064	0.72149692029	0.701090221132	0.881424654819	0.63314861229 [...]
+M32Plmr.140307	0.794830351541	0.635891944995	0.583893440263	0.765203941851	0.612026405807	0.649677065767	0.670652359436	0.747507433251	0.735611459297	0.749860502192	0.759490962356	0.867746230518	0.682978097523	0.83642487955	0.644833709923	0.573472508584	0.601773844046	0.645666053705	0.695853950041	0.632502713508	0.901235750184	0.600057453593	0.601388537575	0.78588174314	0.664379745737	0.917909050716	0.652217979142	0.661664432314	0.702597272286	0.754744729887	0.85991009578	0.711571525444	 [...]
+M21Forl.140771	0.836638220987	0.607248760274	0.614638246952	0.698871309595	0.606325032038	0.683038263458	0.770484005148	0.668444879723	0.655672671213	0.812891023264	0.675479920819	0.822950577781	0.673909755901	0.761074739595	0.605763272051	0.593449914139	0.560031136627	0.623274461411	0.756464086544	0.606923035666	0.921878347641	0.634261308247	0.654473374533	0.854929300791	0.681043164218	0.93333595827	0.71066020763	0.651705198691	0.627630894591	0.832036608938	0.809495945012	0.786816184619 [...]
+F11Fcsw.140616	0.939247676383	0.85053927638	0.887113174654	0.858344821173	0.890044321004	0.874884656897	0.923206547245	0.870280494772	0.840205194554	0.906594093535	0.858333426887	0.760926817643	0.829185979695	0.73258399124	0.887982855324	0.889262522171	0.84702689167	0.862631599659	0.927172475456	0.891409801385	0.95244906351	0.886707747568	0.892244911355	0.92603854721	0.868377871229	0.950003645285	0.915413938454	0.849735162548	0.747900041332	0.919602062819	0.601417112492	0.907964744374	0. [...]
+M42Fotl.140742	0.68623801087	0.753297099707	0.74049444293	0.823807303218	0.709805492395	0.636245565337	0.636155930671	0.798893866166	0.796153442878	0.619161053612	0.804580917901	0.895904878529	0.67986832277	0.883722833791	0.7481886987	0.714291717409	0.739061921024	0.766517676224	0.643781826618	0.752528276901	0.835907197955	0.705347484008	0.686927916123	0.724563777015	0.588593549802	0.790300500873	0.704142423606	0.775668942732	0.770780599361	0.635657005591	0.891250511378	0.562116071429	0. [...]
+M24Fcsw.140647	0.921929316094	0.752364832977	0.811463368173	0.774813489889	0.835004758412	0.807978900242	0.891003610534	0.786258561982	0.743013079758	0.856735584419	0.756601749302	0.663765033083	0.780508713313	0.508894051676	0.824057168409	0.814938691935	0.764089244779	0.793773738107	0.902110795628	0.815192151204	0.92715010554	0.829010485452	0.815753406381	0.902549607804	0.806976914113	0.929247222964	0.887497115796	0.812166395902	0.708982796109	0.85186184901	0.608246673985	0.876125507146 [...]
+M63Knee.140419	0.83107783548	0.607484813125	0.644676668504	0.796324393591	0.627281780023	0.690449170955	0.722148989497	0.765789678065	0.757851835953	0.788778408156	0.782620584938	0.831366578324	0.667176013562	0.821109385606	0.679285349691	0.620460704032	0.609002618904	0.677704603063	0.742365960722	0.656161611917	0.902754196765	0.613286191802	0.632692581221	0.834235287845	0.659558911387	0.91721108957	0.690216152378	0.615303105313	0.628214048405	0.788671134236	0.825705333635	0.779277366809 [...]
+F34Fotr.140673	0.841297337739	0.734619976086	0.687888646525	0.839282597926	0.652178958563	0.741946664099	0.777450576844	0.819658949895	0.823242003082	0.841629757102	0.829796753562	0.921868241623	0.797176575789	0.907423558344	0.677281121707	0.618231253774	0.673656274357	0.776926513345	0.670486367161	0.660159239172	0.931380512183	0.637856791406	0.71552106899	0.88525514556	0.769072102387	0.939803297657	0.685247910864	0.603679646035	0.73155699106	0.866479239004	0.915349135367	0.799774240812	 [...]
+M23Tong.140462	0.875059239856	0.765558494327	0.698989444859	0.343844784449	0.781745903799	0.761383203262	0.849027303934	0.406874390942	0.427659798162	0.81928168933	0.293280782521	0.829575624957	0.688286257962	0.795735624605	0.631859541857	0.715781904274	0.734452444829	0.545914428751	0.849065170847	0.670483375063	0.896458183814	0.726978539292	0.734581148774	0.892786923979	0.645897439673	0.895061613192	0.832737255514	0.845537231929	0.802952825578	0.824218147134	0.843612125511	0.73814285385 [...]
+M11Aptl.140830	0.575225490557	0.711686675882	0.686790880283	0.840166373992	0.67816290165	0.610314441193	0.578137118158	0.818341222836	0.816301685115	0.587542926586	0.825185692718	0.902590671702	0.677969113562	0.903402683976	0.717361441921	0.708240068979	0.700603050447	0.734425772678	0.642796001723	0.745911397517	0.815518309819	0.681007081722	0.663480448373	0.673532994391	0.621458282423	0.829629862032	0.616458616814	0.769422045211	0.763952652507	0.575183583213	0.896219634687	0.59064305852 [...]
+F32Pinl.140578	0.595449211979	0.740593637681	0.745321988739	0.889382284511	0.712042692361	0.666444328309	0.537940916492	0.847909964758	0.86215088701	0.583059815634	0.867931353771	0.925612440474	0.70157151762	0.918300447713	0.774342713185	0.742769262974	0.746762030255	0.786847826087	0.737070960115	0.777587704684	0.74863340227	0.755947123022	0.72202581624	0.59767515266	0.614905950442	0.784081947975	0.697336567371	0.816318001473	0.78455530465	0.63840322124	0.918818188739	0.585213500985	0.92 [...]
+M12Indr.140762	0.784569220323	0.578760674193	0.576490055617	0.60209521402	0.624906956364	0.643393808039	0.68554130337	0.557201415233	0.573497147872	0.749299695075	0.538295992396	0.859768471829	0.647770043983	0.803574229199	0.572457601583	0.622448424002	0.576405824666	0.541636856754	0.733262136542	0.560923355072	0.895436144356	0.580914367337	0.613841525308	0.804110848304	0.57931897127	0.913338898914	0.720407968864	0.706511232635	0.663337108781	0.756439148573	0.849927881671	0.706083675173	 [...]
+M34Plml.140373	0.808909537266	0.611561879646	0.582672318833	0.782318365363	0.646777865009	0.592085159932	0.732112848053	0.745466218806	0.761079175113	0.789743190548	0.758943541207	0.833712094884	0.628437947579	0.816159749949	0.661555090793	0.588509491424	0.605876737121	0.639748483276	0.722345730699	0.669640932822	0.904656484567	0.634532035696	0.567268706479	0.821242094612	0.695227468543	0.925685777044	0.667082427889	0.651748471648	0.641527221842	0.797620276283	0.829520447037	0.7662647863 [...]
+F33Fotr.140629	0.826276511957	0.692901026744	0.69886432323	0.834260924953	0.646224322592	0.708063514819	0.791722921576	0.825923967042	0.830617287653	0.832573109688	0.837006540385	0.911315838499	0.779897705115	0.904738169727	0.696865872642	0.631737843825	0.627380764887	0.761545791715	0.649914727882	0.654056394058	0.927662995759	0.593230948446	0.694480448833	0.872271786679	0.746843616736	0.933530625285	0.66133972821	0.579944588098	0.720271568521	0.843934484261	0.911700519558	0.790189498993 [...]
+F23Fotl.140863	0.637166547728	0.902109755649	0.905946884172	0.921037397244	0.895102732305	0.825003618849	0.7647226503	0.921446663067	0.915306056307	0.698366063605	0.90137048684	0.952317445375	0.832788232811	0.950786488261	0.904442496029	0.90590570314	0.907908636615	0.890741863172	0.869161156647	0.908516160638	0.460600420911	0.889492484795	0.886345251661	0.591171899772	0.802418240932	0.352171352863	0.85655654498	0.927834636675	0.906829608837	0.659013818089	0.95217654489	0.741245939031	0.9 [...]
+M32Nose.140531	0.494816553578	0.811841442021	0.819953647848	0.883814147422	0.79920725338	0.717738615208	0.638401728384	0.853074073957	0.842262093093	0.582292077257	0.858091919568	0.932919560471	0.71878342536	0.925049974712	0.833957181251	0.809381557108	0.814437073819	0.804839949	0.789899903348	0.837627258167	0.648826979472	0.792036947799	0.785071438488	0.591907856396	0.636844589141	0.664704492733	0.738297423505	0.864095444159	0.823824967396	0.686343492127	0.914847651057	0.570760657029	0. [...]
+M44Fotl.140595	0.698418963197	0.705771931157	0.71499330678	0.83418825377	0.709643640317	0.614618731869	0.637337735263	0.828348064994	0.846955527789	0.65400034951	0.817479722169	0.881941590178	0.712589847734	0.877505441561	0.744210800892	0.680034079387	0.687099719538	0.73702812325	0.716317846561	0.748293972299	0.787433694905	0.719086244287	0.67118835878	0.715184624596	0.606502724455	0.796812429401	0.658806173126	0.789176523346	0.75473582737	0.54973668982	0.882415265673	0.589100372351	0.89 [...]
+M12Ewxr.140434	0.585323779617	0.877520172648	0.877566590345	0.896639152503	0.868935865769	0.800870842449	0.752485663255	0.89933354463	0.891648241889	0.643979772758	0.87166156161	0.930394847436	0.807746166718	0.928223163616	0.886582980071	0.878300273804	0.858671132089	0.868322221265	0.869180173603	0.890944696051	0.397262494107	0.868073928959	0.865432431501	0.643931619315	0.732424722774	0.574117407125	0.827357262789	0.919646626191	0.882594149121	0.610583282497	0.939413222158	0.732198509643 [...]
+M41Navl.140280	0.697751354304	0.62378713522	0.692574589759	0.765725512132	0.708626185534	0.580594193316	0.652101808558	0.745691488724	0.740093436381	0.562293199507	0.748703577424	0.850772514492	0.598216175171	0.841489881465	0.687883717667	0.663667253097	0.6742460332	0.664889448618	0.7633931846	0.715867242766	0.813546334862	0.676031163833	0.608594436186	0.663761391114	0.505527663533	0.8433482737	0.679023299192	0.767921674944	0.701739007726	0.579479514991	0.86825447189	0.634070085146	0.889 [...]
+F31Pinl.140395	0.741653036239	0.661530403869	0.575640008546	0.63261149196	0.603716206863	0.63143732383	0.66799380077	0.607307098324	0.630621721347	0.678673116755	0.571799504611	0.823300710433	0.604538500185	0.781833009362	0.621247831143	0.609268424439	0.619579289038	0.604848838462	0.75436804439	0.646856060579	0.847642786668	0.597856435661	0.59777409433	0.78159726244	0.499821534037	0.873556979828	0.704277736942	0.746904328306	0.706935316378	0.734067242058	0.8261147834	0.595460657479	0.850 [...]
+F32Fcsp.140464	0.925774903519	0.788511878186	0.83221099497	0.816199714539	0.843150249307	0.832024998562	0.897913835969	0.838657625295	0.799873880548	0.884253864538	0.805917511496	0.63955138579	0.814960433478	0.660296623361	0.854213498754	0.846497268065	0.796035782437	0.823691841404	0.896880570586	0.860894560341	0.946150145508	0.852378641181	0.865856138941	0.923465735216	0.838080980986	0.946686458463	0.882032517611	0.822090148644	0.755835524888	0.891714644826	0.511817422427	0.868747331674 [...]
+M13Fotl.140685	0.665203627904	0.656885684307	0.686498942536	0.823518190814	0.618690013267	0.585391061135	0.635747153651	0.806635880541	0.776691098149	0.632239976294	0.805205822998	0.858053565236	0.57674893863	0.850141394917	0.706544028211	0.670688717109	0.65485487211	0.724219974169	0.707173869363	0.676593848018	0.840708665808	0.657930172009	0.586952021117	0.717788438745	0.58980631181	0.843915977069	0.627387754982	0.713623842371	0.674724221995	0.692546553083	0.840854382849	0.598559148269	 [...]
+M23Fotl.140684	0.755927106375	0.731751491481	0.700639956271	0.853125596649	0.698072405137	0.709093476706	0.729734204653	0.834914482638	0.827462272319	0.786974137679	0.869173394246	0.923358074228	0.799925164736	0.922683977684	0.714599992024	0.698791947845	0.681837741686	0.784888154311	0.581346999625	0.684073453683	0.891031672992	0.663012862902	0.710833192847	0.853612228062	0.740030014939	0.890254549603	0.614407315065	0.690436010085	0.783847193954	0.806500736179	0.919912443634	0.7412312812 [...]
+M12Forl.140812	0.588906990497	0.683738366419	0.653298945403	0.861598872177	0.667328209964	0.623534211903	0.574515218259	0.838820680884	0.837508285332	0.663544348035	0.842481363353	0.915569364964	0.687326495459	0.920192639919	0.702097030406	0.678388666446	0.671883184411	0.734605209437	0.704574933222	0.723430207529	0.829299580675	0.670169526449	0.634534033185	0.664932377455	0.662848103733	0.845068866403	0.599951925178	0.751413635518	0.733353744043	0.686680353123	0.915618736448	0.6570699509 [...]
+M24Knee.140796	0.829425370024	0.645615029408	0.640543638842	0.778157676582	0.664151114315	0.681359748297	0.757575164085	0.756951125829	0.736678462764	0.787818242043	0.759929318881	0.76930275965	0.694963166832	0.696786562218	0.683888625191	0.667414126941	0.599250667744	0.691024383286	0.760305145574	0.671377217772	0.912937655992	0.649047289739	0.670494070424	0.84377794696	0.716815762025	0.918500168027	0.734130681451	0.639291034761	0.62183682961	0.806965416544	0.754074452774	0.786348803624	 [...]
+F13Fcsw.140387	0.947340646601	0.821794705728	0.864988838189	0.808381044978	0.880210319839	0.86386165724	0.938326576643	0.828391292046	0.81306603272	0.909825629406	0.807775504427	0.707315178501	0.84320923045	0.716724575398	0.865069127041	0.875949914292	0.833680361341	0.83783450756	0.918726798873	0.864867305772	0.960334896848	0.87853892579	0.863500135855	0.946622033337	0.875306624831	0.958495927317	0.920224857242	0.839432693042	0.743782647477	0.915851789759	0.638723941654	0.900379996451	0. [...]
+M42Indl.140869	0.814662630836	0.626099173604	0.559177831823	0.651819562692	0.615083171476	0.693167134915	0.738088106438	0.61920616707	0.630650419433	0.787328444087	0.683584073241	0.890374174187	0.712285025762	0.868398235945	0.545400748626	0.60374584091	0.58692274026	0.523472585547	0.768043113138	0.574041046009	0.903355859797	0.590687289999	0.632012488617	0.824428776702	0.623176877662	0.929138617494	0.701613351576	0.704697327552	0.714469639031	0.781756862825	0.875043889966	0.715099329509	 [...]
+M42Plml.140513	0.811221659539	0.600681915008	0.560897885279	0.721308953344	0.573033136746	0.631176764391	0.68324075497	0.68679734724	0.691022315369	0.765864186066	0.696547453166	0.867312655956	0.667458807222	0.850743927786	0.579262423294	0.580557673003	0.546768070877	0.568100436125	0.735448752989	0.580416114646	0.894991326005	0.532579913396	0.593991424734	0.804101801969	0.570288724673	0.919506503231	0.651730745406	0.675020639138	0.689264628893	0.768545798579	0.86339774762	0.685735830618	 [...]
+M54Fotl.140823	0.844156268809	0.648778876434	0.660623193678	0.793010247714	0.647333246227	0.714874453961	0.786602398468	0.767864463514	0.765581688205	0.844287117657	0.781876225313	0.837596044605	0.750291074351	0.821942179843	0.678467278806	0.624806804268	0.595822254218	0.713391149612	0.732465798815	0.618138362709	0.935264436043	0.637889939442	0.666154949267	0.875440747199	0.735331448749	0.939181014283	0.6909037402	0.603348366623	0.657048047797	0.842666929452	0.837280021678	0.81083115487	 [...]
+F32Nose.140543	0.712920936395	0.757222778409	0.718189101316	0.742284454014	0.696864936765	0.674767673072	0.694510789961	0.728729925823	0.729213903843	0.614624185084	0.690446132217	0.839768053416	0.713316901546	0.81985413728	0.736959310226	0.717786354818	0.723334523044	0.714273048754	0.760221083056	0.764460315789	0.786684219068	0.718264492043	0.731765689611	0.733240984649	0.604763297852	0.827174833933	0.722005668154	0.806650943566	0.810002837663	0.660247540742	0.863179796359	0.60968052966 [...]
+M22Pinr.140692	0.738155671856	0.683157107574	0.668889328549	0.761915911563	0.626426709239	0.677309941767	0.654110649352	0.713276820232	0.735868917099	0.665543652307	0.714329957635	0.862247967251	0.67306316614	0.827291637482	0.692091246331	0.680924077187	0.675048565125	0.677337100146	0.719439045747	0.68246977825	0.850874254321	0.622089827826	0.68549799521	0.721788072017	0.634724244265	0.876607375905	0.680531639362	0.743857698908	0.723498742285	0.671170382018	0.876416245904	0.637079790446	 [...]
+F21Mout.140380	0.899524201096	0.759530486687	0.69699197334	0.244144487683	0.780921858161	0.77523309949	0.853428241403	0.35484612043	0.40416454023	0.843504737162	0.302730848656	0.831140306623	0.707926377297	0.78063825667	0.626737069093	0.709300973776	0.740122950133	0.526682602744	0.864787536485	0.652766759785	0.918978058807	0.731899019354	0.730045139852	0.903766786544	0.690314973893	0.919539542066	0.838980355284	0.827895914731	0.774480825639	0.848249040355	0.82373996469	0.77060602805	0.83 [...]
+M32Forr.140802	0.790162137368	0.685446939315	0.66060437969	0.80317113481	0.611499230041	0.72710206255	0.739150346772	0.775514326149	0.764437832881	0.794879886249	0.802470872361	0.906092489204	0.730466878655	0.882698174168	0.636209410139	0.593758005665	0.645464373213	0.735385145409	0.680823022108	0.628401801448	0.895710354492	0.614472444006	0.662613829472	0.852274081581	0.675797698586	0.913599628832	0.619039464231	0.653144720522	0.72420791042	0.820388158965	0.893312735093	0.733627618984	0 [...]
+M31Fcsw.140368	0.949176430909	0.797846585533	0.867279770226	0.794035868983	0.866168702767	0.857308018811	0.90702327454	0.827282269466	0.799729120512	0.901269662414	0.795587980977	0.668918263912	0.758392269233	0.671781082055	0.875470009479	0.86525338061	0.827014761211	0.828851171892	0.933698402826	0.858826866054	0.953993132323	0.863202040471	0.847118998326	0.92500736502	0.838432036723	0.954843464658	0.912031940839	0.836384563626	0.704422048713	0.896352152213	0.608946593473	0.888247946515	 [...]
+F11Indl.140283	0.82311880826	0.628082134593	0.625527978718	0.576613544903	0.657112800633	0.698663349453	0.741601563728	0.529099679747	0.494186603019	0.807573377718	0.593699494708	0.844897939211	0.648931638831	0.796101545117	0.426506734251	0.58655010376	0.565367986242	0.514653955994	0.752585064273	0.524592582995	0.89884460939	0.625413635015	0.664114593712	0.838476366593	0.620234679238	0.927842026365	0.742298686676	0.715030130733	0.672141144174	0.816802319009	0.839449961705	0.743762893531	 [...]
+M21Pinr.140516	0.759335224707	0.635664464308	0.629018186143	0.739220938793	0.589078046272	0.674153914755	0.728699973646	0.716663065686	0.65393855909	0.715635099709	0.70181796519	0.762204785548	0.66003569412	0.702065073286	0.67369979044	0.674971254725	0.578730734858	0.673993347066	0.758650974147	0.688609984846	0.869883671386	0.656095818716	0.67889085182	0.813320885071	0.598618792763	0.892718318351	0.721456684429	0.697318344674	0.695034660193	0.763170474371	0.757638883344	0.652539624032	0. [...]
+M23Fcsw.140821	0.938431317136	0.778468750368	0.840522532992	0.835845780261	0.86904363933	0.822431576162	0.896106995938	0.843522104456	0.807490086934	0.870322902267	0.827310174201	0.656038754196	0.773223790024	0.578303319705	0.854257124075	0.843757393065	0.791612719957	0.821811522844	0.935040101387	0.8590706759	0.92780596274	0.862591957437	0.834272034233	0.898072725783	0.826896332716	0.927259431324	0.91482012092	0.845616666403	0.742085295429	0.864873190641	0.609439530559	0.895236905758	0. [...]
+M12Frhd.140766	0.65546886615	0.652353839211	0.67217622254	0.740631998057	0.672649761155	0.606059450402	0.625255065393	0.71826130114	0.756769898892	0.617533643488	0.704322105913	0.862147418126	0.628550601956	0.887006350444	0.664006930306	0.71762593068	0.678151631939	0.611816620217	0.736903218038	0.697412247171	0.826099154408	0.647890997477	0.617102511323	0.675993758873	0.601141821912	0.844919795529	0.642563731007	0.772209193472	0.757783225196	0.610032783558	0.864947240551	0.538537465938	0 [...]
+M32Ewxr.140697	0.754785506856	0.833051541705	0.835642975391	0.785086033558	0.876933987828	0.722582204299	0.821536691693	0.802355353592	0.804313261564	0.658104240038	0.74659659652	0.882004505328	0.699976085598	0.867444641357	0.824784463849	0.837336677684	0.834987828596	0.791097257241	0.859171339193	0.856232891095	0.615572524481	0.82947679392	0.779360383671	0.722687834393	0.659104836308	0.679721222663	0.844732423353	0.88892809625	0.837405024713	0.65949303135	0.898563908585	0.72801403734	0. [...]
+M44Ewax.140411	0.71914342806	0.838208709664	0.854566986838	0.886342964736	0.861686819914	0.767486218714	0.700082915542	0.88391410298	0.88615648584	0.650148035096	0.865408775925	0.942728553193	0.705310308067	0.941109645182	0.858122772786	0.856789545776	0.8384529248	0.81924887857	0.893986789133	0.868221939198	0.519566707099	0.836412927948	0.805942495645	0.50088974371	0.693737883123	0.670573703205	0.827355565186	0.894592872621	0.84332129907	0.573293428426	0.9303496518	0.68965539008	0.951247 [...]
+M11Frhd.140548	0.666230042575	0.650886159204	0.628326536368	0.769039685734	0.677822650737	0.595534078721	0.66621486955	0.734035790201	0.75008317278	0.604732600914	0.738530547663	0.879225643228	0.617178322248	0.863541074522	0.697001114917	0.663167410304	0.662457383832	0.652540250673	0.741278535629	0.69966779083	0.834142093616	0.6263611335	0.609966095198	0.739185387979	0.56411950519	0.865046161989	0.651956809528	0.758305573935	0.707709712985	0.648935437733	0.863797707177	0.633123012933	0.8 [...]
+M22Aptr.140827	0.741765906229	0.652582416266	0.642831239853	0.725537326515	0.677463786906	0.573894616724	0.597341597463	0.705555223243	0.718976489696	0.663992736595	0.696632769577	0.810907750785	0.556605972963	0.770710153937	0.668035453293	0.6173955283	0.637937745229	0.622209607837	0.755379261894	0.698834304027	0.854445952357	0.644336058221	0.652953920728	0.729385546004	0.549686467374	0.870367571866	0.713861030524	0.739203281193	0.669861129348	0.695302934161	0.795772782922	0.629227632134 [...]
+F31Fcsp.140571	0.926900224705	0.761917157139	0.826611905329	0.801883773719	0.833675874754	0.842915872796	0.898542826256	0.825396646347	0.789322745259	0.885997954715	0.789892582842	0.668250149161	0.817534870481	0.632464370847	0.849311902567	0.842126474642	0.794942360259	0.812791726095	0.900466022345	0.846162986372	0.947037178884	0.840859622095	0.861579241308	0.924669672867	0.833905043279	0.947616105299	0.890459917984	0.829498151063	0.770524000359	0.893383717185	0.542092244553	0.8708444899 [...]
+M22Pinl.140526	0.76901199084	0.58830726628	0.639099217296	0.689080475572	0.65289291384	0.632291939367	0.701371119466	0.677840849478	0.621596110677	0.726583415247	0.665994708443	0.75104531194	0.601587130679	0.651633522421	0.655769884306	0.633987150913	0.560297253543	0.610509555306	0.772877615214	0.630149744965	0.885979652053	0.656322247658	0.631606838723	0.776270484405	0.590283163898	0.900207487248	0.731145785232	0.706000916362	0.585921450927	0.744992993157	0.7488771563	0.732111317963	0.7 [...]
+M21Navl.140850	0.568196132503	0.715831484467	0.719001282947	0.744258428429	0.758570344346	0.544399358717	0.631541216264	0.724283309563	0.716684843781	0.448676469018	0.697841042512	0.860767058857	0.575692366857	0.851127038472	0.712503895232	0.696310904576	0.694164445924	0.673956562162	0.778640525787	0.71609708282	0.766567353435	0.675319964135	0.680612315713	0.569661952124	0.451421599692	0.79524206464	0.704113913461	0.802676428688	0.708961218417	0.553251435998	0.85282112647	0.565059319853	 [...]
+F22Frhd.140836	0.678450420663	0.722331335746	0.713071950254	0.687827646518	0.732707210703	0.608703401663	0.743266034281	0.668024290156	0.648147734001	0.601978832489	0.640494883594	0.820733905113	0.608815958633	0.792006966301	0.732651376499	0.711487448393	0.712094495856	0.684150230817	0.741179183216	0.733113551658	0.756352642422	0.676146733063	0.671106257855	0.744226899812	0.604434162504	0.795871866009	0.710072685026	0.798516866089	0.762581003964	0.62505209424	0.840508232796	0.63395302469 [...]
\ No newline at end of file
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/map.txt b/ipynbs/presentations/2014.05.13-ElBrogrammer/map.txt
new file mode 100644
index 0000000..abf859f
--- /dev/null
+++ b/ipynbs/presentations/2014.05.13-ElBrogrammer/map.txt
@@ -0,0 +1,466 @@
+#SampleID	BarcodeSequence	ORIGINAL_SAMPLE_SITE	COMMON_SAMPLE_SITE	COMMON_NAME	TAXON_ID	BODY_SITE	BODY_SITE_CLEAN	BODY_SITE_COARSE	RUN_DATE	BIOLOGICAL_SPECIMEN	RUN_PREFIX	SEX	RUN_ALIAS	ANONYMIZED_NAME	POOL_PROPORTION	BODY_HABITAT	POOL_MEMBER_NAME	BARCODE_READ_GROUP_TAG	ANATOMICAL_SAMPLE_SITE	HOST_INDIVIDUAL	Description
+M12Aptr.140800	ACGACGTCTTAG	right axilla	right armpit	M12Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/18/08	M12Aptr	FFLHOYS	male	FFLHOYS	sample322	0.005	UBERON:skin	M12Aptr	M12Aptr	FMA:Right axilla	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Kner.140735	CTGGCTGTATGA	right popliteal fossa	right back of knees	M41Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	M41Kner	FFO92CG	male	FFO92CG	sample341	0.006	UBERON:skin	M41Kner	M41Kner	FMA:Right popliteal fossa	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F24Plmr.140433	GCGTATCTTGAT	right palm	right palm	F24Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F24Plmr	FKB0RMH	female	FKB0RMH	sample419	0.006	UBERON:skin	F24Plmr	F24Plmr	FMA:Right palm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M53Tong.140327	TACTTCGCTCGC	dorsal surface of tongue	tongue	M53Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M53Tong	FKB0RMH	male	FKB0RMH	sample549	0.006	UBERON:oral cavity	M53Tong	M53Tong	FMA:Dorsal surface of tongue	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Indl.140679	GCAATAGCTGCT	left palmar index finger	left index finger	F31Indl	539665	UBERON:skin of finger	skin of finger	skin	8/20/08	F31Indl	FFO92CG	female	FFO92CG	sample139	0.006	UBERON:skin	F31Indl	F31Indl	FMA:Left index finger	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M33Fotl.140701	GGATCGCAGATC	left plantar foot	left sole of foot	M33Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M33Fotl	FKB0RMH	male	FKB0RMH	sample243	0.006	UBERON:skin	M33Fotl	M33Fotl	FMA:Left foot surface	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Aptl.140686	ATATCGCTACTG	left axilla	left armpit	F21Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/18/08	F21Aptl	FFLHOYS	female	FFLHOYS	sample095	0.005	UBERON:skin	F21Aptl	F21Aptl	FMA:Left axilla	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Mout.140603	GCTCAGTGCAGA	oral cavity	mouth	F32Mout	447426	UBERON:mouth	mouth	oral	8/20/08	F32Mout	FFO92CG	female	FFO92CG	sample258	0.006	UBERON:oral cavity	F32Mout	F32Mout	FMA:Oral cavity	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Mout.140541	CGAGGCTCAGTA	oral cavity	mouth	M22Mout	447426	UBERON:mouth	mouth	oral	8/20/08	M22Mout	FFO92CG	male	FFO92CG	sample262	0.006	UBERON:oral cavity	M22Mout	M22Mout	FMA:Oral cavity	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Knel.140324	GCCTATACTACA	left popliteal fossa	left back of knees	F32Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	F32Knel	FFO92CG	female	FFO92CG	sample112	0.006	UBERON:skin	F32Knel	F32Knel	FMA:Left popliteal fossa	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Navl.140828	AGTGTCACGGTG	umbilicus	navel	F12Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/18/08	F12Navl	FFLHOYS	female	FFLHOYS	sample268	0.005	UBERON:skin	F12Navl	F12Navl	FMA:Umbilicus	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F34Tong.140848	GTCTGGATAGCG	dorsal surface of tongue	tongue	F34Tong	447426	UBERON:tongue	tongue	oral	11/14/08	F34Tong	FKB0RMH	female	FKB0RMH	sample532	0.006	UBERON:oral cavity	F34Tong	F34Tong	FMA:Dorsal surface of tongue	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M53Fcsw.140426	TAGATAGCAGGA	stool	stool	M53Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M53Fcsw	FKB0RMH	male	FKB0RMH	sample517	0.006	UBERON:feces	M53Fcsw	M53Fcsw	FMA:Feces	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M33Frhd.140580	GCTTACATCGAG	forehead	forehead	M33Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M33Frhd	FKB0RMH	male	FKB0RMH	sample055	0.006	UBERON:skin	M33Frhd	M33Frhd	FMA:Forehead	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Fcsp.140527	CCTAGTACTGAT	stool	stool	M22Fcsp	408170	UBERON:feces	feces	gut	8/20/08	M22Fcsp	FFO92CG	male	FFO92CG	sample502	0.006	UBERON:feces	M22Fcsp	M22Fcsp	FMA:Feces	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Indr.140809	AGATCTCTGCAT	right palmar index finger	right index finger	F11Indr	539669	UBERON:skin of finger	skin of finger	skin	8/18/08	F11Indr	FFLHOYS	female	FFLHOYS	sample357	0.005	UBERON:skin	F11Indr	F11Indr	FMA:Right index finger	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F14Plmr.140786	GAGCATTCTCTA	right palm	right palm	F14Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F14Plmr	FKB0RMH	female	FKB0RMH	sample415	0.006	UBERON:skin	F14Plmr	F14Plmr	FMA:Right palm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M24Plmr.140833	GCAATAGCTGCT	right palm	right palm	M24Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M24Plmr	FKB0RMH	male	FKB0RMH	sample431	0.006	UBERON:skin	M24Plmr	M24Plmr	FMA:Right palm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Plmr.140505	CGTTACTAGAGC	right palm	right palm	M31Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	M31Plmr	FFLHOYS	male	FFLHOYS	sample432	0.005	UBERON:skin	M31Plmr	M31Plmr	FMA:Right palm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M23Fotr.140344	GATCTCATAGGC	right plantar foot	right sole of foot	M23Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M23Fotr	FKB0RMH	male	FKB0RMH	sample462	0.006	UBERON:skin	M23Fotr	M23Fotr	FMA:Right foot surface	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Forr.140302	AGTCCATAGCTG	right volar forearm	right forearm	F12Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/18/08	F12Forr	FFLHOYS	female	FFLHOYS	sample344	0.005	UBERON:skin	F12Forr	F12Forr	FMA:Surface of right arm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M43Ewax.140650	GTGTGCTATCAG	external auditory canal	outer ear canal/earwax	M43Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M43Ewax	FKB0RMH	male	FKB0RMH	sample309	0.006	UBERON:external auditory canal	M43Ewax	M43Ewax	FMA:External auditory canal	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Plml.140704	ATGCACTGGCGA	left palm	left palm	F21Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	F21Plml	FFLHOYS	female	FFLHOYS	sample193	0.005	UBERON:skin	F21Plml	F21Plml	FMA:Left palm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F24Knee.140744	GCTAGTCTGAAC	popliteal fossae	back of knees	F24Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	F24Knee	FKB0RMH	female	FKB0RMH	sample004	0.006	UBERON:skin	F24Knee	F24Knee	FMA:Popliteal fossa	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Nose.140428	ACAGTGCTTCAT	external nose	external nose	M11Nose	646099	UBERON:nose	nose	skin	8/18/08	M11Nose	FFLHOYS	male	FFLHOYS	sample025	0.005	UBERON:skin	M11Nose	M11Nose	FMA:External nose	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Ewxr.140726	AGTACGCTCGAG	right external auditory canal	right outer ear canal/earwax	F12Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	F12Ewxr	FFLHOYS	female	FFLHOYS	sample400	0.005	UBERON:external auditory canal	F12Ewxr	F12Ewxr	FMA:Right external auditory canal	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Pinl.140284	GACATCGGCTAT	left lateral pinna	left outer ear	M41Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M41Pinl	FFO92CG	male	FFO92CG	sample174	0.006	UBERON:skin	M41Pinl	M41Pinl	FMA:Surface of left pinna	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Fotr.140485	ATGGCAGCTCTA	right plantar foot	right sole of foot	F21Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	F21Fotr	FFLHOYS	female	FFLHOYS	sample448	0.005	UBERON:skin	F21Fotr	F21Fotr	FMA:Right foot surface	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F24Plml.140452	GCGTTACACACA	left palm	left palm	F24Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F24Plml	FKB0RMH	female	FKB0RMH	sample196	0.006	UBERON:skin	F24Plml	F24Plml	FMA:Left palm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M64Fotl.140777	TCAGCCATGACA	left plantar foot	left sole of foot	M64Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M64Fotl	FKB0RMH	male	FKB0RMH	sample252	0.006	UBERON:skin	M64Fotl	M64Fotl	FMA:Left foot surface	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Kner.140705	ATGTGTCGACTT	right popliteal fossa	right back of knees	F22Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	F22Kner	FFLHOYS	female	FFLHOYS	sample332	0.005	UBERON:skin	F22Kner	F22Kner	FMA:Right popliteal fossa	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Aptl.140841	CTGCTGCGAAGA	left axilla	left armpit	M41Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/20/08	M41Aptl	FFO92CG	male	FFO92CG	sample105	0.006	UBERON:skin	M41Aptl	M41Aptl	FMA:Left axilla	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Fcsp.140668	ACGCGCAGATAC	stool	stool	M12Fcsp	408170	UBERON:feces	feces	gut	8/18/08	M12Fcsp	FFLHOYS	male	FFLHOYS	sample496	0.005	UBERON:feces	M12Fcsp	M12Fcsp	FMA:Feces	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Frhd.140776	CTATGCTTGATG	forehead	forehead	M32Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/18/08	M32Frhd	FFLHOYS	male	FFLHOYS	sample054	0.005	UBERON:skin	M32Frhd	M32Frhd	FMA:Forehead	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M43Fcsw.140714	GTTCGCGTATAG	stool	stool	M43Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M43Fcsw	FKB0RMH	male	FKB0RMH	sample515	0.006	UBERON:feces	M43Fcsw	M43Fcsw	FMA:Feces	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M43Knee.140412	GTGGCGATACAC	popliteal fossae	back of knees	M43Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M43Knee	FKB0RMH	male	FKB0RMH	sample013	0.006	UBERON:skin	M43Knee	M43Knee	FMA:Popliteal fossa	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F14Tong.140430	GATACGTCCTGA	dorsal surface of tongue	tongue	F14Tong	447426	UBERON:tongue	tongue	oral	11/14/08	F14Tong	FKB0RMH	female	FKB0RMH	sample524	0.006	UBERON:oral cavity	F14Tong	F14Tong	FMA:Dorsal surface of tongue	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Fcsp.140783	CAACACGCACGA	stool	stool	F22Fcsp	408170	UBERON:feces	feces	gut	8/18/08	F22Fcsp	FFLHOYS	female	FFLHOYS	sample484	0.005	UBERON:feces	F22Fcsp	F22Fcsp	FMA:Feces	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F34Ewax.140285	GTCTGACAGTTG	external auditory canal	outer ear canal/earwax	F34Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	F34Ewax	FKB0RMH	female	FKB0RMH	sample302	0.006	UBERON:external auditory canal	F34Ewax	F34Ewax	FMA:External auditory canal	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M63Fotr.140467	TATGCACCAGTG	right plantar foot	right sole of foot	M63Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M63Fotr	FKB0RMH	male	FKB0RMH	sample474	0.006	UBERON:skin	M63Fotr	M63Fotr	FMA:Right foot surface	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F33Tong.140358	GTCATATCGTAC	dorsal surface of tongue	tongue	F33Tong	447426	UBERON:tongue	tongue	oral	11/14/08	F33Tong	FKB0RMH	female	FKB0RMH	sample531	0.006	UBERON:oral cavity	F33Tong	F33Tong	FMA:Dorsal surface of tongue	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M54Frhd.140695	TAGCATCGTGGT	forehead	forehead	M54Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M54Frhd	FKB0RMH	male	FKB0RMH	sample062	0.006	UBERON:skin	M54Frhd	M54Frhd	FMA:Forehead	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Knel.140873	CGCTAGAACGCA	left popliteal fossa	left back of knees	M31Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	M31Knel	FFLHOYS	male	FFLHOYS	sample117	0.005	UBERON:skin	M31Knel	M31Knel	FMA:Left popliteal fossa	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Ewxr.140289	GATCTCATAGGC	right external auditory canal	right outer ear canal/earwax	F31Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	F31Ewxr	FFO92CG	female	FFO92CG	sample403	0.006	UBERON:external auditory canal	F31Ewxr	F31Ewxr	FMA:Right external auditory canal	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Frhd.140444	GCTAAGAGAGTA	forehead	forehead	F32Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/20/08	F32Frhd	FFO92CG	female	FFO92CG	sample042	0.006	UBERON:skin	F32Frhd	F32Frhd	FMA:Forehead	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M34Plmr.140826	GGTCACTGACAG	right palm	right palm	M34Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M34Plmr	FKB0RMH	male	FKB0RMH	sample435	0.006	UBERON:skin	M34Plmr	M34Plmr	FMA:Right palm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F23Knee.140816	GCCACTGATAGT	popliteal fossae	back of knees	F23Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	F23Knee	FKB0RMH	female	FKB0RMH	sample003	0.006	UBERON:skin	F23Knee	F23Knee	FMA:Popliteal fossa	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M54Plml.140636	TAGCACACCTAT	left palm	left palm	M54Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M54Plml	FKB0RMH	male	FKB0RMH	sample218	0.006	UBERON:skin	M54Plml	M54Plml	FMA:Left palm	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Indr.140519	GAGATGCCGACT	right palmar index finger	right index finger	M42Indr	539669	UBERON:skin of finger	skin of finger	skin	8/20/08	M42Indr	FFO92CG	male	FFO92CG	sample370	0.006	UBERON:skin	M42Indr	M42Indr	FMA:Right index finger	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Pinr.140328	GCTTCATAGTGT	right lateral pinna	right outer ear	F32Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	F32Pinr	FFO92CG	female	FFO92CG	sample390	0.006	UBERON:skin	F32Pinr	F32Pinr	FMA:Surface of right pinna	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Forl.140496	CTGTTCGTAGAG	left volar forearm	left forearm	M41Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/20/08	M41Forl	FFO92CG	male	FFO92CG	sample133	0.006	UBERON:skin	M41Forl	M41Forl	FMA:Surface of left arm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Plml.140691	GACAGGAGATAG	left palm	left palm	M41Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	M41Plml	FFO92CG	male	FFO92CG	sample213	0.006	UBERON:skin	M41Plml	M41Plml	FMA:Left palm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F33Ewax.140872	GTCACGACTATT	external auditory canal	outer ear canal/earwax	F33Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	F33Ewax	FKB0RMH	female	FKB0RMH	sample301	0.006	UBERON:external auditory canal	F33Ewax	F33Ewax	FMA:External auditory canal	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Pinr.140457	GAGTGGTAGAGA	right lateral pinna	right outer ear	M42Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M42Pinr	FFO92CG	male	FFO92CG	sample398	0.006	UBERON:skin	M42Pinr	M42Pinr	FMA:Surface of right pinna	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M23Frhd.140404	GATCGCAGGTGT	forehead	forehead	M23Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M23Frhd	FKB0RMH	male	FKB0RMH	sample051	0.006	UBERON:skin	M23Frhd	M23Frhd	FMA:Forehead	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M23Plmr.140648	GATCAGAAGATG	right palm	right palm	M23Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M23Plmr	FKB0RMH	male	FKB0RMH	sample430	0.006	UBERON:skin	M23Plmr	M23Plmr	FMA:Right palm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F33Plml.140709	GTATATCCGCAG	left palm	left palm	F33Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F33Plml	FKB0RMH	female	FKB0RMH	sample199	0.006	UBERON:skin	F33Plml	F33Plml	FMA:Left palm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Fotl.140774	ACTCTTCTAGAG	left plantar foot	left sole of foot	M12Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	M12Fotl	FFLHOYS	male	FFLHOYS	sample234	0.005	UBERON:skin	M12Fotl	M12Fotl	FMA:Left foot surface	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Fotl.140801	GACCGAGCTATG	left plantar foot	left sole of foot	M41Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	M41Fotl	FFO92CG	male	FFO92CG	sample245	0.006	UBERON:skin	M41Fotl	M41Fotl	FMA:Left foot surface	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Ewxr.140787	CCGATGTCAGAT	right external auditory canal	right outer ear canal/earwax	M22Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M22Ewxr	FFO92CG	male	FFO92CG	sample407	0.006	UBERON:external auditory canal	M22Ewxr	M22Ewxr	FMA:Right external auditory canal	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Pinl.140316	ACATTCAGCGCA	left lateral pinna	left outer ear	M11Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M11Pinl	FFLHOYS	male	FFLHOYS	sample168	0.005	UBERON:skin	M11Pinl	M11Pinl	FMA:Surface of left pinna	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M53Fotl.140376	TACTGGACGCGA	left plantar foot	left sole of foot	M53Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M53Fotl	FKB0RMH	male	FKB0RMH	sample249	0.006	UBERON:skin	M53Fotl	M53Fotl	FMA:Left foot surface	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Pinr.140498	ATACGTCTTCGA	right lateral pinna	right outer ear	F12Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	F12Pinr	FFLHOYS	female	FFLHOYS	sample386	0.005	UBERON:skin	F12Pinr	F12Pinr	FMA:Surface of right pinna	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Fcsw.140400	AGTAGTATCCTC	stool	stool	F12Fcsw	408170	UBERON:feces	feces	gut	8/18/08	F12Fcsw	FFLHOYS	female	FFLHOYS	sample479	0.005	UBERON:feces	F12Fcsw	F12Fcsw	FMA:Feces	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F34Fotl.140856	GTCTCTCTACGC	left plantar foot	left sole of foot	F34Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F34Fotl	FKB0RMH	female	FKB0RMH	sample232	0.006	UBERON:skin	F34Fotl	F34Fotl	FMA:Left foot surface	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Indr.140556	AGTGCGATGCGT	right palmar index finger	right index finger	F12Indr	539669	UBERON:skin of finger	skin of finger	skin	8/18/08	F12Indr	FFLHOYS	female	FFLHOYS	sample358	0.005	UBERON:skin	F12Indr	F12Indr	FMA:Right index finger	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Pinl.140313	CATGAGTGCTAC	left lateral pinna	left outer ear	M21Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M21Pinl	FFO92CG	male	FFO92CG	sample170	0.006	UBERON:skin	M21Pinl	M21Pinl	FMA:Surface of left pinna	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F13Plml.140601	GACTCGAATCGT	left palm	left palm	F13Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F13Plml	FKB0RMH	female	FKB0RMH	sample191	0.006	UBERON:skin	F13Plml	F13Plml	FMA:Left palm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Indl.140291	CGTCACGACTAA	left palmar index finger	left index finger	M31Indl	539665	UBERON:skin of finger	skin of finger	skin	8/18/08	M31Indl	FFLHOYS	male	FFLHOYS	sample145	0.005	UBERON:skin	M31Indl	M31Indl	FMA:Left index finger	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Forr.140497	CGTAGAACGTGC	right volar forearm	right forearm	M31Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/18/08	M31Forr	FFLHOYS	male	FFLHOYS	sample353	0.005	UBERON:skin	M31Forr	M31Forr	FMA:Surface of right arm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Plml.140737	GCTGTAGTATGC	left palm	left palm	F32Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	F32Plml	FFO92CG	female	FFO92CG	sample198	0.006	UBERON:skin	F32Plml	F32Plml	FMA:Left palm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Kner.140429	CTAGAGACTCTT	right popliteal fossa	right back of knees	M32Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	M32Kner	FFLHOYS	male	FFLHOYS	sample340	0.005	UBERON:skin	M32Kner	M32Kner	FMA:Right popliteal fossa	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Frhd.140449	CTTGATGCGTAT	forehead	forehead	M41Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/20/08	M41Frhd	FFO92CG	male	FFO92CG	sample057	0.006	UBERON:skin	M41Frhd	M41Frhd	FMA:Forehead	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M24Plml.140651	GCACATCGAGCA	left palm	left palm	M24Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M24Plml	FKB0RMH	male	FKB0RMH	sample208	0.006	UBERON:skin	M24Plml	M24Plml	FMA:Left palm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Forl.140305	AGTCACATCACT	left volar forearm	left forearm	F12Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/18/08	F12Forl	FFLHOYS	female	FFLHOYS	sample122	0.005	UBERON:skin	F12Forl	F12Forl	FMA:Surface of left arm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Plml.140817	ACTCACGGTATG	left palm	left palm	M12Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	M12Plml	FFLHOYS	male	FFLHOYS	sample202	0.005	UBERON:skin	M12Plml	M12Plml	FMA:Left palm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Tong.140538	ATAGCTCCATAC	dorsal surface of tongue	tongue	F12Tong	447426	UBERON:tongue	tongue	oral	8/18/08	F12Tong	FFLHOYS	female	FFLHOYS	sample522	0.005	UBERON:oral cavity	F12Tong	F12Tong	FMA:Dorsal surface of tongue	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M54Fotr.140667	TAGCGGATCACG	right plantar foot	right sole of foot	M54Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M54Fotr	FKB0RMH	male	FKB0RMH	sample473	0.006	UBERON:skin	M54Fotr	M54Fotr	FMA:Right foot surface	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Mout.140583	AGCACACCTACA	oral cavity	mouth	F11Mout	447426	UBERON:mouth	mouth	oral	8/18/08	F11Mout	FFLHOYS	female	FFLHOYS	sample253	0.005	UBERON:oral cavity	F11Mout	F11Mout	FMA:Oral cavity	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Forl.140592	CTATCAGTGTAC	left volar forearm	left forearm	M32Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/18/08	M32Forl	FFLHOYS	male	FFLHOYS	sample132	0.005	UBERON:skin	M32Forl	M32Forl	FMA:Surface of left arm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M44Fotr.140394	TACACACATGGC	right plantar foot	right sole of foot	M44Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M44Fotr	FKB0RMH	male	FKB0RMH	sample471	0.006	UBERON:skin	M44Fotr	M44Fotr	FMA:Right foot surface	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Frhd.140508	AGAGTCCTGAGC	forehead	forehead	F11Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/18/08	F11Frhd	FFLHOYS	female	FFLHOYS	sample033	0.005	UBERON:skin	F11Frhd	F11Frhd	FMA:Forehead	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F13Frhd.140649	GACTGATCATCT	forehead	forehead	F13Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	F13Frhd	FKB0RMH	female	FKB0RMH	sample035	0.006	UBERON:skin	F13Frhd	F13Frhd	FMA:Forehead	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Fcsw.140493	CAGTACGATCTT	stool	stool	M21Fcsw	408170	UBERON:feces	feces	gut	8/20/08	M21Fcsw	FFO92CG	male	FFO92CG	sample501	0.006	UBERON:feces	M21Fcsw	M21Fcsw	FMA:Feces	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Aptr.140474	CGCGTAACTGTA	right axilla	right armpit	M31Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/18/08	M31Aptr	FFLHOYS	male	FFLHOYS	sample325	0.005	UBERON:skin	M31Aptr	M31Aptr	FMA:Right axilla	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Forl.140337	ACACATGTCTAC	left volar forearm	left forearm	M11Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/18/08	M11Forl	FFLHOYS	male	FFLHOYS	sample127	0.005	UBERON:skin	M11Forl	M11Forl	FMA:Surface of left arm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Fotl.140294	ATACTATTGCGC	left plantar foot	left sole of foot	F12Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	F12Fotl	FFLHOYS	female	FFLHOYS	sample222	0.005	UBERON:skin	F12Fotl	F12Fotl	FMA:Left foot surface	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F24Frhd.140858	GCTAAGAGAGTA	forehead	forehead	F24Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	F24Frhd	FKB0RMH	female	FKB0RMH	sample040	0.006	UBERON:skin	F24Frhd	F24Frhd	FMA:Forehead	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Plml.140352	CATCGTATCAAC	left palm	left palm	M21Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	M21Plml	FFO92CG	male	FFO92CG	sample205	0.006	UBERON:skin	M21Plml	M21Plml	FMA:Left palm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Fcsp.140536	ATCCTCAGTAGT	stool	stool	F21Fcsp	408170	UBERON:feces	feces	gut	8/18/08	F21Fcsp	FFLHOYS	female	FFLHOYS	sample482	0.005	UBERON:feces	F21Fcsp	F21Fcsp	FMA:Feces	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F14Plml.140525	GAGCTGGCTGAT	left palm	left palm	F14Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F14Plml	FKB0RMH	female	FKB0RMH	sample192	0.006	UBERON:skin	F14Plml	F14Plml	FMA:Left palm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Plmr.140782	GACAGTTACTGC	right palm	right palm	M41Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	M41Plmr	FFO92CG	male	FFO92CG	sample436	0.006	UBERON:skin	M41Plmr	M41Plmr	FMA:Right palm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Kner.140321	CGCTTATCGAGA	right popliteal fossa	right back of knees	M31Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	M31Kner	FFLHOYS	male	FFLHOYS	sample339	0.005	UBERON:skin	M31Kner	M31Kner	FMA:Right popliteal fossa	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F23Fotr.140680	GCCAGAGTCGTA	right plantar foot	right sole of foot	F23Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F23Fotr	FKB0RMH	female	FKB0RMH	sample450	0.006	UBERON:skin	F23Fotr	F23Fotr	FMA:Right foot surface	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Tong.140309	CTGAGCAGAGTC	dorsal surface of tongue	tongue	M32Tong	447426	UBERON:tongue	tongue	oral	8/18/08	M32Tong	FFLHOYS	male	FFLHOYS	sample542	0.005	UBERON:oral cavity	M32Tong	M32Tong	FMA:Dorsal surface of tongue	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Fotl.140421	CACTGTAGGACG	left plantar foot	left sole of foot	F22Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	F22Fotl	FFLHOYS	female	FFLHOYS	sample226	0.005	UBERON:skin	F22Fotl	F22Fotl	FMA:Left foot surface	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Fcsp.140319	CAGGTGCTACTA	stool	stool	M21Fcsp	408170	UBERON:feces	feces	gut	8/20/08	M21Fcsp	FFO92CG	male	FFO92CG	sample500	0.006	UBERON:feces	M21Fcsp	M21Fcsp	FMA:Feces	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Mout.140653	GAGCAGATGCCT	oral cavity	mouth	M42Mout	447426	UBERON:mouth	mouth	oral	8/20/08	M42Mout	FFO92CG	male	FFO92CG	sample266	0.006	UBERON:oral cavity	M42Mout	M42Mout	FMA:Oral cavity	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F14Fcsw.140752	GATATGCGGCTG	stool	stool	F14Fcsw	408170	UBERON:feces	feces	gut	11/14/08	F14Fcsw	FKB0RMH	female	FKB0RMH	sample481	0.006	UBERON:feces	F14Fcsw	F14Fcsw	FMA:Feces	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M14Fotl.140639	GACGATATCGCG	left plantar foot	left sole of foot	M14Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M14Fotl	FKB0RMH	male	FKB0RMH	sample236	0.006	UBERON:skin	M14Fotl	M14Fotl	FMA:Left foot surface	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Aptr.140530	AGCTTGACAGCT	right axilla	right armpit	F12Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/18/08	F12Aptr	FFLHOYS	female	FFLHOYS	sample316	0.005	UBERON:skin	F12Aptr	F12Aptr	FMA:Right axilla	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Fcsw.140326	ACGCTATCTGGA	stool	stool	M12Fcsw	408170	UBERON:feces	feces	gut	8/18/08	M12Fcsw	FFLHOYS	male	FFLHOYS	sample497	0.005	UBERON:feces	M12Fcsw	M12Fcsw	FMA:Feces	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M13Frhd.140349	CTTGATGCGTAT	forehead	forehead	M13Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M13Frhd	FKB0RMH	male	FKB0RMH	sample047	0.006	UBERON:skin	M13Frhd	M13Frhd	FMA:Forehead	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Forr.140728	CTTAGCACATCA	right volar forearm	right forearm	M41Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/20/08	M41Forr	FFO92CG	male	FFO92CG	sample355	0.006	UBERON:skin	M41Forr	M41Forr	FMA:Surface of right arm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Aptr.140329	CAGATCGGATCG	right axilla	right armpit	M21Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/20/08	M21Aptr	FFO92CG	male	FFO92CG	sample323	0.006	UBERON:skin	M21Aptr	M21Aptr	FMA:Right axilla	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Fotl.140499	CATGGCTACACA	left plantar foot	left sole of foot	M21Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	M21Fotl	FFO92CG	male	FFO92CG	sample237	0.006	UBERON:skin	M21Fotl	M21Fotl	FMA:Left foot surface	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M24Fotr.140398	GCAGCACGTTGA	right plantar foot	right sole of foot	M24Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M24Fotr	FKB0RMH	male	FKB0RMH	sample463	0.006	UBERON:skin	M24Fotr	M24Fotr	FMA:Right foot surface	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Knel.140738	AGGACGCACTGT	left popliteal fossa	left back of knees	F12Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	F12Knel	FFLHOYS	female	FFLHOYS	sample108	0.005	UBERON:skin	F12Knel	F12Knel	FMA:Left popliteal fossa	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Mout.140788	CACATTGTGAGC	oral cavity	mouth	F22Mout	447426	UBERON:mouth	mouth	oral	8/18/08	F22Mout	FFLHOYS	female	FFLHOYS	sample256	0.005	UBERON:oral cavity	F22Mout	F22Mout	FMA:Oral cavity	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M23Ewax.140338	GATGATCGCCGA	external auditory canal	outer ear canal/earwax	M23Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M23Ewax	FKB0RMH	male	FKB0RMH	sample305	0.006	UBERON:external auditory canal	M23Ewax	M23Ewax	FMA:External auditory canal	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F13Fotl.140488	GAGAATACGTGA	left plantar foot	left sole of foot	F13Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F13Fotl	FKB0RMH	female	FKB0RMH	sample223	0.006	UBERON:skin	F13Fotl	F13Fotl	FMA:Left foot surface	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M63Fcsw.140818	TCACAGATCCGA	stool	stool	M63Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M63Fcsw	FKB0RMH	male	FKB0RMH	sample519	0.006	UBERON:feces	M63Fcsw	M63Fcsw	FMA:Feces	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M43Fotl.140659	GTGTCTACATTG	left plantar foot	left sole of foot	M43Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M43Fotl	FKB0RMH	male	FKB0RMH	sample247	0.006	UBERON:skin	M43Fotl	M43Fotl	FMA:Left foot surface	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Navl.140840	ATGACCATCGTG	umbilicus	navel	F21Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/18/08	F21Navl	FFLHOYS	female	FFLHOYS	sample269	0.005	UBERON:skin	F21Navl	F21Navl	FMA:Umbilicus	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Indr.140675	GCACATCGAGCA	right palmar index finger	right index finger	F31Indr	539669	UBERON:skin of finger	skin of finger	skin	8/20/08	F31Indr	FFO92CG	female	FFO92CG	sample361	0.006	UBERON:skin	F31Indr	F31Indr	FMA:Right index finger	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Fotl.140715	GCATATAGTCTC	left plantar foot	left sole of foot	F31Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	F31Fotl	FFO92CG	female	FFO92CG	sample229	0.006	UBERON:skin	F31Fotl	F31Fotl	FMA:Left foot surface	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M14Ewax.140489	GACGCAGTAGCT	external auditory canal	outer ear canal/earwax	M14Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M14Ewax	FKB0RMH	male	FKB0RMH	sample304	0.006	UBERON:external auditory canal	M14Ewax	M14Ewax	FMA:External auditory canal	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Plml.140507	CTCTCTACCTGT	left palm	left palm	M32Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	M32Plml	FFLHOYS	male	FFLHOYS	sample210	0.005	UBERON:skin	M32Plml	M32Plml	FMA:Left palm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Fcsw.140646	CTATAGTCGTGT	stool	stool	M32Fcsw	408170	UBERON:feces	feces	gut	8/18/08	M32Fcsw	FFLHOYS	male	FFLHOYS	sample509	0.005	UBERON:feces	M32Fcsw	M32Fcsw	FMA:Feces	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M63Frhd.140286	TATCAGGTGTGC	forehead	forehead	M63Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M63Frhd	FKB0RMH	male	FKB0RMH	sample063	0.006	UBERON:skin	M63Frhd	M63Frhd	FMA:Forehead	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M14Frhd.140825	GACATCGGCTAT	forehead	forehead	M14Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M14Frhd	FKB0RMH	male	FKB0RMH	sample048	0.006	UBERON:skin	M14Frhd	M14Frhd	FMA:Forehead	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Fcsw.140276	CCTCTCGTGATC	stool	stool	M22Fcsw	408170	UBERON:feces	feces	gut	8/20/08	M22Fcsw	FFO92CG	male	FFO92CG	sample503	0.006	UBERON:feces	M22Fcsw	M22Fcsw	FMA:Feces	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Indr.140459	GCTATCACGAGT	right palmar index finger	right index finger	F32Indr	539669	UBERON:skin of finger	skin of finger	skin	8/20/08	F32Indr	FFO92CG	female	FFO92CG	sample362	0.006	UBERON:skin	F32Indr	F32Indr	FMA:Right index finger	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Fcsw.140447	GACTGCATCTTA	stool	stool	M42Fcsw	408170	UBERON:feces	feces	gut	8/20/08	M42Fcsw	FFO92CG	male	FFO92CG	sample514	0.006	UBERON:feces	M42Fcsw	M42Fcsw	FMA:Feces	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M63Plmr.140815	TAGTTGCGAGTC	right palm	right palm	M63Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M63Plmr	FKB0RMH	male	FKB0RMH	sample442	0.006	UBERON:skin	M63Plmr	M63Plmr	FMA:Right palm	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M64Plmr.140356	TCACGATTAGCG	right palm	right palm	M64Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M64Plmr	FKB0RMH	male	FKB0RMH	sample443	0.006	UBERON:skin	M64Plmr	M64Plmr	FMA:Right palm	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Nose.140415	CGTGATCTCTCC	external nose	external nose	M31Nose	646099	UBERON:nose	nose	skin	8/18/08	M31Nose	FFLHOYS	male	FFLHOYS	sample029	0.005	UBERON:skin	M31Nose	M31Nose	FMA:External nose	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F33Plmr.140674	GTAGTGTCTAGC	right palm	right palm	F33Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F33Plmr	FKB0RMH	female	FKB0RMH	sample422	0.006	UBERON:skin	F33Plmr	F33Plmr	FMA:Right palm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Plmr.140357	CACTCAACAGAC	right palm	right palm	F22Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	F22Plmr	FFLHOYS	female	FFLHOYS	sample417	0.005	UBERON:skin	F22Plmr	F22Plmr	FMA:Right palm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Fcsp.140567	AGACTGCGTACT	stool	stool	F11Fcsp	408170	UBERON:feces	feces	gut	8/18/08	F11Fcsp	FFLHOYS	female	FFLHOYS	sample476	0.005	UBERON:feces	F11Fcsp	F11Fcsp	FMA:Feces	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Frhd.140770	GAGAATACGTGA	forehead	forehead	M42Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/20/08	M42Frhd	FFO92CG	male	FFO92CG	sample058	0.006	UBERON:skin	M42Frhd	M42Frhd	FMA:Forehead	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Indl.140814	CTCATGTACAGT	left palmar index finger	left index finger	M32Indl	539665	UBERON:skin of finger	skin of finger	skin	8/18/08	M32Indl	FFLHOYS	male	FFLHOYS	sample146	0.005	UBERON:skin	M32Indl	M32Indl	FMA:Left index finger	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F14Fotr.140665	GAGTCTGAGTCT	right plantar foot	right sole of foot	F14Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F14Fotr	FKB0RMH	female	FKB0RMH	sample447	0.006	UBERON:skin	F14Fotr	F14Fotr	FMA:Right foot surface	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Pinr.140355	CACTGGTATATC	right lateral pinna	right outer ear	F22Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	F22Pinr	FFLHOYS	female	FFLHOYS	sample388	0.005	UBERON:skin	F22Pinr	F22Pinr	FMA:Surface of right pinna	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M33Tong.140346	GGCGACATGTAC	dorsal surface of tongue	tongue	M33Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M33Tong	FKB0RMH	male	FKB0RMH	sample543	0.006	UBERON:oral cavity	M33Tong	M33Tong	FMA:Dorsal surface of tongue	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Fotr.140597	ATACTCACTCAG	right plantar foot	right sole of foot	F12Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	F12Fotr	FFLHOYS	female	FFLHOYS	sample445	0.005	UBERON:skin	F12Fotr	F12Fotr	FMA:Right foot surface	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M34Frhd.140638	GGTGCGTGTATG	forehead	forehead	M34Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M34Frhd	FKB0RMH	male	FKB0RMH	sample056	0.006	UBERON:skin	M34Frhd	M34Frhd	FMA:Forehead	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Kner.140502	GACTAGACCAGC	right popliteal fossa	right back of knees	M42Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	M42Kner	FFO92CG	male	FFO92CG	sample342	0.006	UBERON:skin	M42Kner	M42Kner	FMA:Right popliteal fossa	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Knel.140707	CCAGTGTATGCA	left popliteal fossa	left back of knees	M22Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	M22Knel	FFO92CG	male	FFO92CG	sample116	0.006	UBERON:skin	M22Knel	M22Knel	FMA:Left popliteal fossa	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Pinl.140666	CTCTGCTAGCCT	left lateral pinna	left outer ear	M32Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M32Pinl	FFLHOYS	male	FFLHOYS	sample173	0.005	UBERON:skin	M32Pinl	M32Pinl	FMA:Surface of left pinna	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M34Fotl.140468	GTAGACTGCGTG	left plantar foot	left sole of foot	M34Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M34Fotl	FKB0RMH	male	FKB0RMH	sample244	0.006	UBERON:skin	M34Fotl	M34Fotl	FMA:Left foot surface	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Mout.140501	CATAGCGAGTTC	oral cavity	mouth	M21Mout	447426	UBERON:mouth	mouth	oral	8/20/08	M21Mout	FFO92CG	male	FFO92CG	sample261	0.006	UBERON:oral cavity	M21Mout	M21Mout	FMA:Oral cavity	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Plmr.140511	GAGTCTGAGTCT	right palm	right palm	M42Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	M42Plmr	FFO92CG	male	FFO92CG	sample437	0.006	UBERON:skin	M42Plmr	M42Plmr	FMA:Right palm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Fcsp.140589	AATCGTGACTCG	stool	stool	M11Fcsp	408170	UBERON:feces	feces	gut	8/18/08	M11Fcsp	FFLHOYS	male	FFLHOYS	sample494	0.005	UBERON:feces	M11Fcsp	M11Fcsp	FMA:Feces	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Ewxr.140794	CTGTCTCTCCTA	right external auditory canal	right outer ear canal/earwax	M41Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M41Ewxr	FFO92CG	male	FFO92CG	sample410	0.006	UBERON:external auditory canal	M41Ewxr	M41Ewxr	FMA:Right external auditory canal	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Nose.140308	GAGCTGGCTGAT	external nose	external nose	M42Nose	646099	UBERON:nose	nose	skin	8/20/08	M42Nose	FFO92CG	male	FFO92CG	sample032	0.006	UBERON:skin	M42Nose	M42Nose	FMA:External nose	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Nose.140465	GCAGCACGTTGA	external nose	external nose	F31Nose	646099	UBERON:nose	nose	skin	8/20/08	F31Nose	FFO92CG	female	FFO92CG	sample023	0.006	UBERON:skin	F31Nose	F31Nose	FMA:External nose	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M53Ewax.140448	TACTTACTGCAG	external auditory canal	outer ear canal/earwax	M53Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M53Ewax	FKB0RMH	male	FKB0RMH	sample311	0.006	UBERON:external auditory canal	M53Ewax	M53Ewax	FMA:External auditory canal	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M14Plmr.140579	GACAGGAGATAG	right palm	right palm	M14Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M14Plmr	FKB0RMH	male	FKB0RMH	sample427	0.006	UBERON:skin	M14Plmr	M14Plmr	FMA:Right palm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Forl.140333	GATGCATGACGC	left volar forearm	left forearm	F31Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/20/08	F31Forl	FFO92CG	female	FFO92CG	sample125	0.006	UBERON:skin	F31Forl	F31Forl	FMA:Surface of left arm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F33Knee.140702	GTATGCGCTGTA	popliteal fossae	back of knees	F33Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	F33Knee	FKB0RMH	female	FKB0RMH	sample005	0.006	UBERON:skin	F33Knee	F33Knee	FMA:Popliteal fossa	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F24Fotl.140630	GCTATTCGACAT	left plantar foot	left sole of foot	F24Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F24Fotl	FKB0RMH	female	FKB0RMH	sample228	0.006	UBERON:skin	F24Fotl	F24Fotl	FMA:Left foot surface	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Mout.140852	ACAGCAGTGGTC	oral cavity	mouth	M11Mout	447426	UBERON:mouth	mouth	oral	8/18/08	M11Mout	FFLHOYS	male	FFLHOYS	sample259	0.005	UBERON:oral cavity	M11Mout	M11Mout	FMA:Oral cavity	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Aptr.140458	GATCCGACACTA	right axilla	right armpit	F31Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/20/08	F31Aptr	FFO92CG	female	FFO92CG	sample319	0.006	UBERON:skin	F31Aptr	F31Aptr	FMA:Right axilla	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Navl.140334	ACTACAGCCTAT	umbilicus	navel	M12Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/18/08	M12Navl	FFLHOYS	male	FFLHOYS	sample273	0.005	UBERON:skin	M12Navl	M12Navl	FMA:Umbilicus	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Pinl.140315	AGCGAGCTATCT	left lateral pinna	left outer ear	F11Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	F11Pinl	FFLHOYS	female	FFLHOYS	sample163	0.005	UBERON:skin	F11Pinl	F11Pinl	FMA:Surface of left pinna	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Mout.140657	CTCCTACTGTCT	oral cavity	mouth	M32Mout	447426	UBERON:mouth	mouth	oral	8/18/08	M32Mout	FFLHOYS	male	FFLHOYS	sample264	0.005	UBERON:oral cavity	M32Mout	M32Mout	FMA:Oral cavity	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Fcsp.140824	CGTAAGTCTACT	stool	stool	M31Fcsp	408170	UBERON:feces	feces	gut	8/18/08	M31Fcsp	FFLHOYS	male	FFLHOYS	sample506	0.005	UBERON:feces	M31Fcsp	M31Fcsp	FMA:Feces	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M43Frhd.140486	GTGCAATCGACG	forehead	forehead	M43Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M43Frhd	FKB0RMH	male	FKB0RMH	sample059	0.006	UBERON:skin	M43Frhd	M43Frhd	FMA:Forehead	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M44Plml.140614	GTTGTATACTCG	left palm	left palm	M44Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M44Plml	FKB0RMH	male	FKB0RMH	sample216	0.006	UBERON:skin	M44Plml	M44Plml	FMA:Left palm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Mout.140350	ACGTTAGCACAC	oral cavity	mouth	M12Mout	447426	UBERON:mouth	mouth	oral	8/18/08	M12Mout	FFLHOYS	male	FFLHOYS	sample260	0.005	UBERON:oral cavity	M12Mout	M12Mout	FMA:Oral cavity	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F23Plml.140585	GCATCGTCAACA	left palm	left palm	F23Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F23Plml	FKB0RMH	female	FKB0RMH	sample195	0.006	UBERON:skin	F23Plml	F23Plml	FMA:Left palm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Tong.140808	CTACATCTAAGC	dorsal surface of tongue	tongue	M31Tong	447426	UBERON:tongue	tongue	oral	8/18/08	M31Tong	FFLHOYS	male	FFLHOYS	sample541	0.005	UBERON:oral cavity	M31Tong	M31Tong	FMA:Dorsal surface of tongue	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Navl.140293	GCTCGCTACTTC	umbilicus	navel	F32Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/20/08	F32Navl	FFO92CG	female	FFO92CG	sample271	0.006	UBERON:skin	F32Navl	F32Navl	FMA:Umbilicus	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Aptr.140377	GACGTTGCACAG	right axilla	right armpit	M42Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/20/08	M42Aptr	FFO92CG	male	FFO92CG	sample328	0.006	UBERON:skin	M42Aptr	M42Aptr	FMA:Right axilla	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Kner.140721	CAGCATGTGTTG	right popliteal fossa	right back of knees	M21Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	M21Kner	FFO92CG	male	FFO92CG	sample337	0.006	UBERON:skin	M21Kner	M21Kner	FMA:Right popliteal fossa	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Forr.140708	CGAATCGACACT	right volar forearm	right forearm	M22Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/20/08	M22Forr	FFO92CG	male	FFO92CG	sample352	0.006	UBERON:skin	M22Forr	M22Forr	FMA:Surface of right arm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Fcsp.140694	AGTACTGCAGGC	stool	stool	F12Fcsp	408170	UBERON:feces	feces	gut	8/18/08	F12Fcsp	FFLHOYS	female	FFLHOYS	sample478	0.005	UBERON:feces	F12Fcsp	F12Fcsp	FMA:Feces	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Navl.140483	ACAGCTAGCTTG	umbilicus	navel	M11Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/18/08	M11Navl	FFLHOYS	male	FFLHOYS	sample272	0.005	UBERON:skin	M11Navl	M11Navl	FMA:Umbilicus	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Plmr.140385	GCAGTATCACTG	right palm	right palm	F31Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	F31Plmr	FFO92CG	female	FFO92CG	sample420	0.006	UBERON:skin	F31Plmr	F31Plmr	FMA:Right palm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Fotl.140555	CGCACTCTAGAA	left plantar foot	left sole of foot	M22Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	M22Fotl	FFO92CG	male	FFO92CG	sample238	0.006	UBERON:skin	M22Fotl	M22Fotl	FMA:Left foot surface	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Fotl.140455	GCTTGCGAGACA	left plantar foot	left sole of foot	F32Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	F32Fotl	FFO92CG	female	FFO92CG	sample230	0.006	UBERON:skin	F32Fotl	F32Fotl	FMA:Left foot surface	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Pinr.140509	GACCACTACGAT	right lateral pinna	right outer ear	M41Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M41Pinr	FFO92CG	male	FFO92CG	sample397	0.006	UBERON:skin	M41Pinr	M41Pinr	FMA:Surface of right pinna	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Plmr.140866	ACATGTCACGTG	right palm	right palm	M11Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	M11Plmr	FFLHOYS	male	FFLHOYS	sample424	0.005	UBERON:skin	M11Plmr	M11Plmr	FMA:Right palm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Plmr.140656	AGCGACTGTGCA	right palm	right palm	F11Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	F11Plmr	FFLHOYS	female	FFLHOYS	sample412	0.005	UBERON:skin	F11Plmr	F11Plmr	FMA:Right palm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Aptl.140790	CATTGTCTGTGA	left axilla	left armpit	M22Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/20/08	M22Aptl	FFO92CG	male	FFO92CG	sample102	0.006	UBERON:skin	M22Aptl	M22Aptl	FMA:Left axilla	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Plml.140625	CGATCGAGTGTT	left palm	left palm	M22Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	M22Plml	FFO92CG	male	FFO92CG	sample206	0.006	UBERON:skin	M22Plml	M22Plml	FMA:Left palm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F34Plmr.140416	GTCGCTGTCTTC	right palm	right palm	F34Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F34Plmr	FKB0RMH	female	FKB0RMH	sample423	0.006	UBERON:skin	F34Plmr	F34Plmr	FMA:Right palm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Indl.140438	ACGTGAGAGAAT	left palmar index finger	left index finger	M12Indl	539665	UBERON:skin of finger	skin of finger	skin	8/18/08	M12Indl	FFLHOYS	male	FFLHOYS	sample142	0.005	UBERON:skin	M12Indl	M12Indl	FMA:Left index finger	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Aptr.140847	CTACTGATATCG	right axilla	right armpit	M32Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/18/08	M32Aptr	FFLHOYS	male	FFLHOYS	sample326	0.005	UBERON:skin	M32Aptr	M32Aptr	FMA:Right axilla	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Fotr.140859	CATGTAATGCTC	right plantar foot	right sole of foot	M21Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	M21Fotr	FFO92CG	male	FFO92CG	sample460	0.006	UBERON:skin	M21Fotr	M21Fotr	FMA:Right foot surface	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M24Fotl.140320	GCAGCCGAGTAT	left plantar foot	left sole of foot	M24Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M24Fotl	FKB0RMH	male	FKB0RMH	sample240	0.006	UBERON:skin	M24Fotl	M24Fotl	FMA:Left foot surface	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Fotr.140751	ACCAGCGACTAG	right plantar foot	right sole of foot	M11Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	M11Fotr	FFLHOYS	male	FFLHOYS	sample456	0.005	UBERON:skin	M11Fotr	M11Fotr	FMA:Right foot surface	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Indr.140425	CATAGACGTTCG	right palmar index finger	right index finger	M21Indr	539669	UBERON:skin of finger	skin of finger	skin	8/20/08	M21Indr	FFO92CG	male	FFO92CG	sample365	0.006	UBERON:skin	M21Indr	M21Indr	FMA:Right index finger	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F23Fcsw.140551	GCGTACAACTGT	stool	stool	F23Fcsw	408170	UBERON:feces	feces	gut	11/14/08	F23Fcsw	FKB0RMH	female	FKB0RMH	sample486	0.006	UBERON:feces	F23Fcsw	F23Fcsw	FMA:Feces	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M33Plmr.140723	GCTGTAGTATGC	right palm	right palm	M33Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M33Plmr	FKB0RMH	male	FKB0RMH	sample434	0.006	UBERON:skin	M33Plmr	M33Plmr	FMA:Right palm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Indr.140640	CACAGTGGACGT	right palmar index finger	right index finger	F22Indr	539669	UBERON:skin of finger	skin of finger	skin	8/18/08	F22Indr	FFLHOYS	female	FFLHOYS	sample360	0.005	UBERON:skin	F22Indr	F22Indr	FMA:Right index finger	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Indr.140633	ACAGAGTCGGCT	right palmar index finger	right index finger	M11Indr	539669	UBERON:skin of finger	skin of finger	skin	8/18/08	M11Indr	FFLHOYS	male	FFLHOYS	sample363	0.005	UBERON:skin	M11Indr	M11Indr	FMA:Right index finger	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M33Plml.140822	GCTGTGTAGGAC	left palm	left palm	M33Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M33Plml	FKB0RMH	male	FKB0RMH	sample211	0.006	UBERON:skin	M33Plml	M33Plml	FMA:Left palm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F34Frhd.140568	GTCGTGTGTCAA	forehead	forehead	F34Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	F34Frhd	FKB0RMH	female	FKB0RMH	sample044	0.006	UBERON:skin	F34Frhd	F34Frhd	FMA:Forehead	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Knel.140275	ACGAGTGCTATC	left popliteal fossa	left back of knees	M12Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	M12Knel	FFLHOYS	male	FFLHOYS	sample114	0.005	UBERON:skin	M12Knel	M12Knel	FMA:Left popliteal fossa	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Plmr.140839	ATACACGTGGCG	right palm	right palm	F12Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	F12Plmr	FFLHOYS	female	FFLHOYS	sample413	0.005	UBERON:skin	F12Plmr	F12Plmr	FMA:Right palm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Nose.140758	CACGGACTATAC	external nose	external nose	F22Nose	646099	UBERON:nose	nose	skin	8/18/08	F22Nose	FFLHOYS	female	FFLHOYS	sample022	0.005	UBERON:skin	F22Nose	F22Nose	FMA:External nose	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Aptr.140609	ATGTCACCGTGA	right axilla	right armpit	F22Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/18/08	F22Aptr	FFLHOYS	female	FFLHOYS	sample318	0.005	UBERON:skin	F22Aptr	F22Aptr	FMA:Right axilla	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Forr.140353	ATCGCTCGAGGA	right volar forearm	right forearm	F21Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/18/08	F21Forr	FFLHOYS	female	FFLHOYS	sample345	0.005	UBERON:skin	F21Forr	F21Forr	FMA:Surface of right arm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Kner.140761	GATCGTCCAGAT	right popliteal fossa	right back of knees	F31Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	F31Kner	FFO92CG	female	FFO92CG	sample333	0.006	UBERON:skin	F31Kner	F31Kner	FMA:Right popliteal fossa	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Aptl.140296	CTACTACAGGTG	left axilla	left armpit	M32Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/18/08	M32Aptl	FFLHOYS	male	FFLHOYS	sample104	0.005	UBERON:skin	M32Aptl	M32Aptl	FMA:Left axilla	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F14Knee.140634	GAGTATGCAGCC	popliteal fossae	back of knees	F14Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	F14Knee	FKB0RMH	female	FKB0RMH	sample002	0.006	UBERON:skin	F14Knee	F14Knee	FMA:Popliteal fossa	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Indl.140607	GCTAGTCTGAAC	left palmar index finger	left index finger	F32Indl	539665	UBERON:skin of finger	skin of finger	skin	8/20/08	F32Indl	FFO92CG	female	FFO92CG	sample140	0.006	UBERON:skin	F32Indl	F32Indl	FMA:Left index finger	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Knel.140332	CTAGAACGCACT	left popliteal fossa	left back of knees	M32Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	M32Knel	FFLHOYS	male	FFLHOYS	sample118	0.005	UBERON:skin	M32Knel	M32Knel	FMA:Left popliteal fossa	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Aptr.140618	ACTGTGACTTCA	right axilla	right armpit	F11Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/18/08	F11Aptr	FFLHOYS	female	FFLHOYS	sample315	0.005	UBERON:skin	F11Aptr	F11Aptr	FMA:Right axilla	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M63Plml.140689	TATACGCGCATT	left palm	left palm	M63Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M63Plml	FKB0RMH	male	FKB0RMH	sample219	0.006	UBERON:skin	M63Plml	M63Plml	FMA:Left palm	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Ewxr.140604	GACTCGAATCGT	right external auditory canal	right outer ear canal/earwax	M42Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M42Ewxr	FFO92CG	male	FFO92CG	sample411	0.006	UBERON:external auditory canal	M42Ewxr	M42Ewxr	FMA:Right external auditory canal	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Fotl.140712	CTAACGCAGTCA	left plantar foot	left sole of foot	M31Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	M31Fotl	FFLHOYS	male	FFLHOYS	sample241	0.005	UBERON:skin	M31Fotl	M31Fotl	FMA:Left foot surface	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Indl.140446	ATCTCTGGCATA	left palmar index finger	left index finger	F21Indl	539665	UBERON:skin of finger	skin of finger	skin	8/18/08	F21Indl	FFLHOYS	female	FFLHOYS	sample137	0.005	UBERON:skin	F21Indl	F21Indl	FMA:Left index finger	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Mout.140605	GCACTCGTTAGA	oral cavity	mouth	F31Mout	447426	UBERON:mouth	mouth	oral	8/20/08	F31Mout	FFO92CG	female	FFO92CG	sample257	0.006	UBERON:oral cavity	F31Mout	F31Mout	FMA:Oral cavity	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Tong.140364	CGCAGCGGTATA	dorsal surface of tongue	tongue	M22Tong	447426	UBERON:tongue	tongue	oral	8/20/08	M22Tong	FFO92CG	male	FFO92CG	sample538	0.006	UBERON:oral cavity	M22Tong	M22Tong	FMA:Dorsal surface of tongue	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Fotr.140572	CTACACAAGCAC	right plantar foot	right sole of foot	M31Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	M31Fotr	FFLHOYS	male	FFLHOYS	sample464	0.005	UBERON:skin	M31Fotr	M31Fotr	FMA:Right foot surface	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Nose.140573	ATGACTCATTCG	external nose	external nose	F21Nose	646099	UBERON:nose	nose	skin	8/18/08	F21Nose	FFLHOYS	female	FFLHOYS	sample021	0.005	UBERON:skin	F21Nose	F21Nose	FMA:External nose	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Knel.140720	ATGTGCACGACT	left popliteal fossa	left back of knees	F22Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	F22Knel	FFLHOYS	female	FFLHOYS	sample110	0.005	UBERON:skin	F22Knel	F22Knel	FMA:Left popliteal fossa	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Forr.140422	AGAGTAGCTAAG	right volar forearm	right forearm	F11Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/18/08	F11Forr	FFLHOYS	female	FFLHOYS	sample343	0.005	UBERON:skin	F11Forr	F11Forr	FMA:Surface of right arm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Fotr.140861	GATAGCTGTCTT	right plantar foot	right sole of foot	M42Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	M42Fotr	FFO92CG	male	FFO92CG	sample469	0.006	UBERON:skin	M42Fotr	M42Fotr	FMA:Right foot surface	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Fotr.140562	CTGAGATACGCG	right plantar foot	right sole of foot	M32Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	M32Fotr	FFLHOYS	male	FFLHOYS	sample465	0.005	UBERON:skin	M32Fotr	M32Fotr	FMA:Right foot surface	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M13Tong.140491	GAAGTCTCGCAT	dorsal surface of tongue	tongue	M13Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M13Tong	FKB0RMH	male	FKB0RMH	sample535	0.006	UBERON:oral cavity	M13Tong	M13Tong	FMA:Dorsal surface of tongue	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M13Plmr.140301	CTGTTCGTAGAG	right palm	right palm	M13Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M13Plmr	FKB0RMH	male	FKB0RMH	sample426	0.006	UBERON:skin	M13Plmr	M13Plmr	FMA:Right palm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Indl.140576	CATACCAGTAGC	left palmar index finger	left index finger	M21Indl	539665	UBERON:skin of finger	skin of finger	skin	8/20/08	M21Indl	FFO92CG	male	FFO92CG	sample143	0.006	UBERON:skin	M21Indl	M21Indl	FMA:Left index finger	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Tong.140655	ATGGCGTGCACA	dorsal surface of tongue	tongue	F21Tong	447426	UBERON:tongue	tongue	oral	8/18/08	F21Tong	FFLHOYS	female	FFLHOYS	sample525	0.005	UBERON:oral cavity	F21Tong	F21Tong	FMA:Dorsal surface of tongue	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Forr.140849	ACGGATCGTCAG	right volar forearm	right forearm	M12Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/18/08	M12Forr	FFLHOYS	male	FFLHOYS	sample350	0.005	UBERON:skin	M12Forr	M12Forr	FMA:Surface of right arm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M43Plmr.140716	GTGAGGTCGCTA	right palm	right palm	M43Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M43Plmr	FKB0RMH	male	FKB0RMH	sample438	0.006	UBERON:skin	M43Plmr	M43Plmr	FMA:Right palm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Kner.140397	GCGACTTGTGTA	right popliteal fossa	right back of knees	F32Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	F32Kner	FFO92CG	female	FFO92CG	sample334	0.006	UBERON:skin	F32Kner	F32Kner	FMA:Right popliteal fossa	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Knel.140372	ATCACGTAGCGG	left popliteal fossa	left back of knees	F21Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	F21Knel	FFLHOYS	female	FFLHOYS	sample109	0.005	UBERON:skin	F21Knel	F21Knel	FMA:Left popliteal fossa	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Plml.140743	CGTGTGATCAGG	left palm	left palm	M31Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	M31Plml	FFLHOYS	male	FFLHOYS	sample209	0.005	UBERON:skin	M31Plml	M31Plml	FMA:Left palm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Pinr.140867	TGCGCGAATACT	right lateral pinna	right outer ear	F11Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	F11Pinr	FKB0RMH	female	FKB0RMH	sample385	0.006	UBERON:skin	F11Pinr	F11Pinr	FMA:Surface of right pinna	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M24Frhd.140442	GCACGACAACAC	forehead	forehead	M24Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M24Frhd	FKB0RMH	male	FKB0RMH	sample052	0.006	UBERON:skin	M24Frhd	M24Frhd	FMA:Forehead	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Tong.140837	CAGACTCGCAGA	dorsal surface of tongue	tongue	F22Tong	447426	UBERON:tongue	tongue	oral	8/18/08	F22Tong	FFLHOYS	female	FFLHOYS	sample526	0.005	UBERON:oral cavity	F22Tong	F22Tong	FMA:Dorsal surface of tongue	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M53Plmr.140520	TACGCGCTGAGA	right palm	right palm	M53Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M53Plmr	FKB0RMH	male	FKB0RMH	sample440	0.006	UBERON:skin	M53Plmr	M53Plmr	FMA:Right palm	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M64Frhd.140834	TCACTGGCAGTA	forehead	forehead	M64Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M64Frhd	FKB0RMH	male	FKB0RMH	sample064	0.006	UBERON:skin	M64Frhd	M64Frhd	FMA:Forehead	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Plmr.140842	CATCTGTAGCGA	right palm	right palm	M21Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	M21Plmr	FFO92CG	male	FFO92CG	sample428	0.006	UBERON:skin	M21Plmr	M21Plmr	FMA:Right palm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Tong.140323	CATGTCTCTCCG	dorsal surface of tongue	tongue	M21Tong	447426	UBERON:tongue	tongue	oral	8/20/08	M21Tong	FFO92CG	male	FFO92CG	sample537	0.006	UBERON:oral cavity	M21Tong	M21Tong	FMA:Dorsal surface of tongue	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Knel.140676	AACTGTGCGTAC	left popliteal fossa	left back of knees	M11Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	M11Knel	FFLHOYS	male	FFLHOYS	sample113	0.005	UBERON:skin	M11Knel	M11Knel	FMA:Left popliteal fossa	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M53Plml.140660	TACGGTATGTCT	left palm	left palm	M53Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M53Plml	FKB0RMH	male	FKB0RMH	sample217	0.006	UBERON:skin	M53Plml	M53Plml	FMA:Left palm	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Pinr.140598	ACTCGCACAGGA	right lateral pinna	right outer ear	M12Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M12Pinr	FFLHOYS	male	FFLHOYS	sample392	0.005	UBERON:skin	M12Pinr	M12Pinr	FMA:Surface of right pinna	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Pinr.140389	CTGAACGCTAGT	right lateral pinna	right outer ear	M32Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M32Pinr	FFLHOYS	male	FFLHOYS	sample396	0.005	UBERON:skin	M32Pinr	M32Pinr	FMA:Surface of right pinna	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Pinr.140773	ATGCGTAGTGCG	right lateral pinna	right outer ear	F21Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	F21Pinr	FFLHOYS	female	FFLHOYS	sample387	0.005	UBERON:skin	F21Pinr	F21Pinr	FMA:Surface of right pinna	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Tong.140785	GCATGTGCATGT	dorsal surface of tongue	tongue	F31Tong	447426	UBERON:tongue	tongue	oral	8/20/08	F31Tong	FFO92CG	female	FFO92CG	sample529	0.006	UBERON:oral cavity	F31Tong	F31Tong	FMA:Dorsal surface of tongue	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Indr.140471	CGAGCAGCACAT	right palmar index finger	right index finger	M22Indr	539669	UBERON:skin of finger	skin of finger	skin	8/20/08	M22Indr	FFO92CG	male	FFO92CG	sample366	0.006	UBERON:skin	M22Indr	M22Indr	FMA:Right index finger	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M13Ewax.140391	GAAGCTACTGTC	external auditory canal	outer ear canal/earwax	M13Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M13Ewax	FKB0RMH	male	FKB0RMH	sample303	0.006	UBERON:external auditory canal	M13Ewax	M13Ewax	FMA:External auditory canal	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F24Ewax.140807	GCTCAGTGCAGA	external auditory canal	outer ear canal/earwax	F24Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	F24Ewax	FKB0RMH	female	FKB0RMH	sample300	0.006	UBERON:external auditory canal	F24Ewax	F24Ewax	FMA:External auditory canal	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M64Plml.140628	TCACTATGGTCA	left palm	left palm	M64Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M64Plml	FKB0RMH	male	FKB0RMH	sample220	0.006	UBERON:skin	M64Plml	M64Plml	FMA:Left palm	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M54Knee.140481	TAGCGACATCTG	popliteal fossae	back of knees	M54Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M54Knee	FKB0RMH	male	FKB0RMH	sample016	0.006	UBERON:skin	M54Knee	M54Knee	FMA:Popliteal fossa	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Nose.140763	ACTACGTGTGGT	external nose	external nose	M12Nose	646099	UBERON:nose	nose	skin	8/18/08	M12Nose	FFLHOYS	male	FFLHOYS	sample026	0.005	UBERON:skin	M12Nose	M12Nose	FMA:External nose	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M64Fcsw.140360	TCAGTGACGTAC	stool	stool	M64Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M64Fcsw	FKB0RMH	male	FKB0RMH	sample520	0.006	UBERON:feces	M64Fcsw	M64Fcsw	FMA:Feces	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M63Tong.140865	TATTCGTGTCAG	dorsal surface of tongue	tongue	M63Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M63Tong	FKB0RMH	male	FKB0RMH	sample551	0.006	UBERON:oral cavity	M63Tong	M63Tong	FMA:Dorsal surface of tongue	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Forr.140759	ACACGAGCCACA	right volar forearm	right forearm	M11Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/18/08	M11Forr	FFLHOYS	male	FFLHOYS	sample349	0.005	UBERON:skin	M11Forr	M11Forr	FMA:Surface of right arm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M13Plml.140472	CTTAGCACATCA	left palm	left palm	M13Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M13Plml	FKB0RMH	male	FKB0RMH	sample203	0.006	UBERON:skin	M13Plml	M13Plml	FMA:Left palm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F23Plmr.140435	GCATATAGTCTC	right palm	right palm	F23Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F23Plmr	FKB0RMH	female	FKB0RMH	sample418	0.006	UBERON:skin	F23Plmr	F23Plmr	FMA:Right palm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Tong.140366	AGCTCCATACAG	dorsal surface of tongue	tongue	F11Tong	447426	UBERON:tongue	tongue	oral	8/18/08	F11Tong	FFLHOYS	female	FFLHOYS	sample521	0.005	UBERON:oral cavity	F11Tong	F11Tong	FMA:Dorsal surface of tongue	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M14Tong.140420	GACGCTAGTTCA	dorsal surface of tongue	tongue	M14Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M14Tong	FKB0RMH	male	FKB0RMH	sample536	0.006	UBERON:oral cavity	M14Tong	M14Tong	FMA:Dorsal surface of tongue	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Plml.140347	CACTACTGTTGA	left palm	left palm	F22Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	F22Plml	FFLHOYS	female	FFLHOYS	sample194	0.005	UBERON:skin	F22Plml	F22Plml	FMA:Left palm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M23Plml.140503	GATCCGACACTA	left palm	left palm	M23Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M23Plml	FKB0RMH	male	FKB0RMH	sample207	0.006	UBERON:skin	M23Plml	M23Plml	FMA:Left palm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Kner.140478	AGAACACGTCTC	right popliteal fossa	right back of knees	F11Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	F11Kner	FFLHOYS	female	FFLHOYS	sample329	0.005	UBERON:skin	F11Kner	F11Kner	FMA:Right popliteal fossa	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Aptl.140780	ACTGTCGAAGCT	left axilla	left armpit	F11Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/18/08	F11Aptl	FFLHOYS	female	FFLHOYS	sample093	0.005	UBERON:skin	F11Aptl	F11Aptl	FMA:Left axilla	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Fotl.140558	AGCGTAGGTCGT	left plantar foot	left sole of foot	F11Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	F11Fotl	FFLHOYS	female	FFLHOYS	sample221	0.005	UBERON:skin	F11Fotl	F11Fotl	FMA:Left foot surface	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Indl.140749	ACAGACCACTCA	left palmar index finger	left index finger	M11Indl	539665	UBERON:skin of finger	skin of finger	skin	8/18/08	M11Indl	FFLHOYS	male	FFLHOYS	sample141	0.005	UBERON:skin	M11Indl	M11Indl	FMA:Left index finger	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Navl.140637	CGAGTCTAGTTG	umbilicus	navel	M22Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/20/08	M22Navl	FFO92CG	male	FFO92CG	sample275	0.006	UBERON:skin	M22Navl	M22Navl	FMA:Umbilicus	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Knel.140379	GATCGCAGGTGT	left popliteal fossa	left back of knees	F31Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	F31Knel	FFO92CG	female	FFO92CG	sample111	0.006	UBERON:skin	F31Knel	F31Knel	FMA:Left popliteal fossa	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Knel.140699	GACTAACGTCAC	left popliteal fossa	left back of knees	M42Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	M42Knel	FFO92CG	male	FFO92CG	sample120	0.006	UBERON:skin	M42Knel	M42Knel	FMA:Left popliteal fossa	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Fotr.140874	GCATCGTCAACA	right plantar foot	right sole of foot	F31Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	F31Fotr	FFO92CG	female	FFO92CG	sample452	0.006	UBERON:skin	F31Fotr	F31Fotr	FMA:Right foot surface	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Aptl.140409	GATCAGAAGATG	left axilla	left armpit	F31Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/20/08	F31Aptl	FFO92CG	female	FFO92CG	sample097	0.006	UBERON:skin	F31Aptl	F31Aptl	FMA:Left axilla	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Frhd.140287	CGACAGCTGACA	forehead	forehead	M22Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/20/08	M22Frhd	FFO92CG	male	FFO92CG	sample050	0.006	UBERON:skin	M22Frhd	M22Frhd	FMA:Forehead	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F14Ewax.140371	GAGTGGTAGAGA	external auditory canal	outer ear canal/earwax	F14Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	F14Ewax	FKB0RMH	female	FKB0RMH	sample298	0.006	UBERON:external auditory canal	F14Ewax	F14Ewax	FMA:External auditory canal	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Kner.140718	CCATACATAGCT	right popliteal fossa	right back of knees	M22Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	M22Kner	FFO92CG	male	FFO92CG	sample338	0.006	UBERON:skin	M22Kner	M22Kner	FMA:Right popliteal fossa	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Fotl.140843	CTGACACGACAG	left plantar foot	left sole of foot	M32Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	M32Fotl	FFLHOYS	male	FFLHOYS	sample242	0.005	UBERON:skin	M32Fotl	M32Fotl	FMA:Left foot surface	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Fcsp.140374	GACTGATCATCT	stool	stool	M42Fcsp	408170	UBERON:feces	feces	gut	8/20/08	M42Fcsp	FFO92CG	male	FFO92CG	sample513	0.006	UBERON:feces	M42Fcsp	M42Fcsp	FMA:Feces	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M14Fcsw.140443	GACTAGACCAGC	stool	stool	M14Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M14Fcsw	FKB0RMH	male	FKB0RMH	sample499	0.006	UBERON:feces	M14Fcsw	M14Fcsw	FMA:Feces	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F14Fotl.140552	GAGTGAGTACAA	left plantar foot	left sole of foot	F14Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F14Fotl	FKB0RMH	female	FKB0RMH	sample224	0.006	UBERON:skin	F14Fotl	F14Fotl	FMA:Left foot surface	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Fcsw.140492	GATGATCGCCGA	stool	stool	F31Fcsw	408170	UBERON:feces	feces	gut	8/20/08	F31Fcsw	FFO92CG	female	FFO92CG	sample489	0.006	UBERON:feces	F31Fcsw	F31Fcsw	FMA:Feces	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Forl.140279	CAACTCATCGTA	left volar forearm	left forearm	F22Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/18/08	F22Forl	FFLHOYS	female	FFLHOYS	sample124	0.005	UBERON:skin	F22Forl	F22Forl	FMA:Surface of left arm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Nose.140325	AGCAGCACTTGT	external nose	external nose	F11Nose	646099	UBERON:nose	nose	skin	8/18/08	F11Nose	FFLHOYS	female	FFLHOYS	sample019	0.005	UBERON:skin	F11Nose	F11Nose	FMA:External nose	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Fotr.140381	CGCAGACAGACT	right plantar foot	right sole of foot	M22Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	M22Fotr	FFO92CG	male	FFO92CG	sample461	0.006	UBERON:skin	M22Fotr	M22Fotr	FMA:Right foot surface	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M44Frhd.140730	TAACAGTCGCTG	forehead	forehead	M44Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M44Frhd	FKB0RMH	male	FKB0RMH	sample060	0.006	UBERON:skin	M44Frhd	M44Frhd	FMA:Forehead	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Ewxr.140754	CGGCGATGTACA	right external auditory canal	right outer ear canal/earwax	M31Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M31Ewxr	FFLHOYS	male	FFLHOYS	sample408	0.005	UBERON:external auditory canal	M31Ewxr	M31Ewxr	FMA:Right external auditory canal	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M53Frhd.140768	TACGTGTACGTG	forehead	forehead	M53Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	M53Frhd	FKB0RMH	male	FKB0RMH	sample061	0.006	UBERON:skin	M53Frhd	M53Frhd	FMA:Forehead	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Nose.140365	AGTGTTCGATCG	external nose	external nose	F12Nose	646099	UBERON:nose	nose	skin	8/18/08	F12Nose	FFLHOYS	female	FFLHOYS	sample020	0.005	UBERON:skin	F12Nose	F12Nose	FMA:External nose	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M14Fotr.140729	GACGAGTCAGTC	right plantar foot	right sole of foot	M14Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M14Fotr	FKB0RMH	male	FKB0RMH	sample459	0.006	UBERON:skin	M14Fotr	M14Fotr	FMA:Right foot surface	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Ewxr.140747	ATTCTGTGAGCG	right external auditory canal	right outer ear canal/earwax	F22Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	F22Ewxr	FFLHOYS	female	FFLHOYS	sample402	0.005	UBERON:external auditory canal	F22Ewxr	F22Ewxr	FMA:Right external auditory canal	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Frhd.140469	AGTCTACTCTGA	forehead	forehead	F12Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/18/08	F12Frhd	FFLHOYS	female	FFLHOYS	sample034	0.005	UBERON:skin	F12Frhd	F12Frhd	FMA:Forehead	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F13Knee.140670	GACTGTCATGCA	popliteal fossae	back of knees	F13Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	F13Knee	FKB0RMH	female	FKB0RMH	sample001	0.006	UBERON:skin	F13Knee	F13Knee	FMA:Popliteal fossa	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Navl.140566	AGCACGAGCCTA	umbilicus	navel	F11Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/18/08	F11Navl	FFLHOYS	female	FFLHOYS	sample267	0.005	UBERON:skin	F11Navl	F11Navl	FMA:Umbilicus	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Aptr.140500	ATATGCCAGTGC	right axilla	right armpit	F21Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/18/08	F21Aptr	FFLHOYS	female	FFLHOYS	sample317	0.005	UBERON:skin	F21Aptr	F21Aptr	FMA:Right axilla	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Aptr.140339	GCCAGAGTCGTA	right axilla	right armpit	F32Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/20/08	F32Aptr	FFO92CG	female	FFO92CG	sample320	0.006	UBERON:skin	F32Aptr	F32Aptr	FMA:Right axilla	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M34Fotr.140476	GTACTCTAGACT	right plantar foot	right sole of foot	M34Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M34Fotr	FKB0RMH	male	FKB0RMH	sample467	0.006	UBERON:skin	M34Fotr	M34Fotr	FMA:Right foot surface	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M44Knee.140736	TAAGCGCAGCAC	popliteal fossae	back of knees	M44Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M44Knee	FKB0RMH	male	FKB0RMH	sample014	0.006	UBERON:skin	M44Knee	M44Knee	FMA:Popliteal fossa	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F13Tong.140677	GAGAGAATGATC	dorsal surface of tongue	tongue	F13Tong	447426	UBERON:tongue	tongue	oral	11/14/08	F13Tong	FKB0RMH	female	FKB0RMH	sample523	0.006	UBERON:oral cavity	F13Tong	F13Tong	FMA:Dorsal surface of tongue	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Tong.140410	GGATCGCAGATC	dorsal surface of tongue	tongue	F32Tong	447426	UBERON:tongue	tongue	oral	8/20/08	F32Tong	FFO92CG	female	FFO92CG	sample530	0.006	UBERON:oral cavity	F32Tong	F32Tong	FMA:Dorsal surface of tongue	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M13Knee.140288	GAACATGATGAG	popliteal fossae	back of knees	M13Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M13Knee	FKB0RMH	male	FKB0RMH	sample007	0.006	UBERON:skin	M13Knee	M13Knee	FMA:Popliteal fossa	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M64Knee.140570	TCAGACAGACCG	popliteal fossae	back of knees	M64Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M64Knee	FKB0RMH	male	FKB0RMH	sample018	0.006	UBERON:skin	M64Knee	M64Knee	FMA:Popliteal fossa	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M44Fcsw.140418	TACGATGACCAC	stool	stool	M44Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M44Fcsw	FKB0RMH	male	FKB0RMH	sample516	0.006	UBERON:feces	M44Fcsw	M44Fcsw	FMA:Feces	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Aptl.140554	CAGATACACTTC	left axilla	left armpit	M21Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/20/08	M21Aptl	FFO92CG	male	FFO92CG	sample101	0.006	UBERON:skin	M21Aptl	M21Aptl	FMA:Left axilla	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Nose.140682	CATATCGCAGTT	external nose	external nose	M21Nose	646099	UBERON:nose	nose	skin	8/20/08	M21Nose	FFO92CG	male	FFO92CG	sample027	0.006	UBERON:skin	M21Nose	M21Nose	FMA:External nose	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Aptl.140844	ACCTGTCTCTCT	left axilla	left armpit	M12Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/18/08	M12Aptl	FFLHOYS	male	FFLHOYS	sample100	0.005	UBERON:skin	M12Aptl	M12Aptl	FMA:Left axilla	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Fcsw.140359	CTGTGGATCGAT	stool	stool	M41Fcsw	408170	UBERON:feces	feces	gut	8/20/08	M41Fcsw	FFO92CG	male	FFO92CG	sample512	0.006	UBERON:feces	M41Fcsw	M41Fcsw	FMA:Feces	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Indl.140644	CACAGCTCGAAT	left palmar index finger	left index finger	F22Indl	539665	UBERON:skin of finger	skin of finger	skin	8/18/08	F22Indl	FFLHOYS	female	FFLHOYS	sample138	0.005	UBERON:skin	F22Indl	F22Indl	FMA:Left index finger	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Plml.140620	ACATGATCGTTC	left palm	left palm	M11Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	M11Plml	FFLHOYS	male	FFLHOYS	sample201	0.005	UBERON:skin	M11Plml	M11Plml	FMA:Left palm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Aptr.140378	AACTCGTCGATG	right axilla	right armpit	M11Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/18/08	M11Aptr	FFLHOYS	male	FFLHOYS	sample321	0.005	UBERON:skin	M11Aptr	M11Aptr	FMA:Right axilla	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Ewxr.140797	AGACGTGCACTG	right external auditory canal	right outer ear canal/earwax	F11Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	F11Ewxr	FFLHOYS	female	FFLHOYS	sample399	0.005	UBERON:external auditory canal	F11Ewxr	F11Ewxr	FMA:Right external auditory canal	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F34Knee.140341	GTCTATCGGAGT	popliteal fossae	back of knees	F34Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	F34Knee	FKB0RMH	female	FKB0RMH	sample006	0.006	UBERON:skin	F34Knee	F34Knee	FMA:Popliteal fossa	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Indr.140781	GAAGAGTGATCA	right palmar index finger	right index finger	M41Indr	539669	UBERON:skin of finger	skin of finger	skin	8/20/08	M41Indr	FFO92CG	male	FFO92CG	sample369	0.006	UBERON:skin	M41Indr	M41Indr	FMA:Right index finger	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M44Tong.140642	TACAGTCTCATG	dorsal surface of tongue	tongue	M44Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M44Tong	FKB0RMH	male	FKB0RMH	sample548	0.006	UBERON:oral cavity	M44Tong	M44Tong	FMA:Dorsal surface of tongue	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Frhd.140641	CAGTGATCCTAG	forehead	forehead	M21Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/20/08	M21Frhd	FFO92CG	male	FFO92CG	sample049	0.006	UBERON:skin	M21Frhd	M21Frhd	FMA:Forehead	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F13Ewax.140760	GAGACAGCTTGC	external auditory canal	outer ear canal/earwax	F13Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	F13Ewax	FKB0RMH	female	FKB0RMH	sample297	0.006	UBERON:external auditory canal	F13Ewax	F13Ewax	FMA:External auditory canal	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Aptr.140772	CTGGACTCATAG	right axilla	right armpit	M41Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/20/08	M41Aptr	FFO92CG	male	FFO92CG	sample327	0.006	UBERON:skin	M41Aptr	M41Aptr	FMA:Right axilla	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M54Plmr.140626	TAGATCCTCGAT	right palm	right palm	M54Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M54Plmr	FKB0RMH	male	FKB0RMH	sample441	0.006	UBERON:skin	M54Plmr	M54Plmr	FMA:Right palm	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F13Fotr.140798	GACTTCAGTGTG	right plantar foot	right sole of foot	F13Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F13Fotr	FKB0RMH	female	FKB0RMH	sample446	0.006	UBERON:skin	F13Fotr	F13Fotr	FMA:Right foot surface	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M44Plmr.140868	GTTGACGACAGC	right palm	right palm	M44Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M44Plmr	FKB0RMH	male	FKB0RMH	sample439	0.006	UBERON:skin	M44Plmr	M44Plmr	FMA:Right palm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F23Tong.140417	GCGAGATCCAGT	dorsal surface of tongue	tongue	F23Tong	447426	UBERON:tongue	tongue	oral	11/14/08	F23Tong	FKB0RMH	female	FKB0RMH	sample527	0.006	UBERON:oral cavity	F23Tong	F23Tong	FMA:Dorsal surface of tongue	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Fcsp.140470	CTAGTCAGCTGA	stool	stool	M32Fcsp	408170	UBERON:feces	feces	gut	8/18/08	M32Fcsp	FFLHOYS	male	FFLHOYS	sample508	0.005	UBERON:feces	M32Fcsp	M32Fcsp	FMA:Feces	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Aptl.140370	ATGTACGGCGAC	left axilla	left armpit	F22Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/18/08	F22Aptl	FFLHOYS	female	FFLHOYS	sample096	0.005	UBERON:skin	F22Aptl	F22Aptl	FMA:Left axilla	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Forr.140732	CAAGATCGACTC	right volar forearm	right forearm	F22Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/18/08	F22Forr	FFLHOYS	female	FFLHOYS	sample346	0.005	UBERON:skin	F22Forr	F22Forr	FMA:Surface of right arm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M53Knee.140318	TACTACATGGTC	popliteal fossae	back of knees	M53Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M53Knee	FKB0RMH	male	FKB0RMH	sample015	0.006	UBERON:skin	M53Knee	M53Knee	FMA:Popliteal fossa	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Plmr.140745	ATGCAGCTCAGT	right palm	right palm	F21Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	F21Plmr	FFLHOYS	female	FFLHOYS	sample416	0.005	UBERON:skin	F21Plmr	F21Plmr	FMA:Right palm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Fcsp.140643	CTGTGACATTGT	stool	stool	M41Fcsp	408170	UBERON:feces	feces	gut	8/20/08	M41Fcsp	FFO92CG	male	FFO92CG	sample511	0.006	UBERON:feces	M41Fcsp	M41Fcsp	FMA:Feces	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F33Fcsw.140662	GTCGACTCCTCT	stool	stool	F33Fcsw	408170	UBERON:feces	feces	gut	11/14/08	F33Fcsw	FKB0RMH	female	FKB0RMH	sample492	0.006	UBERON:feces	F33Fcsw	F33Fcsw	FMA:Feces	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Fcsw.140610	ACACACTATGGC	stool	stool	M11Fcsw	408170	UBERON:feces	feces	gut	8/18/08	M11Fcsw	FFLHOYS	male	FFLHOYS	sample495	0.005	UBERON:feces	M11Fcsw	M11Fcsw	FMA:Feces	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M14Plml.140544	GACAGTTACTGC	left palm	left palm	M14Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M14Plml	FKB0RMH	male	FKB0RMH	sample204	0.006	UBERON:skin	M14Plml	M14Plml	FMA:Left palm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F14Frhd.140741	GAGGCTCATCAT	forehead	forehead	F14Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	F14Frhd	FKB0RMH	female	FKB0RMH	sample036	0.006	UBERON:skin	F14Frhd	F14Frhd	FMA:Forehead	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M34Tong.140540	GTAGATGCTTCG	dorsal surface of tongue	tongue	M34Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M34Tong	FKB0RMH	male	FKB0RMH	sample544	0.006	UBERON:oral cavity	M34Tong	M34Tong	FMA:Dorsal surface of tongue	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Fotr.140431	AGCTATCCACGA	right plantar foot	right sole of foot	F11Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	F11Fotr	FFLHOYS	female	FFLHOYS	sample444	0.005	UBERON:skin	F11Fotr	F11Fotr	FMA:Right foot surface	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M24Ewax.140460	GCAGGATAGATA	external auditory canal	outer ear canal/earwax	M24Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M24Ewax	FKB0RMH	male	FKB0RMH	sample306	0.006	UBERON:external auditory canal	M24Ewax	M24Ewax	FMA:External auditory canal	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Forl.140600	GACTGTCATGCA	left volar forearm	left forearm	M42Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/20/08	M42Forl	FFO92CG	male	FFO92CG	sample134	0.006	UBERON:skin	M42Forl	M42Forl	FMA:Surface of left arm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Navl.140564	GAGCATTCTCTA	umbilicus	navel	M42Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/20/08	M42Navl	FFO92CG	male	FFO92CG	sample278	0.006	UBERON:skin	M42Navl	M42Navl	FMA:Umbilicus	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Frhd.140623	ATCGTACAACTC	forehead	forehead	F21Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/18/08	F21Frhd	FFLHOYS	female	FFLHOYS	sample037	0.005	UBERON:skin	F21Frhd	F21Frhd	FMA:Forehead	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F24Fotr.140273	GCTATCACGAGT	right plantar foot	right sole of foot	F24Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F24Fotr	FKB0RMH	female	FKB0RMH	sample451	0.006	UBERON:skin	F24Fotr	F24Fotr	FMA:Right foot surface	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Kner.140517	AAGAGATGTCGA	right popliteal fossa	right back of knees	M11Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	M11Kner	FFLHOYS	male	FFLHOYS	sample335	0.005	UBERON:skin	M11Kner	M11Kner	FMA:Right popliteal fossa	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M34Knee.140362	GTACGGCATACG	popliteal fossae	back of knees	M34Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M34Knee	FKB0RMH	male	FKB0RMH	sample012	0.006	UBERON:skin	M34Knee	M34Knee	FMA:Popliteal fossa	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Plml.140300	ATAATCTCGTCG	left palm	left palm	F12Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	F12Plml	FFLHOYS	female	FFLHOYS	sample190	0.005	UBERON:skin	F12Plml	F12Plml	FMA:Left palm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M63Ewax.140384	TATGGCACACAC	external auditory canal	outer ear canal/earwax	M63Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M63Ewax	FKB0RMH	male	FKB0RMH	sample313	0.006	UBERON:external auditory canal	M63Ewax	M63Ewax	FMA:External auditory canal	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M54Tong.140619	TAGGTATCTCAC	dorsal surface of tongue	tongue	M54Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M54Tong	FKB0RMH	male	FKB0RMH	sample550	0.006	UBERON:oral cavity	M54Tong	M54Tong	FMA:Dorsal surface of tongue	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Pinl.140717	CACTCTGATTAG	left lateral pinna	left outer ear	F22Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	F22Pinl	FFLHOYS	female	FFLHOYS	sample165	0.005	UBERON:skin	F22Pinl	F22Pinl	FMA:Surface of left pinna	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Kner.140545	AGGCTACACGAC	right popliteal fossa	right back of knees	F12Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	F12Kner	FFLHOYS	female	FFLHOYS	sample330	0.005	UBERON:skin	F12Kner	F12Kner	FMA:Right popliteal fossa	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Mout.140450	AGTGGATGCTCT	oral cavity	mouth	F12Mout	447426	UBERON:mouth	mouth	oral	8/18/08	F12Mout	FFLHOYS	female	FFLHOYS	sample254	0.005	UBERON:oral cavity	F12Mout	F12Mout	FMA:Oral cavity	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F23Ewax.140304	GCGACTTGTGTA	external auditory canal	outer ear canal/earwax	F23Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	F23Ewax	FKB0RMH	female	FKB0RMH	sample299	0.006	UBERON:external auditory canal	F23Ewax	F23Ewax	FMA:External auditory canal	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F23Frhd.140622	GCATGTGCATGT	forehead	forehead	F23Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	F23Frhd	FKB0RMH	female	FKB0RMH	sample039	0.006	UBERON:skin	F23Frhd	F23Frhd	FMA:Forehead	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Aptl.140591	AGCTGACTAGTC	left axilla	left armpit	F12Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/18/08	F12Aptl	FFLHOYS	female	FFLHOYS	sample094	0.005	UBERON:skin	F12Aptl	F12Aptl	FMA:Left axilla	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Fcsw.140706	GCGTACAACTGT	stool	stool	F32Fcsw	408170	UBERON:feces	feces	gut	8/20/08	F32Fcsw	FFO92CG	female	FFO92CG	sample491	0.006	UBERON:feces	F32Fcsw	F32Fcsw	FMA:Feces	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Indl.140813	CGAGAGTTACGC	left palmar index finger	left index finger	M22Indl	539665	UBERON:skin of finger	skin of finger	skin	8/20/08	M22Indl	FFO92CG	male	FFO92CG	sample144	0.006	UBERON:skin	M22Indl	M22Indl	FMA:Left index finger	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Aptl.140756	CGCGATAGCAGT	left axilla	left armpit	M31Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/18/08	M31Aptl	FFLHOYS	male	FFLHOYS	sample103	0.005	UBERON:skin	M31Aptl	M31Aptl	FMA:Left axilla	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F24Tong.140829	GCTCGCTACTTC	dorsal surface of tongue	tongue	F24Tong	447426	UBERON:tongue	tongue	oral	11/14/08	F24Tong	FKB0RMH	female	FKB0RMH	sample528	0.006	UBERON:oral cavity	F24Tong	F24Tong	FMA:Dorsal surface of tongue	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Tong.140857	GATAGTGCCACT	dorsal surface of tongue	tongue	M42Tong	447426	UBERON:tongue	tongue	oral	8/20/08	M42Tong	FFO92CG	male	FFO92CG	sample546	0.006	UBERON:oral cavity	M42Tong	M42Tong	FMA:Dorsal surface of tongue	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M64Fotr.140624	TCAGATCCGATG	right plantar foot	right sole of foot	M64Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M64Fotr	FKB0RMH	male	FKB0RMH	sample475	0.006	UBERON:skin	M64Fotr	M64Fotr	FMA:Right foot surface	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Frhd.140336	CGTATCTGCGAA	forehead	forehead	M31Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/18/08	M31Frhd	FFLHOYS	male	FFLHOYS	sample053	0.005	UBERON:skin	M31Frhd	M31Frhd	FMA:Forehead	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Fotr.140698	CAGACATTGCGT	right plantar foot	right sole of foot	F22Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	F22Fotr	FFLHOYS	female	FFLHOYS	sample449	0.005	UBERON:skin	F22Fotr	F22Fotr	FMA:Right foot surface	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Tong.140453	ACCGCAGAGTCA	dorsal surface of tongue	tongue	M11Tong	447426	UBERON:tongue	tongue	oral	8/18/08	M11Tong	FFLHOYS	male	FFLHOYS	sample533	0.005	UBERON:oral cavity	M11Tong	M11Tong	FMA:Dorsal surface of tongue	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Frhd.140832	GATGTGAGCGCT	forehead	forehead	F31Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/20/08	F31Frhd	FFO92CG	female	FFO92CG	sample041	0.006	UBERON:skin	F31Frhd	F31Frhd	FMA:Forehead	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Fotr.140652	GACGAGTCAGTC	right plantar foot	right sole of foot	M41Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	M41Fotr	FFO92CG	male	FFO92CG	sample468	0.006	UBERON:skin	M41Fotr	M41Fotr	FMA:Right foot surface	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Plml.140322	GCAGGCAGTACT	left palm	left palm	F31Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	F31Plml	FFO92CG	female	FFO92CG	sample197	0.006	UBERON:skin	F31Plml	F31Plml	FMA:Left palm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Ewxr.140654	CAGCTAGAACGC	right external auditory canal	right outer ear canal/earwax	M21Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M21Ewxr	FFO92CG	male	FFO92CG	sample406	0.006	UBERON:external auditory canal	M21Ewxr	M21Ewxr	FMA:Right external auditory canal	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Forl.140295	ATCGCGGACGAT	left volar forearm	left forearm	F21Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/18/08	F21Forl	FFLHOYS	female	FFLHOYS	sample123	0.005	UBERON:skin	F21Forl	F21Forl	FMA:Surface of left arm	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F24Fcsw.140765	GCTGGTATCTGA	stool	stool	F24Fcsw	408170	UBERON:feces	feces	gut	11/14/08	F24Fcsw	FKB0RMH	female	FKB0RMH	sample487	0.006	UBERON:feces	F24Fcsw	F24Fcsw	FMA:Feces	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F13Plmr.140454	GACTCACTCAAT	right palm	right palm	F13Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F13Plmr	FKB0RMH	female	FKB0RMH	sample414	0.006	UBERON:skin	F13Plmr	F13Plmr	FMA:Right palm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Indr.140588	CGTCAGACGGAT	right palmar index finger	right index finger	M31Indr	539669	UBERON:skin of finger	skin of finger	skin	8/18/08	M31Indr	FFLHOYS	male	FFLHOYS	sample367	0.005	UBERON:skin	M31Indr	M31Indr	FMA:Right index finger	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F33Fotl.140406	GTCAACGCGATG	left plantar foot	left sole of foot	F33Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F33Fotl	FKB0RMH	female	FKB0RMH	sample231	0.006	UBERON:skin	F33Fotl	F33Fotl	FMA:Left foot surface	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Fotl.140711	ACCAGACGATGC	left plantar foot	left sole of foot	M11Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	M11Fotl	FFLHOYS	male	FFLHOYS	sample233	0.005	UBERON:skin	M11Fotl	M11Fotl	FMA:Left foot surface	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M64Tong.140427	TCAGGACTGTGT	dorsal surface of tongue	tongue	M64Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M64Tong	FKB0RMH	male	FKB0RMH	sample552	0.006	UBERON:oral cavity	M64Tong	M64Tong	FMA:Dorsal surface of tongue	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M13Fcsw.140390	GACAGCGTTGAC	stool	stool	M13Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M13Fcsw	FKB0RMH	male	FKB0RMH	sample498	0.006	UBERON:feces	M13Fcsw	M13Fcsw	FMA:Feces	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M33Ewax.140613	GGCAGTGTATCG	external auditory canal	outer ear canal/earwax	M33Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M33Ewax	FKB0RMH	male	FKB0RMH	sample307	0.006	UBERON:external auditory canal	M33Ewax	M33Ewax	FMA:External auditory canal	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Navl.140838	CACGACAGGCTA	umbilicus	navel	F22Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/18/08	F22Navl	FFLHOYS	female	FFLHOYS	sample270	0.005	UBERON:skin	F22Navl	F22Navl	FMA:Umbilicus	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F34Plml.140846	GTCGTAGCCAGA	left palm	left palm	F34Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	F34Plml	FKB0RMH	female	FKB0RMH	sample200	0.006	UBERON:skin	F34Plml	F34Plml	FMA:Left palm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M24Tong.140439	GCAGGCAGTACT	dorsal surface of tongue	tongue	M24Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M24Tong	FKB0RMH	male	FKB0RMH	sample540	0.006	UBERON:oral cavity	M24Tong	M24Tong	FMA:Dorsal surface of tongue	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Nose.140594	CGAGTTGTAGCG	external nose	external nose	M22Nose	646099	UBERON:nose	nose	skin	8/20/08	M22Nose	FFO92CG	male	FFO92CG	sample028	0.006	UBERON:skin	M22Nose	M22Nose	FMA:External nose	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M54Fcsw.140330	TAGTGTGCTTCA	stool	stool	M54Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M54Fcsw	FKB0RMH	male	FKB0RMH	sample518	0.006	UBERON:feces	M54Fcsw	M54Fcsw	FMA:Feces	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Forr.140528	GCGTTACACACA	right volar forearm	right forearm	F32Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/20/08	F32Forr	FFO92CG	female	FFO92CG	sample348	0.006	UBERON:skin	F32Forr	F32Forr	FMA:Surface of right arm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Navl.140871	CGTGACAATGTC	umbilicus	navel	M31Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/18/08	M31Navl	FFLHOYS	male	FFLHOYS	sample276	0.005	UBERON:skin	M31Navl	M31Navl	FMA:Umbilicus	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Pinr.140546	ACCACATACATC	right lateral pinna	right outer ear	M11Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M11Pinr	FFLHOYS	male	FFLHOYS	sample391	0.005	UBERON:skin	M11Pinr	M11Pinr	FMA:Surface of right pinna	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M53Fotr.140451	TACTGCGACAGT	right plantar foot	right sole of foot	M53Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M53Fotr	FKB0RMH	male	FKB0RMH	sample472	0.006	UBERON:skin	M53Fotr	M53Fotr	FMA:Right foot surface	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Fcsw.140506	ATCGATCTGTGG	stool	stool	F21Fcsw	408170	UBERON:feces	feces	gut	8/18/08	F21Fcsw	FFLHOYS	female	FFLHOYS	sample483	0.005	UBERON:feces	F21Fcsw	F21Fcsw	FMA:Feces	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M33Fcsw.140557	GGTATACGCAGC	stool	stool	M33Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M33Fcsw	FKB0RMH	male	FKB0RMH	sample510	0.006	UBERON:feces	M33Fcsw	M33Fcsw	FMA:Feces	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Aptl.140537	GACGCTAGTTCA	left axilla	left armpit	M42Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/20/08	M42Aptl	FFO92CG	male	FFO92CG	sample106	0.006	UBERON:skin	M42Aptl	M42Aptl	FMA:Left axilla	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Mout.140367	GAAGCTACTGTC	oral cavity	mouth	M41Mout	447426	UBERON:mouth	mouth	oral	8/20/08	M41Mout	FFO92CG	male	FFO92CG	sample265	0.006	UBERON:oral cavity	M41Mout	M41Mout	FMA:Oral cavity	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Forr.140521	GACTTCAGTGTG	right volar forearm	right forearm	M42Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/20/08	M42Forr	FFO92CG	male	FFO92CG	sample356	0.006	UBERON:skin	M42Forr	M42Forr	FMA:Surface of right arm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Fotl.140348	ATGGATACGCTC	left plantar foot	left sole of foot	F21Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	F21Fotl	FFLHOYS	female	FFLHOYS	sample225	0.005	UBERON:skin	F21Fotl	F21Fotl	FMA:Left foot surface	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M43Plml.140405	GTGATAGTGCCG	left palm	left palm	M43Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M43Plml	FKB0RMH	male	FKB0RMH	sample215	0.006	UBERON:skin	M43Plml	M43Plml	FMA:Left palm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Forr.140361	CAGTCGAAGCTG	right volar forearm	right forearm	M21Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/20/08	M21Forr	FFO92CG	male	FFO92CG	sample351	0.006	UBERON:skin	M21Forr	M21Forr	FMA:Surface of right arm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M64Ewax.140550	TCAGCTCAACTA	external auditory canal	outer ear canal/earwax	M64Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M64Ewax	FKB0RMH	male	FKB0RMH	sample314	0.006	UBERON:external auditory canal	M64Ewax	M64Ewax	FMA:External auditory canal	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M34Ewax.140864	GTAGAGCTGTTC	external auditory canal	outer ear canal/earwax	M34Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M34Ewax	FKB0RMH	male	FKB0RMH	sample308	0.006	UBERON:external auditory canal	M34Ewax	M34Ewax	FMA:External auditory canal	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Pinl.140722	ACTCGATTCGAT	left lateral pinna	left outer ear	M12Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M12Pinl	FFLHOYS	male	FFLHOYS	sample169	0.005	UBERON:skin	M12Pinl	M12Pinl	FMA:Surface of left pinna	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M63Fotl.140731	TATGCGAGGTCG	left plantar foot	left sole of foot	M63Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M63Fotl	FKB0RMH	male	FKB0RMH	sample251	0.006	UBERON:skin	M63Fotl	M63Fotl	FMA:Left foot surface	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Fotr.140582	GGACGTCACAGT	right plantar foot	right sole of foot	F32Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	F32Fotr	FFO92CG	female	FFO92CG	sample453	0.006	UBERON:skin	F32Fotr	F32Fotr	FMA:Right foot surface	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M23Knee.140621	GATCTATCCGAG	popliteal fossae	back of knees	M23Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M23Knee	FKB0RMH	male	FKB0RMH	sample009	0.006	UBERON:skin	M23Knee	M23Knee	FMA:Popliteal fossa	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Ewxr.140299	ATCCGATCACAG	right external auditory canal	right outer ear canal/earwax	F21Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	F21Ewxr	FFLHOYS	female	FFLHOYS	sample401	0.005	UBERON:external auditory canal	F21Ewxr	F21Ewxr	FMA:Right external auditory canal	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Fcsw.140281	CAACTATCAGCT	stool	stool	F22Fcsw	408170	UBERON:feces	feces	gut	8/18/08	F22Fcsw	FFLHOYS	female	FFLHOYS	sample485	0.005	UBERON:feces	F22Fcsw	F22Fcsw	FMA:Feces	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Plmr.140574	CGATGCACCAGA	right palm	right palm	M22Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	M22Plmr	FFO92CG	male	FFO92CG	sample429	0.006	UBERON:skin	M22Plmr	M22Plmr	FMA:Right palm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M43Tong.140396	GTGTGTGTCAGG	dorsal surface of tongue	tongue	M43Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M43Tong	FKB0RMH	male	FKB0RMH	sample547	0.006	UBERON:oral cavity	M43Tong	M43Tong	FMA:Dorsal surface of tongue	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Tong.140317	GACGATATCGCG	dorsal surface of tongue	tongue	M41Tong	447426	UBERON:tongue	tongue	oral	8/20/08	M41Tong	FFO92CG	male	FFO92CG	sample545	0.006	UBERON:oral cavity	M41Tong	M41Tong	FMA:Dorsal surface of tongue	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Pinl.140354	GAGTGAGTACAA	left lateral pinna	left outer ear	M42Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M42Pinl	FFO92CG	male	FFO92CG	sample175	0.006	UBERON:skin	M42Pinl	M42Pinl	FMA:Surface of left pinna	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Aptl.140820	GCCACTGATAGT	left axilla	left armpit	F32Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/20/08	F32Aptl	FFO92CG	female	FFO92CG	sample098	0.006	UBERON:skin	F32Aptl	F32Aptl	FMA:Left axilla	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Plml.140803	AGCCATACTGAC	left palm	left palm	F11Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	F11Plml	FFLHOYS	female	FFLHOYS	sample189	0.005	UBERON:skin	F11Plml	F11Plml	FMA:Left palm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M33Knee.140298	GCTTGCGAGACA	popliteal fossae	back of knees	M33Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M33Knee	FKB0RMH	male	FKB0RMH	sample011	0.006	UBERON:skin	M33Knee	M33Knee	FMA:Popliteal fossa	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M13Fotr.140755	GAACTGTATCTC	right plantar foot	right sole of foot	M13Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M13Fotr	FKB0RMH	male	FKB0RMH	sample458	0.006	UBERON:skin	M13Fotr	M13Fotr	FMA:Right foot surface	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Mout.140342	CGTCGATCTCTC	oral cavity	mouth	M31Mout	447426	UBERON:mouth	mouth	oral	8/18/08	M31Mout	FFLHOYS	male	FFLHOYS	sample263	0.005	UBERON:oral cavity	M31Mout	M31Mout	FMA:Oral cavity	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Forl.140437	AGAGCAAGAGCA	left volar forearm	left forearm	F11Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/18/08	F11Forl	FFLHOYS	female	FFLHOYS	sample121	0.005	UBERON:skin	F11Forl	F11Forl	FMA:Surface of left arm	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F34Fcsw.140733	GTGACTGCGGAT	stool	stool	F34Fcsw	408170	UBERON:feces	feces	gut	11/14/08	F34Fcsw	FKB0RMH	female	FKB0RMH	sample493	0.006	UBERON:feces	F34Fcsw	F34Fcsw	FMA:Feces	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Kner.140631	ACGATGCGACCA	right popliteal fossa	right back of knees	M12Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	M12Kner	FFLHOYS	male	FFLHOYS	sample336	0.005	UBERON:skin	M12Kner	M12Kner	FMA:Right popliteal fossa	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Forl.140587	CGAAGACTGCTG	left volar forearm	left forearm	M22Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/20/08	M22Forl	FFO92CG	male	FFO92CG	sample130	0.006	UBERON:skin	M22Forl	M22Forl	FMA:Surface of left arm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F33Frhd.140757	GTATCCATGCGA	forehead	forehead	F33Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	11/14/08	F33Frhd	FKB0RMH	female	FKB0RMH	sample043	0.006	UBERON:skin	F33Frhd	F33Frhd	FMA:Forehead	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Indr.140748	CTCCACATGAGA	right palmar index finger	right index finger	M32Indr	539669	UBERON:skin of finger	skin of finger	skin	8/18/08	M32Indr	FFLHOYS	male	FFLHOYS	sample368	0.005	UBERON:skin	M32Indr	M32Indr	FMA:Right index finger	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Indr.140440	ATCTGAGCTGGT	right palmar index finger	right index finger	F21Indr	539669	UBERON:skin of finger	skin of finger	skin	8/18/08	F21Indr	FFLHOYS	female	FFLHOYS	sample359	0.005	UBERON:skin	F21Indr	F21Indr	FMA:Right index finger	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Tong.140559	ACTGATCCTAGT	dorsal surface of tongue	tongue	M12Tong	447426	UBERON:tongue	tongue	oral	8/18/08	M12Tong	FFLHOYS	male	FFLHOYS	sample534	0.005	UBERON:oral cavity	M12Tong	M12Tong	FMA:Dorsal surface of tongue	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Pinl.140703	CGTTATGTACAC	left lateral pinna	left outer ear	M31Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M31Pinl	FFLHOYS	male	FFLHOYS	sample172	0.005	UBERON:skin	M31Pinl	M31Pinl	FMA:Surface of left pinna	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Forl.140724	GCGTATCTTGAT	left volar forearm	left forearm	F32Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/20/08	F32Forl	FFO92CG	female	FFO92CG	sample126	0.006	UBERON:skin	F32Forl	F32Forl	FMA:Surface of left arm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M33Fotr.140533	GGACGTCACAGT	right plantar foot	right sole of foot	M33Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M33Fotr	FKB0RMH	male	FKB0RMH	sample466	0.006	UBERON:skin	M33Fotr	M33Fotr	FMA:Right foot surface	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Plmr.140529	GCTGTGTAGGAC	right palm	right palm	F32Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	F32Plmr	FFO92CG	female	FFO92CG	sample421	0.006	UBERON:skin	F32Plmr	F32Plmr	FMA:Right palm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Pinr.140627	CGTTCGCATAGA	right lateral pinna	right outer ear	M31Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M31Pinr	FFLHOYS	male	FFLHOYS	sample395	0.005	UBERON:skin	M31Pinr	M31Pinr	FMA:Surface of right pinna	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Pinr.140799	GCATAGTAGCCG	right lateral pinna	right outer ear	F31Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	F31Pinr	FFO92CG	female	FFO92CG	sample389	0.006	UBERON:skin	F31Pinr	F31Pinr	FMA:Surface of right pinna	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Knel.140490	CAGCACTAAGCG	left popliteal fossa	left back of knees	M21Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	M21Knel	FFO92CG	male	FFO92CG	sample115	0.006	UBERON:skin	M21Knel	M21Knel	FMA:Left popliteal fossa	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Indl.140854	GAACTGTATCTC	left palmar index finger	left index finger	M41Indl	539665	UBERON:skin of finger	skin of finger	skin	8/20/08	M41Indl	FFO92CG	male	FFO92CG	sample147	0.006	UBERON:skin	M41Indl	M41Indl	FMA:Left index finger	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Kner.140678	ATCACTAGTCAC	right popliteal fossa	right back of knees	F21Kner	539669	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	F21Kner	FFLHOYS	female	FFLHOYS	sample331	0.005	UBERON:skin	F21Kner	F21Kner	FMA:Right popliteal fossa	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Knel.140855	CTGGAGCATGAC	left popliteal fossa	left back of knees	M41Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/20/08	M41Knel	FFO92CG	male	FFO92CG	sample119	0.006	UBERON:skin	M41Knel	M41Knel	FMA:Left popliteal fossa	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Forr.140586	GATGTCGTGTCA	right volar forearm	right forearm	F31Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/20/08	F31Forr	FFO92CG	female	FFO92CG	sample347	0.006	UBERON:skin	F31Forr	F31Forr	FMA:Surface of right arm	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M14Knee.140681	GACCGAGCTATG	popliteal fossae	back of knees	M14Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M14Knee	FKB0RMH	male	FKB0RMH	sample008	0.006	UBERON:skin	M14Knee	M14Knee	FMA:Popliteal fossa	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Ewxr.140535	GCGATATATCGC	right external auditory canal	right outer ear canal/earwax	F32Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	F32Ewxr	FFO92CG	female	FFO92CG	sample404	0.006	UBERON:external auditory canal	F32Ewxr	F32Ewxr	FMA:Right external auditory canal	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Fotr.140278	ACTGACAGCCAT	right plantar foot	right sole of foot	M12Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	8/18/08	M12Fotr	FFLHOYS	male	FFLHOYS	sample457	0.005	UBERON:skin	M12Fotr	M12Fotr	FMA:Right foot surface	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M43Fotr.140475	GTGTACCTATCA	right plantar foot	right sole of foot	M43Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M43Fotr	FKB0RMH	male	FKB0RMH	sample470	0.006	UBERON:skin	M43Fotr	M43Fotr	FMA:Right foot surface	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Plmr.140584	ACTCAGATACTC	right palm	right palm	M12Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	M12Plmr	FFLHOYS	male	FFLHOYS	sample425	0.005	UBERON:skin	M12Plmr	M12Plmr	FMA:Right palm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M54Ewax.140661	TAGCTGAGTCCA	external auditory canal	outer ear canal/earwax	M54Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M54Ewax	FKB0RMH	male	FKB0RMH	sample312	0.006	UBERON:external auditory canal	M54Ewax	M54Ewax	FMA:External auditory canal	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Indl.140710	AGTGAGAGAAGC	left palmar index finger	left index finger	F12Indl	539665	UBERON:skin of finger	skin of finger	skin	8/18/08	F12Indl	FFLHOYS	female	FFLHOYS	sample136	0.005	UBERON:skin	F12Indl	F12Indl	FMA:Left index finger	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Knel.140331	ACTTGTAGCAGC	left popliteal fossa	left back of knees	F11Knel	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	8/18/08	F11Knel	FFLHOYS	female	FFLHOYS	sample107	0.005	UBERON:skin	F11Knel	F11Knel	FMA:Left popliteal fossa	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Nose.140779	GAATGATGAGTG	external nose	external nose	M41Nose	646099	UBERON:nose	nose	skin	8/20/08	M41Nose	FFO92CG	male	FFO92CG	sample031	0.006	UBERON:skin	M41Nose	M41Nose	FMA:External nose	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F12Pinl.140479	ATACAGAGCTCC	left lateral pinna	left outer ear	F12Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	F12Pinl	FFLHOYS	female	FFLHOYS	sample164	0.005	UBERON:skin	F12Pinl	F12Pinl	FMA:Surface of left pinna	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Plmr.140307	CTCTGAAGTCTA	right palm	right palm	M32Plmr	539669	UBERON:zone of skin of hand	zone of skin of hand	skin	8/18/08	M32Plmr	FFLHOYS	male	FFLHOYS	sample433	0.005	UBERON:skin	M32Plmr	M32Plmr	FMA:Right palm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Forl.140771	CAGTCACTAACG	left volar forearm	left forearm	M21Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/20/08	M21Forl	FFO92CG	male	FFO92CG	sample129	0.006	UBERON:skin	M21Forl	M21Forl	FMA:Surface of left arm	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Fcsw.140616	AGAGAGCAAGTG	stool	stool	F11Fcsw	408170	UBERON:feces	feces	gut	8/18/08	F11Fcsw	FFLHOYS	female	FFLHOYS	sample477	0.005	UBERON:feces	F11Fcsw	F11Fcsw	FMA:Feces	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Fotl.140742	GATACGTCCTGA	left plantar foot	left sole of foot	M42Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	8/20/08	M42Fotl	FFO92CG	male	FFO92CG	sample246	0.006	UBERON:skin	M42Fotl	M42Fotl	FMA:Left foot surface	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M24Fcsw.140647	GCATAGTAGCCG	stool	stool	M24Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M24Fcsw	FKB0RMH	male	FKB0RMH	sample505	0.006	UBERON:feces	M24Fcsw	M24Fcsw	FMA:Feces	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M63Knee.140419	TATCTCGAACTG	popliteal fossae	back of knees	M63Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M63Knee	FKB0RMH	male	FKB0RMH	sample017	0.006	UBERON:skin	M63Knee	M63Knee	FMA:Popliteal fossa	M6	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F34Fotr.140673	GTCTCATGTAGG	right plantar foot	right sole of foot	F34Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F34Fotr	FKB0RMH	female	FKB0RMH	sample455	0.006	UBERON:skin	F34Fotr	F34Fotr	FMA:Right foot surface	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M23Tong.140462	GATGCATGACGC	dorsal surface of tongue	tongue	M23Tong	447426	UBERON:tongue	tongue	oral	11/14/08	M23Tong	FKB0RMH	male	FKB0RMH	sample539	0.006	UBERON:oral cavity	M23Tong	M23Tong	FMA:Dorsal surface of tongue	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Aptl.140830	AACGCACGCTAG	left axilla	left armpit	M11Aptl	539665	UBERON:skin of arm	skin of arm	skin	8/18/08	M11Aptl	FFLHOYS	male	FFLHOYS	sample099	0.005	UBERON:skin	M11Aptl	M11Aptl	FMA:Left axilla	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Pinl.140578	GCTTACATCGAG	left lateral pinna	left outer ear	F32Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	F32Pinl	FFO92CG	female	FFO92CG	sample167	0.006	UBERON:skin	F32Pinl	F32Pinl	FMA:Surface of left pinna	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Forl.140292	CGTACTAGACTG	left volar forearm	left forearm	M31Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/18/08	M31Forl	FFLHOYS	male	FFLHOYS	sample131	0.005	UBERON:skin	M31Forl	M31Forl	FMA:Surface of left arm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Indr.140762	ACGTGCCGTAGA	right palmar index finger	right index finger	M12Indr	539669	UBERON:skin of finger	skin of finger	skin	8/18/08	M12Indr	FFLHOYS	male	FFLHOYS	sample364	0.005	UBERON:skin	M12Indr	M12Indr	FMA:Right index finger	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M34Plml.140373	GGTCGTAGCGTA	left palm	left palm	M34Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	11/14/08	M34Plml	FKB0RMH	male	FKB0RMH	sample212	0.006	UBERON:skin	M34Plml	M34Plml	FMA:Left palm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F33Fotr.140629	GTATGTTGCTCA	right plantar foot	right sole of foot	F33Fotr	539669	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F33Fotr	FKB0RMH	female	FKB0RMH	sample454	0.006	UBERON:skin	F33Fotr	F33Fotr	FMA:Right foot surface	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F23Fotl.140863	GCCTATACTACA	left plantar foot	left sole of foot	F23Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	F23Fotl	FKB0RMH	female	FKB0RMH	sample227	0.006	UBERON:skin	F23Fotl	F23Fotl	FMA:Left foot surface	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Nose.140531	CTCGATTAGATC	external nose	external nose	M32Nose	646099	UBERON:nose	nose	skin	8/18/08	M32Nose	FFLHOYS	male	FFLHOYS	sample030	0.005	UBERON:skin	M32Nose	M32Nose	FMA:External nose	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M44Fotl.140595	TACACGATCTAC	left plantar foot	left sole of foot	M44Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M44Fotl	FKB0RMH	male	FKB0RMH	sample248	0.006	UBERON:skin	M44Fotl	M44Fotl	FMA:Left foot surface	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Ewxr.140434	ACGCGATACTGG	right external auditory canal	right outer ear canal/earwax	M12Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M12Ewxr	FFLHOYS	male	FFLHOYS	sample405	0.005	UBERON:external auditory canal	M12Ewxr	M12Ewxr	FMA:Right external auditory canal	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M41Navl.140280	GAAGTCTCGCAT	umbilicus	navel	M41Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/20/08	M41Navl	FFO92CG	male	FFO92CG	sample277	0.006	UBERON:skin	M41Navl	M41Navl	FMA:Umbilicus	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Pinl.140395	GCAGTTCATATC	left lateral pinna	left outer ear	F31Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	F31Pinl	FFO92CG	female	FFO92CG	sample166	0.006	UBERON:skin	F31Pinl	F31Pinl	FMA:Surface of left pinna	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Fcsp.140464	GCGGATGTGACT	stool	stool	F32Fcsp	408170	UBERON:feces	feces	gut	8/20/08	F32Fcsp	FFO92CG	female	FFO92CG	sample490	0.006	UBERON:feces	F32Fcsp	F32Fcsp	FMA:Feces	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M13Fotl.140685	GAAGAGTGATCA	left plantar foot	left sole of foot	M13Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M13Fotl	FKB0RMH	male	FKB0RMH	sample235	0.006	UBERON:skin	M13Fotl	M13Fotl	FMA:Left foot surface	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M23Fotl.140684	GATCTTCAGTAC	left plantar foot	left sole of foot	M23Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M23Fotl	FKB0RMH	male	FKB0RMH	sample239	0.006	UBERON:skin	M23Fotl	M23Fotl	FMA:Left foot surface	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Forl.140812	ACGCTCATGGAT	left volar forearm	left forearm	M12Forl	539665	UBERON:skin of forearm	skin of forearm	skin	8/18/08	M12Forl	FFLHOYS	male	FFLHOYS	sample128	0.005	UBERON:skin	M12Forl	M12Forl	FMA:Surface of left arm	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M24Knee.140796	GCACTGAGACGT	popliteal fossae	back of knees	M24Knee	539665	UBERON:zone of skin of knee	zone of skin of knee	skin	11/14/08	M24Knee	FKB0RMH	male	FKB0RMH	sample010	0.006	UBERON:skin	M24Knee	M24Knee	FMA:Popliteal fossa	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F13Fcsw.140387	GAGCAGATGCCT	stool	stool	F13Fcsw	408170	UBERON:feces	feces	gut	11/14/08	F13Fcsw	FKB0RMH	female	FKB0RMH	sample480	0.006	UBERON:feces	F13Fcsw	F13Fcsw	FMA:Feces	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Indl.140869	GAGAGCTCTACG	left palmar index finger	left index finger	M42Indl	539665	UBERON:skin of finger	skin of finger	skin	8/20/08	M42Indl	FFO92CG	male	FFO92CG	sample148	0.006	UBERON:skin	M42Indl	M42Indl	FMA:Left index finger	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M42Plml.140513	GAGTATGCAGCC	left palm	left palm	M42Plml	539665	UBERON:zone of skin of hand	zone of skin of hand	skin	8/20/08	M42Plml	FFO92CG	male	FFO92CG	sample214	0.006	UBERON:skin	M42Plml	M42Plml	FMA:Left palm	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M54Fotl.140823	TAGCTCGTAACT	left plantar foot	left sole of foot	M54Fotl	539665	UBERON:zone of skin of foot	zone of skin of foot	skin	11/14/08	M54Fotl	FKB0RMH	male	FKB0RMH	sample250	0.006	UBERON:skin	M54Fotl	M54Fotl	FMA:Left foot surface	M5	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F32Nose.140543	GCTGATGAGCTG	external nose	external nose	F32Nose	646099	UBERON:nose	nose	skin	8/20/08	F32Nose	FFO92CG	female	FFO92CG	sample024	0.006	UBERON:skin	F32Nose	F32Nose	FMA:External nose	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Pinr.140692	CGCACATGTTAT	right lateral pinna	right outer ear	M22Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M22Pinr	FFO92CG	male	FFO92CG	sample394	0.006	UBERON:skin	M22Pinr	M22Pinr	FMA:Surface of right pinna	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F21Mout.140380	ATCTTAGACTGC	oral cavity	mouth	F21Mout	447426	UBERON:mouth	mouth	oral	8/18/08	F21Mout	FFLHOYS	female	FFLHOYS	sample255	0.005	UBERON:oral cavity	F21Mout	F21Mout	FMA:Oral cavity	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Forr.140802	CTATCTAGCGAG	right volar forearm	right forearm	M32Forr	539669	UBERON:skin of forearm	skin of forearm	skin	8/18/08	M32Forr	FFLHOYS	male	FFLHOYS	sample354	0.005	UBERON:skin	M32Forr	M32Forr	FMA:Surface of right arm	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M31Fcsw.140368	CGTACAGTTATC	stool	stool	M31Fcsw	408170	UBERON:feces	feces	gut	8/18/08	M31Fcsw	FFLHOYS	male	FFLHOYS	sample507	0.005	UBERON:feces	M31Fcsw	M31Fcsw	FMA:Feces	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F11Indl.140283	AGATCGGCTCGA	left palmar index finger	left index finger	F11Indl	539665	UBERON:skin of finger	skin of finger	skin	8/18/08	F11Indl	FFLHOYS	female	FFLHOYS	sample135	0.005	UBERON:skin	F11Indl	F11Indl	FMA:Left index finger	F1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Pinr.140516	CATGCAGACTGT	right lateral pinna	right outer ear	M21Pinr	539669	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M21Pinr	FFO92CG	male	FFO92CG	sample393	0.006	UBERON:skin	M21Pinr	M21Pinr	FMA:Surface of right pinna	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M23Fcsw.140821	GATTAGCACTCT	stool	stool	M23Fcsw	408170	UBERON:feces	feces	gut	11/14/08	M23Fcsw	FKB0RMH	male	FKB0RMH	sample504	0.006	UBERON:feces	M23Fcsw	M23Fcsw	FMA:Feces	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M12Frhd.140766	ACGGTGAGTGTC	forehead	forehead	M12Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/18/08	M12Frhd	FFLHOYS	male	FFLHOYS	sample046	0.005	UBERON:skin	M12Frhd	M12Frhd	FMA:Forehead	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M32Ewxr.140697	CTAGGTCACTAG	right external auditory canal	right outer ear canal/earwax	M32Ewxr	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/18/08	M32Ewxr	FFLHOYS	male	FFLHOYS	sample409	0.005	UBERON:external auditory canal	M32Ewxr	M32Ewxr	FMA:Right external auditory canal	M3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M44Ewax.140411	TACAGATGGCTC	external auditory canal	outer ear canal/earwax	M44Ewax	646099	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	11/14/08	M44Ewax	FKB0RMH	male	FKB0RMH	sample310	0.006	UBERON:external auditory canal	M44Ewax	M44Ewax	FMA:External auditory canal	M4	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M11Frhd.140548	ACACGGTGTCTA	forehead	forehead	M11Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/18/08	M11Frhd	FFLHOYS	male	FFLHOYS	sample045	0.005	UBERON:skin	M11Frhd	M11Frhd	FMA:Forehead	M1	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Aptr.140827	CCAGATGATCGT	right axilla	right armpit	M22Aptr	539669	UBERON:skin of arm	skin of arm	skin	8/20/08	M22Aptr	FFO92CG	male	FFO92CG	sample324	0.006	UBERON:skin	M22Aptr	M22Aptr	FMA:Right axilla	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F31Fcsp.140571	GATCTTCAGTAC	stool	stool	F31Fcsp	408170	UBERON:feces	feces	gut	8/20/08	F31Fcsp	FFO92CG	female	FFO92CG	sample488	0.006	UBERON:feces	F31Fcsp	F31Fcsp	FMA:Feces	F3	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M22Pinl.140526	CGATGTCGTCAA	left lateral pinna	left outer ear	M22Pinl	539665	UBERON:zone of skin of outer ear	zone of skin of outer ear	skin	8/20/08	M22Pinl	FFO92CG	male	FFO92CG	sample171	0.006	UBERON:skin	M22Pinl	M22Pinl	FMA:Surface of left pinna	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+M21Navl.140850	CATATACTCGCA	umbilicus	navel	M21Navl	539665	UBERON:zone of skin of abdomen	zone of skin of abdomen	skin	8/20/08	M21Navl	FFO92CG	male	FFO92CG	sample274	0.006	UBERON:skin	M21Navl	M21Navl	FMA:Umbilicus	M2	Bacterial Community Variation in Human Body Habitats Across Space and Time
+F22Frhd.140836	CAAGTGAGAGAG	forehead	forehead	F22Frhd	539665	UBERON:zone of skin of head	zone of skin of head	skin	8/18/08	F22Frhd	FFLHOYS	female	FFLHOYS	sample038	0.005	UBERON:skin	F22Frhd	F22Frhd	FMA:Forehead	F2	Bacterial Community Variation in Human Body Habitats Across Space and Time
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/scikit-bio presentation.ipynb b/ipynbs/presentations/2014.05.13-ElBrogrammer/scikit-bio presentation.ipynb
new file mode 100644
index 0000000..456f08f
--- /dev/null
+++ b/ipynbs/presentations/2014.05.13-ElBrogrammer/scikit-bio presentation.ipynb	
@@ -0,0 +1,1274 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:be2e38db4846dcb43a6024d9b9896f3aebcabaec8af6bbbc9e0438dd11bbf53f"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# I keep this as a cell in my title slide so I can rerun \n",
+      "# it easily if I make changes, but it's low enough it won't\n",
+      "# be visible in presentation mode.\n",
+      "%run talktools"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "notes"
+      }
+     },
+     "outputs": [
+      {
+       "html": [
+        "<style>\n",
+        "\n",
+        "/* Originally from https://github.com/fperez/nb-slideshow-template */\n",
+        "\n",
+        ".rendered_html\n",
+        "{\n",
+        "  color: #2C5494;\n",
+        "  font-family: Ubuntu;\n",
+        "  font-size: 160%;\n",
+        "  line-height: 1.1;\n",
+        "  margin: 0.5em 0;\n",
+        "  }\n",
+        "\n",
+        ".title\n",
+        "{\n",
+        "  color: #498AF3;\n",
+        "  font-size: 250%;\n",
+        "  font-weight:bold;\n",
+        "  line-height: 1.2; \n",
+        "  margin: 10px 50px 10px;\n",
+        "  }\n",
+        "\n",
+        ".subtitle\n",
+        "{\n",
+        "  color: #386BBC;\n",
+        "  font-size: 180%;\n",
+        "  font-weight:bold;\n",
+        "  line-height: 1.2; \n",
+        "  margin: 20px 50px 20px;\n",
+        "  }\n",
+        "\n",
+        ".slide-header, p.slide-header\n",
+        "{\n",
+        "  color: #498AF3;\n",
+        "  font-size: 200%;\n",
+        "  font-weight:bold;\n",
+        "  margin: 0px 20px 10px;\n",
+        "  page-break-before: always;\n",
+        "  text-align: center;\n",
+        "  }\n",
+        "\n",
+        ".rendered_html h1\n",
+        "{\n",
+        "  color: #498AF3;\n",
+        "  line-height: 1.2; \n",
+        "  margin: 0.15em 0em 0.5em;\n",
+        "  page-break-before: always;\n",
+        "  text-align: center;\n",
+        "  }\n",
+        "\n",
+        "\n",
+        ".rendered_html h2\n",
+        "{ \n",
+        "  color: #386BBC;\n",
+        "  line-height: 1.2;\n",
+        "  margin: 1.1em 0em 0.5em;\n",
+        "  }\n",
+        "\n",
+        ".rendered_html h3\n",
+        "{ \n",
+        "  font-size: 100%;\n",
+        "  line-height: 1.2;\n",
+        "  margin: 1.1em 0em 0.5em;\n",
+        "  }\n",
+        "\n",
+        ".rendered_html li\n",
+        "{\n",
+        "  line-height: 1.8;\n",
+        "  }\n",
+        "\n",
+        ".input_prompt, .CodeMirror-lines, .output_area\n",
+        "{\n",
+        "  font-size: 120%;\n",
+        "  }\n",
+        "\n",
+        ".gap-above\n",
+        "{\n",
+        "  padding-top: 200px;\n",
+        "  }\n",
+        "\n",
+        ".gap01\n",
+        "{\n",
+        "  padding-top: 10px;\n",
+        "  }\n",
+        "\n",
+        ".gap05\n",
+        "{\n",
+        "  padding-top: 50px;\n",
+        "  }\n",
+        "\n",
+        ".gap1\n",
+        "{\n",
+        "  padding-top: 100px;\n",
+        "  }\n",
+        "\n",
+        ".gap2\n",
+        "{\n",
+        "  padding-top: 200px;\n",
+        "  }\n",
+        "\n",
+        ".gap3\n",
+        "{\n",
+        "  padding-top: 300px;\n",
+        "  }\n",
+        "\n",
+        ".emph\n",
+        "{\n",
+        "  color: #386BBC;\n",
+        "  }\n",
+        "\n",
+        ".warn\n",
+        "{\n",
+        "  color: red;\n",
+        "  }\n",
+        "\n",
+        ".center\n",
+        "{\n",
+        "  text-align: center;\n",
+        "  }\n",
+        "\n",
+        ".nb_link\n",
+        "{\n",
+        "    padding-bottom: 0.5em;\n",
+        "}\n",
+        "\n",
+        "</style>\n"
+       ],
+       "metadata": {},
+       "output_type": "display_data",
+       "text": [
+        "<IPython.core.display.HTML at 0x21309d0>"
+       ]
+      }
+     ],
+     "prompt_number": 1
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "<p class=\"title\">scikit-bio:</p>\n",
+      "\n",
+      "<p class=\"subtitle\">interactive bioinformatics in Python</p>\n",
+      "\n",
+      "<center>\n",
+      "\n",
+      "<p class=\"gap05\"<p>\n",
+      "<h2>[scikit-bio.org](http://scikit-bio.org)</h2>\n",
+      "\n",
+      "<p class=\"gap05\"<p>\n",
+      "<h3>Jai Ram Rideout</h3>\n",
+      "<h3>[@ElBrogrammer](https://github.com/ElBrogrammer)</h3>\n",
+      "<h3>[Caporaso Lab](http://caporasolab.us)</h3>\n",
+      "\n",
+      "<p class=\"gap2\"<p>\n",
+      "</center>"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# What's scikit-bio?\n",
+      "\n",
+      "An open source Python bioinformatics library designed to be:\n",
+      "\n",
+      "* collaborative\n",
+      "* interactive\n",
+      "* well-documented\n",
+      "* an educational resource\n",
+      "* performant\n",
+      "* well-tested\n",
+      "\n",
+      "It's under <span class=\"emph\">active development</span> and is <span class=\"warn\">pre-alpha</span>: the API may change!"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# What's with the name?\n",
+      "\n",
+      "## <span class=\"emph\">scikit</span>-<span class=\"warn\">bio</span>:\n",
+      "\n",
+      "It's a <span class=\"emph\">scikit</span>: a toolkit built using SciPy that provides functionality used in <span class=\"warn\">bio</span>informatics.\n",
+      "\n",
+      "It's the first scikit focused on bioinformatics.\n",
+      "\n",
+      "Sometimes abbreviated <span class=\"emph\">skbio</span>."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# scikits"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "website('scikits.appspot.com/scikits', 'List of scikits')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "html": [
+        "<div class=\"nb_link\">\n",
+        "<a href=\"http://scikits.appspot.com/scikits\" target=\"_blank\">List of scikits</a>\n",
+        "</div>\n",
+        "<iframe src=\"http://scikits.appspot.com/scikits\"  width=\"800\" height=\"450\">"
+       ],
+       "metadata": {},
+       "output_type": "pyout",
+       "prompt_number": 2,
+       "text": [
+        "<IPython.core.display.HTML at 0x2130d90>"
+       ]
+      }
+     ],
+     "prompt_number": 2
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Who should use it?"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "fragment"
+      }
+     },
+     "source": [
+      "* Researchers\n",
+      "  - Directly use skbio to analyze data, test hypotheses, and reach conclusions\n",
+      "  - Example: this presentation :)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "fragment"
+      }
+     },
+     "source": [
+      "* Software developers\n",
+      "  - Use skbio to build larger systems that answer biological questions\n",
+      "  - Example: QIIME, EMPeror"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "fragment"
+      }
+     },
+     "source": [
+      "* Students\n",
+      "  - Use skbio as tool for learning bioinformatics\n",
+      "  - Example: Introduction to Applied Bioinformatics (IAB)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Interactive computing: flexible API"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio.core.distance import DistanceMatrix\n",
+      "\n",
+      "# Load using file path\n",
+      "dm1 = DistanceMatrix.from_file('smalldm.txt')\n",
+      "\n",
+      "# Load using file object\n",
+      "with open('smalldm.txt', 'U') as dm_f:\n",
+      "    dm2 = DistanceMatrix.from_file(dm_f)\n",
+      "\n",
+      "# They should be equal\n",
+      "dm1 == dm2"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "-"
+      }
+     },
+     "outputs": [
+      {
+       "metadata": {},
+       "output_type": "pyout",
+       "prompt_number": 3,
+       "text": [
+        "True"
+       ]
+      }
+     ],
+     "prompt_number": 3
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Interactive computing: ASCII art!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio.core.tree import TreeNode\n",
+      "tree = TreeNode.from_newick('((A, B)C, D)root;')\n",
+      "print tree.ascii_art()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "                    /-A\n",
+        "          /C-------|\n",
+        "-root----|          \\-B\n",
+        "         |\n",
+        "          \\-D\n"
+       ]
+      }
+     ],
+     "prompt_number": 1
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Documentation\n",
+      "\n",
+      "## Docstrings\n",
+      "* [numpydoc](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt) standard\n",
+      "* Readable by humans and machines\n",
+      "* Readable from within:\n",
+      "  - the code\n",
+      "  - an interactive session (Python/IPython/IPython Notebook)\n",
+      "  - website (HTML)\n",
+      "  - PDF"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Getting help"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "help(DistanceMatrix)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "Help on class DistanceMatrix in module skbio.core.distance:\n",
+        "\n",
+        "class DistanceMatrix(DissimilarityMatrix)\n",
+        " |  Store distances between objects.\n",
+        " |  \n",
+        " |  A `DistanceMatrix` is a `DissimilarityMatrix` with the additional\n",
+        " |  requirement that the matrix data is symmetric. There are additional methods\n",
+        " |  made available that take advantage of this symmetry.\n",
+        " |  \n",
+        " |  See Also\n",
+        " |  --------\n",
+        " |  DissimilarityMatrix\n",
+        " |  \n",
+        " |  Notes\n",
+        " |  -----\n",
+        " |  The distances are stored in redundant (square-form) format [1]_. To\n",
+        " |  facilitate use with other scientific Python routines (e.g., scipy), the\n",
+        " |  distances can be retrieved in condensed (vector-form) format using\n",
+        " |  `condensed_form`.\n",
+        " |  \n",
+        " |  `DistanceMatrix` only requires that the distances it stores are symmetric.\n",
+        " |  Checks are *not* performed to ensure the other three metric properties\n",
+        " |  hold (non-negativity, identity of indiscernibles, and triangle inequality)\n",
+        " |  [2]_. Thus, a `DistanceMatrix` instance can store distances that are not\n",
+        " |  metric.\n",
+        " |  \n",
+        " |  References\n",
+        " |  ----------\n",
+        " |  .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html\n",
+        " |  .. [2] http://planetmath.org/metricspace\n",
+        " |  \n",
+        " |  Method resolution order:\n",
+        " |      DistanceMatrix\n",
+        " |      DissimilarityMatrix\n",
+        " |      __builtin__.object\n",
+        " |  \n",
+        " |  Methods defined here:\n",
+        " |  \n",
+        " |  condensed_form(self)\n",
+        " |      Return an array of distances in condensed format.\n",
+        " |      \n",
+        " |      Returns\n",
+        " |      -------\n",
+        " |      ndarray\n",
+        " |          One-dimensional ``numpy.ndarray`` of distances in condensed format.\n",
+        " |      \n",
+        " |      Notes\n",
+        " |      -----\n",
+        " |      Condensed format is described in [1]_.\n",
+        " |      \n",
+        " |      The conversion is not a constant-time operation, though it should be\n",
+        " |      relatively quick to perform.\n",
+        " |      \n",
+        " |      References\n",
+        " |      ----------\n",
+        " |      .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html\n",
+        " |  \n",
+        " |  ----------------------------------------------------------------------\n",
+        " |  Methods inherited from DissimilarityMatrix:\n",
+        " |  \n",
+        " |  __eq__(self, other)\n",
+        " |      Compare this dissimilarity matrix to another for equality.\n",
+        " |      \n",
+        " |      Two dissimilarity matrices are equal if they have the same shape, IDs\n",
+        " |      (in the same order!), and have data arrays that are equal.\n",
+        " |      \n",
+        " |      Checks are *not* performed to ensure that `other` is a\n",
+        " |      `DissimilarityMatrix` instance.\n",
+        " |      \n",
+        " |      Parameters\n",
+        " |      ----------\n",
+        " |      other : DissimilarityMatrix\n",
+        " |          Dissimilarity matrix to compare to for equality.\n",
+        " |      \n",
+        " |      Returns\n",
+        " |      -------\n",
+        " |      bool\n",
+        " |          ``True`` if `self` is equal to `other`, ``False`` otherwise.\n",
+        " |      \n",
+        " |      .. shownumpydoc\n",
+        " |  \n",
+        " |  __getitem__(self, index)\n",
+        " |      Slice into dissimilarity data by object ID or numpy indexing.\n",
+        " |      \n",
+        " |      Extracts data from the dissimilarity matrix by object ID, a pair of\n",
+        " |      IDs, or numpy indexing/slicing.\n",
+        " |      \n",
+        " |      Parameters\n",
+        " |      ----------\n",
+        " |      index : str, two-tuple of str, or numpy index\n",
+        " |          `index` can be one of the following forms: an ID, a pair of IDs, or\n",
+        " |          a numpy index.\n",
+        " |      \n",
+        " |          If `index` is a string, it is assumed to be an ID and a\n",
+        " |          ``numpy.ndarray`` row vector is returned for the corresponding ID.\n",
+        " |          Note that the ID's row of dissimilarities is returned, *not* its\n",
+        " |          column. If the matrix is symmetric, the two will be identical, but\n",
+        " |          this makes a difference if the matrix is asymmetric.\n",
+        " |      \n",
+        " |          If `index` is a two-tuple of strings, each string is assumed to be\n",
+        " |          an ID and the corresponding matrix element is returned that\n",
+        " |          represents the dissimilarity between the two IDs. Note that the\n",
+        " |          order of lookup by ID pair matters if the matrix is asymmetric: the\n",
+        " |          first ID will be used to look up the row, and the second ID will be\n",
+        " |          used to look up the column. Thus, ``dm['a', 'b']`` may not be the\n",
+        " |          same as ``dm['b', 'a']`` if the matrix is asymmetric.\n",
+        " |      \n",
+        " |          Otherwise, `index` will be passed through to\n",
+        " |          ``DissimilarityMatrix.data.__getitem__``, allowing for standard\n",
+        " |          indexing of a ``numpy.ndarray`` (e.g., slicing).\n",
+        " |      \n",
+        " |      Returns\n",
+        " |      -------\n",
+        " |      ndarray or scalar\n",
+        " |          Indexed data, where return type depends on the form of `index` (see\n",
+        " |          description of `index` for more details).\n",
+        " |      \n",
+        " |      Raises\n",
+        " |      ------\n",
+        " |      MissingIDError\n",
+        " |          If the ID(s) specified in `index` are not in the dissimilarity\n",
+        " |          matrix.\n",
+        " |      \n",
+        " |      Notes\n",
+        " |      -----\n",
+        " |      The lookup based on ID(s) is quick.\n",
+        " |      \n",
+        " |      .. shownumpydoc\n",
+        " |  \n",
+        " |  __init__(self, data, ids)\n",
+        " |  \n",
+        " |  __ne__(self, other)\n",
+        " |      Determine whether two dissimilarity matrices are not equal.\n",
+        " |      \n",
+        " |      Parameters\n",
+        " |      ----------\n",
+        " |      other : DissimilarityMatrix\n",
+        " |          Dissimilarity matrix to compare to.\n",
+        " |      \n",
+        " |      Returns\n",
+        " |      -------\n",
+        " |      bool\n",
+        " |          ``True`` if `self` is not equal to `other`, ``False`` otherwise.\n",
+        " |      \n",
+        " |      See Also\n",
+        " |      --------\n",
+        " |      __eq__\n",
+        " |      \n",
+        " |      .. shownumpydoc\n",
+        " |  \n",
+        " |  __str__(self)\n",
+        " |      Return a string representation of the dissimilarity matrix.\n",
+        " |      \n",
+        " |      Summary includes matrix dimensions, a (truncated) list of IDs, and\n",
+        " |      (truncated) array of dissimilarities.\n",
+        " |      \n",
+        " |      Returns\n",
+        " |      -------\n",
+        " |      str\n",
+        " |          String representation of the dissimilarity matrix.\n",
+        " |      \n",
+        " |      .. shownumpydoc\n",
+        " |  \n",
+        " |  copy(self)\n",
+        " |      Return a deep copy of the dissimilarity matrix.\n",
+        " |      \n",
+        " |      Returns\n",
+        " |      -------\n",
+        " |      DissimilarityMatrix\n",
+        " |          Deep copy of the dissimilarity matrix. Will be the same type as\n",
+        " |          `self`.\n",
+        " |  \n",
+        " |  redundant_form(self)\n",
+        " |      Return an array of dissimilarities in redundant format.\n",
+        " |      \n",
+        " |      As this is the native format that the dissimilarities are stored in,\n",
+        " |      this is simply an alias for `data`.\n",
+        " |      \n",
+        " |      Returns\n",
+        " |      -------\n",
+        " |      ndarray\n",
+        " |          Two-dimensional ``numpy.ndarray`` of dissimilarities in redundant\n",
+        " |          format.\n",
+        " |      \n",
+        " |      Notes\n",
+        " |      -----\n",
+        " |      Redundant format is described in [1]_.\n",
+        " |      \n",
+        " |      Does *not* return a copy of the data.\n",
+        " |      \n",
+        " |      References\n",
+        " |      ----------\n",
+        " |      .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html\n",
+        " |  \n",
+        " |  to_file(self, out_f, delimiter='\\t')\n",
+        " |      Save the dissimilarity matrix to file in delimited text format.\n",
+        " |      \n",
+        " |      See Also\n",
+        " |      --------\n",
+        " |      from_file\n",
+        " |      \n",
+        " |      Parameters\n",
+        " |      ----------\n",
+        " |      out_f : file-like object or filename\n",
+        " |          File-like object to write serialized data to, or name of\n",
+        " |          file. If it's a file-like object, it must have a ``write``\n",
+        " |          method, and it won't be closed. Else, it is opened and\n",
+        " |          closed after writing.\n",
+        " |      delimiter : str, optional\n",
+        " |          Delimiter used to separate elements in output format.\n",
+        " |  \n",
+        " |  transpose(self)\n",
+        " |      Return the transpose of the dissimilarity matrix.\n",
+        " |      \n",
+        " |      Notes\n",
+        " |      -----\n",
+        " |      A deep copy is returned.\n",
+        " |      \n",
+        " |      Returns\n",
+        " |      -------\n",
+        " |      DissimilarityMatrix\n",
+        " |          Transpose of the dissimilarity matrix. Will be the same type as\n",
+        " |          `self`.\n",
+        " |  \n",
+        " |  ----------------------------------------------------------------------\n",
+        " |  Class methods inherited from DissimilarityMatrix:\n",
+        " |  \n",
+        " |  from_file(cls, dm_f, delimiter='\\t') from __builtin__.type\n",
+        " |      Load dissimilarity matrix from a delimited text file or file path.\n",
+        " |      \n",
+        " |      Creates a `DissimilarityMatrix` instance from a serialized\n",
+        " |      dissimilarity matrix stored as delimited text.\n",
+        " |      \n",
+        " |      `dm_f` can be a file-like or a file path object containing delimited\n",
+        " |      text. The first line (header) must contain the IDs of each object. The\n",
+        " |      subsequent lines must contain an ID followed by each dissimilarity\n",
+        " |      (float) between the current object and all other objects, where the\n",
+        " |      order of objects is determined by the header line.  For example, a 2x2\n",
+        " |      dissimilarity matrix with IDs ``'a'`` and ``'b'`` might look like::\n",
+        " |      \n",
+        " |          <del>a<del>b\n",
+        " |          a<del>0.0<del>1.0\n",
+        " |          b<del>1.0<del>0.0\n",
+        " |      \n",
+        " |      where ``<del>`` is the delimiter between elements.\n",
+        " |      \n",
+        " |      Parameters\n",
+        " |      ----------\n",
+        " |      dm_f : iterable of str or str\n",
+        " |          Iterable of strings (e.g., open file handle, file-like object, list\n",
+        " |          of strings, etc.) or a file path (a string) containing a serialized\n",
+        " |          dissimilarity matrix.\n",
+        " |      delimiter : str, optional\n",
+        " |          String delimiting elements in `dm_f`.\n",
+        " |      \n",
+        " |      Returns\n",
+        " |      -------\n",
+        " |      DissimilarityMatrix\n",
+        " |          Instance of type `cls` containing the parsed contents of `dm_f`.\n",
+        " |      \n",
+        " |      Notes\n",
+        " |      -----\n",
+        " |      Whitespace-only lines can occur anywhere throughout the \"file\" and are\n",
+        " |      ignored. Lines starting with ``#`` are treated as comments and ignored.\n",
+        " |      These comments can only occur *before* the ID header.\n",
+        " |      \n",
+        " |      IDs will have any leading/trailing whitespace removed when they are\n",
+        " |      parsed.\n",
+        " |      \n",
+        " |      .. note::\n",
+        " |          File-like objects passed to this method will not be closed upon the\n",
+        " |          completion of the parsing, it is responsibility of the owner of the\n",
+        " |          object to perform this operation.\n",
+        " |  \n",
+        " |  ----------------------------------------------------------------------\n",
+        " |  Data descriptors inherited from DissimilarityMatrix:\n",
+        " |  \n",
+        " |  T\n",
+        " |      Transpose of the dissimilarity matrix.\n",
+        " |      \n",
+        " |      See Also\n",
+        " |      --------\n",
+        " |      transpose\n",
+        " |  \n",
+        " |  __dict__\n",
+        " |      dictionary for instance variables (if defined)\n",
+        " |  \n",
+        " |  __weakref__\n",
+        " |      list of weak references to the object (if defined)\n",
+        " |  \n",
+        " |  data\n",
+        " |      Array of dissimilarities.\n",
+        " |      \n",
+        " |      A square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities\n",
+        " |      (floats). A copy is *not* returned.\n",
+        " |      \n",
+        " |      Notes\n",
+        " |      -----\n",
+        " |      This property is not writeable.\n",
+        " |  \n",
+        " |  dtype\n",
+        " |      Data type of the dissimilarities.\n",
+        " |  \n",
+        " |  ids\n",
+        " |      Tuple of object IDs.\n",
+        " |      \n",
+        " |      A tuple of strings, one for each object in the dissimilarity matrix.\n",
+        " |      \n",
+        " |      Notes\n",
+        " |      -----\n",
+        " |      This property is writeable, but the number of new IDs must match the\n",
+        " |      number of objects in `data`.\n",
+        " |  \n",
+        " |  shape\n",
+        " |      Two-element tuple containing the dissimilarity matrix dimensions.\n",
+        " |      \n",
+        " |      Notes\n",
+        " |      -----\n",
+        " |      As the dissimilarity matrix is guaranteed to be square, both tuple\n",
+        " |      entries will always be equal.\n",
+        " |  \n",
+        " |  size\n",
+        " |      Total number of elements in the dissimilarity matrix.\n",
+        " |      \n",
+        " |      Notes\n",
+        " |      -----\n",
+        " |      Equivalent to ``self.shape[0] * self.shape[1]``.\n",
+        "\n"
+       ]
+      }
+     ],
+     "prompt_number": 6
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "DistanceMatrix?"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [],
+     "prompt_number": 7
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "website('scikit-bio.org', 'scikit-bio website')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "html": [
+        "<div class=\"nb_link\">\n",
+        "<a href=\"http://scikit-bio.org\" target=\"_blank\">scikit-bio website</a>\n",
+        "</div>\n",
+        "<iframe src=\"http://scikit-bio.org\"  width=\"800\" height=\"450\">"
+       ],
+       "metadata": {},
+       "output_type": "pyout",
+       "prompt_number": 8,
+       "text": [
+        "<IPython.core.display.HTML at 0x213a110>"
+       ]
+      }
+     ],
+     "prompt_number": 8
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Testing/validation\n",
+      "\n",
+      "## Wide variety of tests:\n",
+      "* Unit tests\n",
+      "* Documentation builds\n",
+      "* Dead link checking\n",
+      "* Code style checking (PEP8)\n",
+      "* Doctests\n",
+      "* Code coverage (**94%**)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "fragment"
+      }
+     },
+     "source": [
+      "## Continuous Integration (CI) via Travis-CI\n",
+      "* Every pull request is tested\n",
+      "* Every push to master branch is tested\n",
+      "* Tested against multiple versions (Python and dependencies)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Live demo: distance-based statistics\n",
+      "\n",
+      "Let's work through an analysis that uses *distance-based statistics* to determine whether two or more groups of samples are significantly different (either in center or spread).\n",
+      "\n",
+      "We'll use <span class=\"emph\">ANOSIM</span> and <span class=\"emph\">PERMANOVA</span> to perform the hypothesis tests."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Background: The Data\n",
+      "\n",
+      "We'll use the data from [Costello *et al.* Science (2009) Bacterial Community Variation in Human Body Habitats Across Space and Time](https://www.sciencemag.org/content/326/5960/1694.full).\n",
+      "\n",
+      "Figure 1 shows several different approaches for comparing the resulting UniFrac distance matrix (this image is linked from the *Science* journal website - copyright belongs to *Science*):\n",
+      "\n",
+      "<img src=\"https://www.sciencemag.org/content/326/5960/1694/F1.large.jpg\">\n",
+      "\n",
+      "We'll start with an unweighted UniFrac distance matrix generated by QIIME and a mapping file."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Load the data"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Import the functionality we'll need to perform the analyses.\n",
+      "import pandas as pd\n",
+      "from skbio.core.distance import DistanceMatrix\n",
+      "from skbio.math.stats.distance import ANOSIM, PERMANOVA"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [],
+     "prompt_number": 9
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Load the distance matrix\n",
+      "dm = DistanceMatrix.from_file('dm.txt')\n",
+      "print dm"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "439x439 distance matrix\n",
+        "IDs:\n",
+        "M12Aptr.140800, M41Kner.140735, F24Plmr.140433, M53Tong.140327, F31Indl.140679, ...\n",
+        "Data:\n",
+        "[[ 0.          0.8261686   0.80939057 ...,  0.76901199  0.56819613\n",
+        "   0.67845042]\n",
+        " [ 0.8261686   0.          0.6563376  ...,  0.58830727  0.71583148\n",
+        "   0.72233134]\n",
+        " [ 0.80939057  0.6563376   0.         ...,  0.63909922  0.71900128\n",
+        "   0.71307195]\n",
+        " ..., \n",
+        " [ 0.76901199  0.58830727  0.63909922 ...,  0.          0.6172266\n",
+        "   0.64172663]\n",
+        " [ 0.56819613  0.71583148  0.71900128 ...,  0.6172266   0.          0.478762  ]\n",
+        " [ 0.67845042  0.72233134  0.71307195 ...,  0.64172663  0.478762    0.        ]]\n"
+       ]
+      }
+     ],
+     "prompt_number": 10
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Load the mapping file\n",
+      "mf = pd.read_csv('map.txt', sep='\\t', index_col=0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [],
+     "prompt_number": 11
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Run ANOSIM"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Run ANOSIM with 999 permutations using the body site category.\n",
+      "anosim = ANOSIM(dm, mf, 'BODY_SITE_COARSE')\n",
+      "anosim(999)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "html": [
+        "<div style=\"max-height:1000px;max-width:1500px;overflow:auto;\">\n",
+        "<table border=\"1\" class=\"dataframe\">\n",
+        "  <thead>\n",
+        "    <tr style=\"text-align: right;\">\n",
+        "      <th></th>\n",
+        "      <th>Sample size</th>\n",
+        "      <th>Number of groups</th>\n",
+        "      <th>R statistic</th>\n",
+        "      <th>p-value</th>\n",
+        "      <th>Number of permutations</th>\n",
+        "    </tr>\n",
+        "  </thead>\n",
+        "  <tbody>\n",
+        "    <tr>\n",
+        "      <th>ANOSIM</th>\n",
+        "      <td> 439</td>\n",
+        "      <td> 3</td>\n",
+        "      <td> 0.643089761317</td>\n",
+        "      <td> 0.001</td>\n",
+        "      <td> 999</td>\n",
+        "    </tr>\n",
+        "  </tbody>\n",
+        "</table>\n",
+        "<p>1 rows \u00d7 5 columns</p>\n",
+        "</div>"
+       ],
+       "metadata": {},
+       "output_type": "pyout",
+       "prompt_number": 12,
+       "text": [
+        "<skbio.math.stats.distance.base.CategoricalStatsResults at 0x264e410>"
+       ]
+      }
+     ],
+     "prompt_number": 12
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Run ANOSIM with 999 permutations using the sex category.\n",
+      "anosim = ANOSIM(dm, mf, 'SEX')\n",
+      "anosim(999)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "html": [
+        "<div style=\"max-height:1000px;max-width:1500px;overflow:auto;\">\n",
+        "<table border=\"1\" class=\"dataframe\">\n",
+        "  <thead>\n",
+        "    <tr style=\"text-align: right;\">\n",
+        "      <th></th>\n",
+        "      <th>Sample size</th>\n",
+        "      <th>Number of groups</th>\n",
+        "      <th>R statistic</th>\n",
+        "      <th>p-value</th>\n",
+        "      <th>Number of permutations</th>\n",
+        "    </tr>\n",
+        "  </thead>\n",
+        "  <tbody>\n",
+        "    <tr>\n",
+        "      <th>ANOSIM</th>\n",
+        "      <td> 439</td>\n",
+        "      <td> 2</td>\n",
+        "      <td> 0.0221749413225</td>\n",
+        "      <td> 0.030</td>\n",
+        "      <td> 999</td>\n",
+        "    </tr>\n",
+        "  </tbody>\n",
+        "</table>\n",
+        "<p>1 rows \u00d7 5 columns</p>\n",
+        "</div>"
+       ],
+       "metadata": {},
+       "output_type": "pyout",
+       "prompt_number": 13,
+       "text": [
+        "<skbio.math.stats.distance.base.CategoricalStatsResults at 0x396ef50>"
+       ]
+      }
+     ],
+     "prompt_number": 13
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Run PERMANOVA"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Run PERMANOVA with 999 permutations using the body site category.\n",
+      "permanova = PERMANOVA(dm, mf, 'BODY_SITE_COARSE')\n",
+      "permanova(999)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "html": [
+        "<div style=\"max-height:1000px;max-width:1500px;overflow:auto;\">\n",
+        "<table border=\"1\" class=\"dataframe\">\n",
+        "  <thead>\n",
+        "    <tr style=\"text-align: right;\">\n",
+        "      <th></th>\n",
+        "      <th>Sample size</th>\n",
+        "      <th>Number of groups</th>\n",
+        "      <th>pseudo-F statistic</th>\n",
+        "      <th>p-value</th>\n",
+        "      <th>Number of permutations</th>\n",
+        "    </tr>\n",
+        "  </thead>\n",
+        "  <tbody>\n",
+        "    <tr>\n",
+        "      <th>PERMANOVA</th>\n",
+        "      <td> 439</td>\n",
+        "      <td> 3</td>\n",
+        "      <td> 51.2561893345</td>\n",
+        "      <td> 0.001</td>\n",
+        "      <td> 999</td>\n",
+        "    </tr>\n",
+        "  </tbody>\n",
+        "</table>\n",
+        "<p>1 rows \u00d7 5 columns</p>\n",
+        "</div>"
+       ],
+       "metadata": {},
+       "output_type": "pyout",
+       "prompt_number": 14,
+       "text": [
+        "<skbio.math.stats.distance.base.CategoricalStatsResults at 0x3ce89d0>"
+       ]
+      }
+     ],
+     "prompt_number": 14
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Run PERMANOVA with 999 permutations using the sex category.\n",
+      "permanova = PERMANOVA(dm, mf, 'SEX')\n",
+      "permanova(999)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "html": [
+        "<div style=\"max-height:1000px;max-width:1500px;overflow:auto;\">\n",
+        "<table border=\"1\" class=\"dataframe\">\n",
+        "  <thead>\n",
+        "    <tr style=\"text-align: right;\">\n",
+        "      <th></th>\n",
+        "      <th>Sample size</th>\n",
+        "      <th>Number of groups</th>\n",
+        "      <th>pseudo-F statistic</th>\n",
+        "      <th>p-value</th>\n",
+        "      <th>Number of permutations</th>\n",
+        "    </tr>\n",
+        "  </thead>\n",
+        "  <tbody>\n",
+        "    <tr>\n",
+        "      <th>PERMANOVA</th>\n",
+        "      <td> 439</td>\n",
+        "      <td> 2</td>\n",
+        "      <td> 3.41228470844</td>\n",
+        "      <td> 0.001</td>\n",
+        "      <td> 999</td>\n",
+        "    </tr>\n",
+        "  </tbody>\n",
+        "</table>\n",
+        "<p>1 rows \u00d7 5 columns</p>\n",
+        "</div>"
+       ],
+       "metadata": {},
+       "output_type": "pyout",
+       "prompt_number": 15,
+       "text": [
+        "<skbio.math.stats.distance.base.CategoricalStatsResults at 0x3ce82d0>"
+       ]
+      }
+     ],
+     "prompt_number": 15
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Next steps\n",
+      "\n",
+      "* Initial 0.1.0 release this week\n",
+      "* Present at SciPy 2014 (July)\n",
+      "* Stabilize API\n",
+      "* Automated performance testing\n",
+      "* New features\n",
+      "  - alpha/beta diversity metrics\n",
+      "  - more distance-based stats"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Acknowledgments\n",
+      "\n",
+      "Special thanks to:\n",
+      "\n",
+      "* scikit-bio developers\n",
+      "* [Knight Lab](https://knightlab.colorado.edu)\n",
+      "* IPython developers\n",
+      "\n",
+      "This notebook is based on the template/tools provided in Fernando Perez's [nb-slideshow-template](https://github.com/fperez/nb-slideshow-template) repository, and is licensed under the [new BSD license](http://opensource.org/licenses/BSD-3-Clause)."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Thanks for listening!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import skbio\n",
+      "print skbio.title\n",
+      "print skbio.art\n",
+      "print skbio.motto"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "\n",
+        "               _ _    _ _          _     _\n",
+        "              (_) |  (_) |        | |   (_)\n",
+        "      ___  ___ _| | ___| |_ ______| |__  _  ___\n",
+        "     / __|/ __| | |/ / | __|______| '_ \\| |/ _ \\\n",
+        "     \\__ \\ (__| |   <| | |_       | |_) | | (_) |\n",
+        "     |___/\\___|_|_|\\_\\_|\\__|      |_.__/|_|\\___/\n",
+        "\n",
+        "\n",
+        "\n",
+        "\n",
+        "           Opisthokonta\n",
+        "                   \\  Amoebozoa\n",
+        "                    \\ /\n",
+        "                     *    Euryarchaeota\n",
+        "                      \\     |_ Crenarchaeota\n",
+        "                       \\   *\n",
+        "                        \\ /\n",
+        "                         *\n",
+        "                        /\n",
+        "                       /\n",
+        "                      /\n",
+        "                     *\n",
+        "                    / \\\n",
+        "                   /   \\\n",
+        "        Proteobacteria  \\\n",
+        "                       Cyanobacteria\n",
+        "\n",
+        "It's gonna get weird, bro.\n"
+       ]
+      }
+     ],
+     "prompt_number": 16
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/smalldm.txt b/ipynbs/presentations/2014.05.13-ElBrogrammer/smalldm.txt
new file mode 100644
index 0000000..55927e0
--- /dev/null
+++ b/ipynbs/presentations/2014.05.13-ElBrogrammer/smalldm.txt
@@ -0,0 +1,3 @@
+	Sample1	Sample2
+Sample1	0.0	0.5
+Sample2	0.5	0.0
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/style.css b/ipynbs/presentations/2014.05.13-ElBrogrammer/style.css
new file mode 100644
index 0000000..dbe7a84
--- /dev/null
+++ b/ipynbs/presentations/2014.05.13-ElBrogrammer/style.css
@@ -0,0 +1,126 @@
+<style>
+
+/* Originally from https://github.com/fperez/nb-slideshow-template */
+
+.rendered_html
+{
+  color: #2C5494;
+  font-family: Ubuntu;
+  font-size: 160%;
+  line-height: 1.1;
+  margin: 0.5em 0;
+  }
+
+.title
+{
+  color: #498AF3;
+  font-size: 250%;
+  font-weight:bold;
+  line-height: 1.2; 
+  margin: 10px 50px 10px;
+  }
+
+.subtitle
+{
+  color: #386BBC;
+  font-size: 180%;
+  font-weight:bold;
+  line-height: 1.2; 
+  margin: 20px 50px 20px;
+  }
+
+.slide-header, p.slide-header
+{
+  color: #498AF3;
+  font-size: 200%;
+  font-weight:bold;
+  margin: 0px 20px 10px;
+  page-break-before: always;
+  text-align: center;
+  }
+
+.rendered_html h1
+{
+  color: #498AF3;
+  line-height: 1.2; 
+  margin: 0.15em 0em 0.5em;
+  page-break-before: always;
+  text-align: center;
+  }
+
+
+.rendered_html h2
+{ 
+  color: #386BBC;
+  line-height: 1.2;
+  margin: 1.1em 0em 0.5em;
+  }
+
+.rendered_html h3
+{ 
+  font-size: 100%;
+  line-height: 1.2;
+  margin: 1.1em 0em 0.5em;
+  }
+
+.rendered_html li
+{
+  line-height: 1.8;
+  }
+
+.input_prompt, .CodeMirror-lines, .output_area
+{
+  font-size: 120%;
+  }
+
+.gap-above
+{
+  padding-top: 200px;
+  }
+
+.gap01
+{
+  padding-top: 10px;
+  }
+
+.gap05
+{
+  padding-top: 50px;
+  }
+
+.gap1
+{
+  padding-top: 100px;
+  }
+
+.gap2
+{
+  padding-top: 200px;
+  }
+
+.gap3
+{
+  padding-top: 300px;
+  }
+
+.emph
+{
+  color: #386BBC;
+  }
+
+.warn
+{
+  color: red;
+  }
+
+.center
+{
+  text-align: center;
+  }
+
+.nb_link
+{
+    padding-bottom: 0.5em;
+}
+
+</style>
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/talktools.py b/ipynbs/presentations/2014.05.13-ElBrogrammer/talktools.py
new file mode 100644
index 0000000..1f1ee40
--- /dev/null
+++ b/ipynbs/presentations/2014.05.13-ElBrogrammer/talktools.py
@@ -0,0 +1,41 @@
+"""Tools to style a talk."""
+
+# Originally from https://github.com/fperez/nb-slideshow-template
+
+from IPython.display import HTML, display, YouTubeVideo
+
def prefix(url):
    """Return *url* with an ``http://`` scheme prepended if it lacks one.

    URLs that already start with ``http`` (``http://`` or ``https://``)
    are returned unchanged.
    """
    # Renamed the local so it no longer shadows the function's own name.
    scheme = '' if url.startswith('http') else 'http://'
    return scheme + url
+
+
def simple_link(url, name=None):
    """Return an HTML anchor linking to *url* that opens in a new tab.

    If *name* is omitted, the URL itself is used as the link text.
    """
    if name is None:
        name = url
    target = prefix(url)
    return '<a href="%s" target="_blank">%s</a>' % (target, name)
+
+
def html_link(url, name=None):
    """Return an IPython ``HTML`` display object for a link to *url*."""
    markup = simple_link(url, name)
    return HTML(markup)
+
+
+# Utility functions
def website(url, name=None, width=800, height=450):
    """Embed *url* in an iframe, optionally preceded by a titled link.

    Parameters
    ----------
    url : str
        Address to embed; an ``http://`` scheme is added if missing.
    name : str, optional
        If given, a link with this text is rendered above the iframe.
    width, height : int, optional
        Iframe dimensions in pixels.

    Returns
    -------
    IPython.display.HTML
        Display object containing the generated markup.
    """
    parts = []
    if name:
        parts.extend(['<div class="nb_link">',
                      simple_link(url, name),
                      '</div>'])

    # Close the iframe element: HTML requires an explicit </iframe> end
    # tag, and an unterminated iframe swallows any following content.
    parts.append('<iframe src="%s"  width="%s" height="%s"></iframe>' %
                 (prefix(url), width, height))
    return HTML('\n'.join(parts))
+
+
def nbviewer(url, name=None, width=800, height=450):
    """Display the notebook at *url* rendered through nbviewer."""
    nbviewer_url = 'nbviewer.ipython.org/url/' + url
    return website(nbviewer_url, name, width, height)
+
# Load and publish the talk's CSS so it styles the whole notebook.
# Use a context manager so the file handle is closed deterministically
# (the original open(...).read() relied on garbage collection to close it).
with open('style.css') as _css_f:
    style = HTML(_css_f.read())

display(style)
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/caporaso-scipy2014.ipynb b/ipynbs/presentations/2014.07.09-gregcaporaso/caporaso-scipy2014.ipynb
new file mode 100644
index 0000000..c92ea19
--- /dev/null
+++ b/ipynbs/presentations/2014.07.09-gregcaporaso/caporaso-scipy2014.ipynb
@@ -0,0 +1,1470 @@
+{
+ "metadata": {
+  "celltoolbar": "Slideshow",
+  "name": "",
+  "signature": "sha256:54c4a927f02d5651df5541ec07a407e31cb07ef22265e894ce6cf4edf21efbb0"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# scikit-bio: core bioinformatics data structures and algorithms in Python\n",
+      "\n",
+      "**J Gregory Caporaso**\n",
+      "\n",
+      "**[caporasolab.us](http://caporasolab.us)**\n",
+      "\n",
+      "**[Northern Arizona University](http://nau.edu)**\n",
+      "\n",
+      "**[scikit-bio.org](http://scikit-bio.org)**\n",
+      "\n",
+      "**GitHub/Twitter**: @gregcaporaso"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from __future__ import division, print_function\n",
+      "from IPython.core import page\n",
+      "page.page = print\n",
+      "\n",
+      "import warnings\n",
+      "warnings.simplefilter('always')"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "skip"
+      }
+     },
+     "outputs": [],
+     "prompt_number": 1
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# The pre-history of scikit-bio...\n",
+      "\n",
+      "![](skbio-timeline.png)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "#### [github.com/biocore](https://github.com/biocore)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Quantitative Insights Into Microbial Ecology\n",
+      "## [Cited 1596 times](http://scholar.google.com/scholar?cites=16903127068530972426&as_sdt=5,44&sciodt=0,44&hl=en) since [publication in 2010](http://www.nature.com/nmeth/journal/v7/n5/full/nmeth.f.303.html)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "-"
+      }
+     },
+     "source": [
+      "![](qiime-cites.png)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# scikit-bio: framework to make building tools like QIIME easier\n"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Integration with the python scientific computing stack\n",
+      " * scipy, numpy, IPython, matplotlib, pandas\n",
+      "\n",
+      "# Modern community standards\n",
+      " * numpy API documentation standards\n",
+      " * Full PEP8 compliance\n",
+      " * 99% test coverage (via coverage.py)\n",
+      " * Native py2/py3 compatibility\n",
+      " * Hosted on GitHub\n",
+      " * Continuous Integration testing with Travis\n",
+      " * Peer-reviewed code via pull requests\n",
+      " * BSD-licensed"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Core objects and algorithms for bioinformatics\n",
+      "\n",
+      "## [scikit-bio.org/docs/latest/](http://scikit-bio.org/docs/latest/)\n",
+      "\n",
+      "<img src=\"skbio-docs.png\" style=\"width: 600px;\"/ align=\"left\">\n"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# scikit-bio: education-ready and production-ready toolkit"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "\n",
+      "\n",
+      "\n",
+      "# An Introduction To Applied Bioinformatics\n",
+      "## [applied-bioinformatics.org](http://applied-bioinformatics.org)\n",
+      "## Bioinformatics education in the context of production-ready implementations\n",
+      "\n",
+      "<img title=\"Logo by @gregcaporaso.\" style=\"float: right; margin-left: 30px; height:400px;\" src=\"logo.png\" align=right />\n",
+      "\n",
+      "Bioinformatics, as I see it, is the application of the tools of computer science (things like programming languages, algorithms, and databases) to address biological problems (for example, inferring the evolutionary relationship between a group of organisms based on fragments of their genomes, or understanding if or how the community of microorganisms that live in my gut changes if I modify my diet). Bioinformatics is a rapidly growing field, largely in response to the vast increa [...]
+      "\n",
+      "I teach bioinformatics at the undergraduate and graduate levels at Northern Arizona University. This repository contains some of the materials that I've developed in these courses, and represents an initial attempt to organize these materials in a standalone way. If you'd like to read a little more about the project, see my [blog post](http://microbe.net/2014/05/01/teaching-bioinformatics-using-ipython-notebooks/) on [microbe.net](http://microbe.net).\n",
+      "\n",
+      "Disclaimer\n",
+      "----------\n",
+      "\n",
+      "**This project is in very early development stage.** It's not ready for prime-time by any means, but I fall firmly into the \"publish early, publish often\" mindset, hence its public availability. I am very interested in feedback in the form of email (gregcaporaso at gmail.com) or [pull requests](https://help.github.com/articles/using-pull-requests).\n",
+      "\n",
+      "The code in the iab module is **not sufficiently tested, documented, or optimized for production use**. As code reaches those quality standards it will be ported to [scikit-bio](http://www.scikit-bio.org). I do not recommend using the code in the iab module outside of these notebooks. In other words, don't `import iab` outside of the notebooks - if you want access to the functionality in your own code, you should `import skbio`.\n",
+      "\n",
+      "Currently, the **best example of where I'm hoping to go with these materials** is the [multiple sequence alignment](http://nbviewer.ipython.org/github/gregcaporaso/An-Introduction-To-Applied-Bioinformatics/blob/master/algorithms/4-multiple-sequence-alignment.ipynb) chapter.\n"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "\n",
+      "\n",
+      "\n",
+      "# An Introduction To Applied Bioinformatics\n",
+      "## [applied-bioinformatics.org](http://applied-bioinformatics.org)\n",
+      "\n",
+      "![](iab-example.png)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Detailed API documentation, so accessible to new users..."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio.core.alignment.pairwise import local_pairwise_align_nucleotide\n",
+      "\n",
+      "local_pairwise_align_nucleotide?"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "-"
+      }
+     },
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "\u001b[0;31mType:        \u001b[0mfunction\n",
+        "\u001b[0;31mString form: \u001b[0m<function local_pairwise_align_nucleotide at 0x1045585f0>\n",
+        "\u001b[0;31mFile:        \u001b[0m/Users/caporaso/code/skbio/skbio/core/alignment/pairwise.py\n",
+        "\u001b[0;31mDefinition:  \u001b[0m\u001b[0mlocal_pairwise_align_nucleotide\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mseq1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mseq2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgap_open_penalty\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgap_extend_penalty\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmatch_score\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34 [...]
+        "\u001b[0;31mDocstring:\u001b[0m\n",
+        "Locally align exactly two nucleotide seqs with Smith-Waterman\n",
+        "\n",
+        "Parameters\n",
+        "----------\n",
+        "seq1 : str or BiologicalSequence\n",
+        "    The first unaligned sequence.\n",
+        "seq2 : str or BiologicalSequence\n",
+        "    The second unaligned sequence.\n",
+        "gap_open_penalty : int or float, optional\n",
+        "    Penalty for opening a gap (this is substracted from previous best\n",
+        "    alignment score, so is typically positive).\n",
+        "gap_extend_penalty : int or float, optional\n",
+        "    Penalty for extending a gap (this is substracted from previous best\n",
+        "    alignment score, so is typically positive).\n",
+        "match_score : int or float, optional\n",
+        "    The score to add for a match between a pair of bases (this is added\n",
+        "    to the previous best alignment score, so is typically positive).\n",
+        "mismatch_score : int or float, optional\n",
+        "    The score to add for a mismatch between a pair of bases (this is\n",
+        "    added to the previous best alignment score, so is typically\n",
+        "    negative).\n",
+        "substitution_matrix: 2D dict (or similar)\n",
+        "    Lookup for substitution scores (these values are added to the\n",
+        "    previous best alignment score). If provided, this overrides\n",
+        "    ``match_score`` and ``mismatch_score``.\n",
+        "\n",
+        "Returns\n",
+        "-------\n",
+        "skbio.Alignment\n",
+        "    ``Alignment`` object containing the aligned sequences as well as\n",
+        "    details about the alignment.\n",
+        "\n",
+        "See Also\n",
+        "--------\n",
+        "local_pairwise_align\n",
+        "local_pairwise_align_protein\n",
+        "skbio.core.alignment.local_pairwise_align_ssw\n",
+        "global_pairwise_align\n",
+        "global_pairwise_align_protein\n",
+        "global_pairwise_align_nucelotide\n",
+        "\n",
+        "Notes\n",
+        "-----\n",
+        "Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and\n",
+        "``gap_extend_penalty`` parameters are derived from the NCBI BLAST\n",
+        "Server [1]_.\n",
+        "\n",
+        "References\n",
+        "----------\n",
+        ".. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi\n"
+       ]
+      }
+     ],
+     "prompt_number": 2
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# ... and that documentation is also of course available on the web"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "![](skbio-alignment-docs.png)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# We can easily apply these python aligners"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio.core.alignment.pairwise import local_pairwise_align_nucleotide\n",
+      "\n",
+      "s1 = \"ACTAAGGCTCTCTACCCCTCTCAGAGA\"\n",
+      "s2 = \"AAAAAACTCTCTAAACTCACTAAGGCTCTCTACCCCTCTTCAGAGAAGTCGA\"\n",
+      "r = local_pairwise_align_nucleotide(s1, s2)\n",
+      "print(type(r))\n",
+      "print(r)"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "-"
+      }
+     },
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "<class 'skbio.core.alignment.alignment.Alignment'>\n",
+        ">0\n",
+        "ACTAAGGCTCTCTACCCCTC-TCAGAGA\n",
+        ">1\n",
+        "ACTAAGGCTCTCTACCCCTCTTCAGAGA\n",
+        "\n"
+       ]
+      },
+      {
+       "output_type": "stream",
+       "stream": "stderr",
+       "text": [
+        "/Users/caporaso/code/skbio/skbio/core/alignment/pairwise.py:300: EfficiencyWarning: You're using skbio's python implementation of Smith-Waterman alignment. This will be very slow (e.g., thousands of times slower) than skbio.core.alignment.local_pairwise_align_ssw.\n",
+        "  EfficiencyWarning)\n"
+       ]
+      }
+     ],
+     "prompt_number": 3
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio import DNA\n",
+      "\n",
+      "s1 = DNA(\"ACTAAGGCTCTCTACCCCTCTCAGAGA\", \"query\")\n",
+      "s2 = DNA(\"AAAAAACTCTCTAAACTCACTAAGGCTCTCTACCCCTCTTCAGAGAAGTCGA\", \"target\")\n",
+      "r = local_pairwise_align_nucleotide(s1, s2)\n",
+      "print(type(r))\n",
+      "print(r)"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "-"
+      }
+     },
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "<class 'skbio.core.alignment.alignment.Alignment'>\n",
+        ">query\n",
+        "ACTAAGGCTCTCTACCCCTC-TCAGAGA\n",
+        ">target\n",
+        "ACTAAGGCTCTCTACCCCTCTTCAGAGA\n",
+        "\n"
+       ]
+      },
+      {
+       "output_type": "stream",
+       "stream": "stderr",
+       "text": [
+        "/Users/caporaso/code/skbio/skbio/core/alignment/pairwise.py:300: EfficiencyWarning: You're using skbio's python implementation of Smith-Waterman alignment. This will be very slow (e.g., thousands of times slower) than skbio.core.alignment.local_pairwise_align_ssw.\n",
+        "  EfficiencyWarning)\n"
+       ]
+      }
+     ],
+     "prompt_number": 4
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# But, can't use python for implementing alignment for production code, so provide Cython wrappers with matching interfaces"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio.core.alignment import local_pairwise_align_ssw\n",
+      "local_pairwise_align_ssw?"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "-"
+      }
+     },
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "\u001b[0;31mType:        \u001b[0mbuiltin_function_or_method\n",
+        "\u001b[0;31mString form: \u001b[0m<built-in function local_pairwise_align_ssw>\n",
+        "\u001b[0;31mDocstring:\u001b[0m\n",
+        "Align query and target sequences with Striped Smith-Waterman.\n",
+        "\n",
+        "Parameters\n",
+        "----------\n",
+        "sequence1 : str or BiologicalSequence\n",
+        "    The first unaligned sequence\n",
+        "sequence2 : str or BiologicalSequence\n",
+        "    The second unaligned sequence\n",
+        "\n",
+        "Returns\n",
+        "-------\n",
+        "``skbio.core.alignment.Alignment``\n",
+        "    The resulting alignment as an Alignment object\n",
+        "\n",
+        "Notes\n",
+        "-----\n",
+        "For a complete list of optional keyword-arguments that can be provided,\n",
+        "see ``skbio.core.alignment.StripedSmithWaterman``.\n",
+        "\n",
+        "The following kwargs will not have any effect: `suppress_sequences` and\n",
+        "`zero_index`\n",
+        "\n",
+        "If an alignment does not meet a provided filter, `None` will be returned.\n",
+        "\n",
+        "See Also\n",
+        "--------\n",
+        "skbio.core.alignment.StripedSmithWaterman\n"
+       ]
+      }
+     ],
+     "prompt_number": 5
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "r = local_pairwise_align_ssw(s1, s2)\n",
+      "print(type(r))\n",
+      "print(r)"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "-"
+      }
+     },
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "<class 'skbio.core.alignment.alignment.Alignment'>\n",
+        ">query\n",
+        "ACTAAGGCTCTCTACCCCTC-TCAGAGA\n",
+        ">target\n",
+        "ACTAAGGCTCTCTACCCCTCTTCAGAGA\n",
+        "\n"
+       ]
+      }
+     ],
+     "prompt_number": 6
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# As expected, the C/Cython code is much faster..."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio.parse.sequences import parse_fasta\n",
+      "from skbio import SequenceCollection\n",
+      "from random import choice\n",
+      "gg_path = \"/Users/caporaso/data/gg_13_8_otus/rep_set/73_otus.fasta\"\n",
+      "\n",
+      "s = SequenceCollection.from_fasta_records([(i, s) for i, s in parse_fasta(gg_path) if set(s) == set('ACGT')], DNA)"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "-"
+      }
+     },
+     "outputs": [],
+     "prompt_number": 7
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%timeit local_pairwise_align_ssw(choice(s), choice(s), gap_open_penalty=5,\\\n",
+      "                                 gap_extend_penalty=2, match_score=2, mismatch_score=-3)"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "-"
+      }
+     },
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "100 loops, best of 3: 4.25 ms per loop\n"
+       ]
+      }
+     ],
+     "prompt_number": 8
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "warnings.simplefilter('ignore')\n",
+      "%timeit local_pairwise_align_nucleotide(choice(s), choice(s), gap_open_penalty=5,\\\n",
+      "                                        gap_extend_penalty=2, match_score=2, mismatch_score=-3)"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "-"
+      }
+     },
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "1 loops, best of 3: 18.8 s per loop\n"
+       ]
+      }
+     ],
+     "prompt_number": 9
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# scikit-bio: simpler bioinformatics pipeline development\n",
+      "\n",
+      "## We'll re-create QIIME's [beta_diversity_through_plots.py](http://qiime.org/scripts/beta_diversity_through_plots.html) workflow."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "#### For conceptual discussion, see the *[Studying Biological Diversity](http://nbviewer.ipython.org/github/gregcaporaso/An-Introduction-To-Applied-Bioinformatics/blob/master/applications/1-biological-diversity.ipynb)* chapter of *[An Introduction to Applied Bioinformatics](http://caporasolab.us/An-Introduction-To-Applied-Bioinformatics/)*."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Six samples of the human microbiome (two subjects and three body sites)"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import pandas as pd\n",
+      "sample_md = {\n",
+      "   'A': {'body_site': 'gut', 'subject': '1'},\n",
+      "   'B': {'body_site': 'skin', 'subject': '1'},\n",
+      "   'C': {'body_site': 'tongue', 'subject': '1'},\n",
+      "   'D': {'body_site': 'gut', 'subject': '2'},\n",
+      "   'E': {'body_site': 'tongue', 'subject': '2'},\n",
+      "   'F': {'body_site': 'skin', 'subject': '2'}}\n",
+      "sample_md = pd.DataFrame.from_dict(sample_md, orient='index')\n",
+      "sample_md"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "html": [
+        "<div style=\"max-height:1000px;max-width:1500px;overflow:auto;\">\n",
+        "<table border=\"1\" class=\"dataframe\">\n",
+        "  <thead>\n",
+        "    <tr style=\"text-align: right;\">\n",
+        "      <th></th>\n",
+        "      <th>subject</th>\n",
+        "      <th>body_site</th>\n",
+        "    </tr>\n",
+        "  </thead>\n",
+        "  <tbody>\n",
+        "    <tr>\n",
+        "      <th>A</th>\n",
+        "      <td> 1</td>\n",
+        "      <td>    gut</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>B</th>\n",
+        "      <td> 1</td>\n",
+        "      <td>   skin</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>C</th>\n",
+        "      <td> 1</td>\n",
+        "      <td> tongue</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>D</th>\n",
+        "      <td> 2</td>\n",
+        "      <td>    gut</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>E</th>\n",
+        "      <td> 2</td>\n",
+        "      <td> tongue</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>F</th>\n",
+        "      <td> 2</td>\n",
+        "      <td>   skin</td>\n",
+        "    </tr>\n",
+        "  </tbody>\n",
+        "</table>\n",
+        "<p>6 rows \u00d7 2 columns</p>\n",
+        "</div>"
+       ],
+       "metadata": {},
+       "output_type": "pyout",
+       "prompt_number": 10,
+       "text": [
+        "  subject body_site\n",
+        "A       1       gut\n",
+        "B       1      skin\n",
+        "C       1    tongue\n",
+        "D       2       gut\n",
+        "E       2    tongue\n",
+        "F       2      skin\n",
+        "\n",
+        "[6 rows x 2 columns]"
+       ]
+      }
+     ],
+     "prompt_number": 10
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Are samples derived from the same subject more similar than samples from different subjects?\n",
+      "\n",
+      "## (Here we have 6 samples and 7 taxa, but our current record is 15,000 samples and 5.6 million taxa.)"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = [[23, 64, 14, 0, 0, 3, 1],\n",
+      "        [0, 3, 35, 42, 0, 12, 1],\n",
+      "        [0, 5, 5, 0, 40, 40, 0],\n",
+      "        [44, 35, 9, 0, 1, 0, 0],\n",
+      "        [0, 2, 8, 0, 35, 45, 1],\n",
+      "        [0, 0, 25, 35, 0, 19, 0]]\n",
+      "table = pd.DataFrame(data,\n",
+      "                     columns=['Species 1', 'Species 2', 'Species 3', 'Species 4', 'Species 5', 'Species 6', 'Species 7'],\n",
+      "                     index=list('ABCDEF'))\n",
+      "table"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "html": [
+        "<div style=\"max-height:1000px;max-width:1500px;overflow:auto;\">\n",
+        "<table border=\"1\" class=\"dataframe\">\n",
+        "  <thead>\n",
+        "    <tr style=\"text-align: right;\">\n",
+        "      <th></th>\n",
+        "      <th>Species 1</th>\n",
+        "      <th>Species 2</th>\n",
+        "      <th>Species 3</th>\n",
+        "      <th>Species 4</th>\n",
+        "      <th>Species 5</th>\n",
+        "      <th>Species 6</th>\n",
+        "      <th>Species 7</th>\n",
+        "    </tr>\n",
+        "  </thead>\n",
+        "  <tbody>\n",
+        "    <tr>\n",
+        "      <th>A</th>\n",
+        "      <td> 23</td>\n",
+        "      <td> 64</td>\n",
+        "      <td> 14</td>\n",
+        "      <td>  0</td>\n",
+        "      <td>  0</td>\n",
+        "      <td>  3</td>\n",
+        "      <td> 1</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>B</th>\n",
+        "      <td>  0</td>\n",
+        "      <td>  3</td>\n",
+        "      <td> 35</td>\n",
+        "      <td> 42</td>\n",
+        "      <td>  0</td>\n",
+        "      <td> 12</td>\n",
+        "      <td> 1</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>C</th>\n",
+        "      <td>  0</td>\n",
+        "      <td>  5</td>\n",
+        "      <td>  5</td>\n",
+        "      <td>  0</td>\n",
+        "      <td> 40</td>\n",
+        "      <td> 40</td>\n",
+        "      <td> 0</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>D</th>\n",
+        "      <td> 44</td>\n",
+        "      <td> 35</td>\n",
+        "      <td>  9</td>\n",
+        "      <td>  0</td>\n",
+        "      <td>  1</td>\n",
+        "      <td>  0</td>\n",
+        "      <td> 0</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>E</th>\n",
+        "      <td>  0</td>\n",
+        "      <td>  2</td>\n",
+        "      <td>  8</td>\n",
+        "      <td>  0</td>\n",
+        "      <td> 35</td>\n",
+        "      <td> 45</td>\n",
+        "      <td> 1</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>F</th>\n",
+        "      <td>  0</td>\n",
+        "      <td>  0</td>\n",
+        "      <td> 25</td>\n",
+        "      <td> 35</td>\n",
+        "      <td>  0</td>\n",
+        "      <td> 19</td>\n",
+        "      <td> 0</td>\n",
+        "    </tr>\n",
+        "  </tbody>\n",
+        "</table>\n",
+        "<p>6 rows \u00d7 7 columns</p>\n",
+        "</div>"
+       ],
+       "metadata": {},
+       "output_type": "pyout",
+       "prompt_number": 11,
+       "text": [
+        "   Species 1  Species 2  Species 3  Species 4  Species 5  Species 6  Species 7\n",
+        "A         23         64         14          0          0          3          1\n",
+        "B          0          3         35         42          0         12          1\n",
+        "C          0          5          5          0         40         40          0\n",
+        "D         44         35          9          0          1          0          0\n",
+        "E          0          2          8          0         35         45          1\n",
+        "F          0          0         25         35          0         19          0\n",
+        "\n",
+        "[6 rows x 7 columns]"
+       ]
+      }
+     ],
+     "prompt_number": 11
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Using scikit-bio and scipy, we can then compute pairwise distances between the samples"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio.math.diversity.beta import pw_distances\n",
+      "\n",
+      "bc_dm = pw_distances(table, table.index, \"braycurtis\")\n",
+      "print(bc_dm)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "6x6 distance matrix\n",
+        "IDs:\n",
+        "A, B, C, D, E, F\n",
+        "Data:\n",
+        "[[ 0.          0.78787879  0.86666667  0.30927835  0.85714286  0.81521739]\n",
+        " [ 0.78787879  0.          0.78142077  0.86813187  0.75        0.1627907 ]\n",
+        " [ 0.86666667  0.78142077  0.          0.87709497  0.09392265  0.71597633]\n",
+        " [ 0.30927835  0.86813187  0.87709497  0.          0.87777778  0.89285714]\n",
+        " [ 0.85714286  0.75        0.09392265  0.87777778  0.          0.68235294]\n",
+        " [ 0.81521739  0.1627907   0.71597633  0.89285714  0.68235294  0.        ]]\n"
+       ]
+      }
+     ],
+     "prompt_number": 12
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# And create ordination plots using scikit-bio and matplotlib"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import matplotlib.pyplot as plt\n",
+      "from mpl_toolkits.mplot3d import Axes3D\n",
+      "\n",
+      "def scatter_3d(ord_results, df, column, color_map, title='', axis1=0,\n",
+      "               axis2=1, axis3=2):\n",
+      "   \"\"\"Adapted from Matplotlib Gallery:\n",
+      "   http://matplotlib.org/examples/mplot3d/scatter3d_demo.html\n",
+      "   \"\"\"\n",
+      "   coord_matrix = ord_results.site.T\n",
+      "   ids = ord_results.site_ids\n",
+      "   colors = [color_map[df[column][id_]] for id_ in ord_results.site_ids]\n",
+      "\n",
+      "   fig = plt.figure()\n",
+      "   ax = fig.add_subplot(111, projection='3d')\n",
+      "\n",
+      "   xs = coord_matrix[axis1]\n",
+      "   ys = coord_matrix[axis2]\n",
+      "   zs = coord_matrix[axis3]\n",
+      "   plot = ax.scatter(xs, ys, zs, c=colors, s=150)\n",
+      "\n",
+      "   ax.set_xlabel('PC %d' % (axis1 + 1))\n",
+      "   ax.set_ylabel('PC %d' % (axis2 + 1))\n",
+      "   ax.set_zlabel('PC %d' % (axis3 + 1))\n",
+      "   ax.set_xticklabels([])\n",
+      "   ax.set_yticklabels([])\n",
+      "   ax.set_zticklabels([])\n",
+      "   ax.set_title(title)\n",
+      "   return fig"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "skip"
+      }
+     },
+     "outputs": [],
+     "prompt_number": 13
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio.math.stats.ordination import PCoA\n",
+      "bc_pc = PCoA(bc_dm).scores()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [],
+     "prompt_number": 14
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# This function adapted from Matplotlib Gallery's:\n",
+      "# http://matplotlib.org/examples/mplot3d/scatter3d_demo.html\n",
+      "fig = scatter_3d(bc_pc, sample_md, 'subject', {'1': 'yellow', '2': 'purple'},\n",
+      "                 'Samples colored by subject')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "metadata": {},
+       "output_type": "display_data",
+       "png": "iVBORw0KGgoAAAANSUhEUgAAAV0AAADtCAYAAAAcNaZ2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsfXmYFOXV/anee2Z6Zthh2GEQkJ1hWAxCJJ9x+fATEyNq\nNLgEP5NfVCQuidEEjUtcomhihLhE1IhbHmOMogZXjIIBgQEU3ABhYAZkmO6e3pf398d8t3i7pqq6\nqrt6nTrPM48yU131VnXVqfvee+55ARMmTJgwYcKECRMmTJgwYcKECRMmTJgwYcKECRMmTJgwYcKE\nCRMmTJgw0U0gqP2RMcbyNRATJkyYKBcIgqDIrZZ8DsSECRMmujtM0jVhwoSJPMIk3W6MZcuW4cIL\nLyz0MAAA3/72t/Hoo48W/FjFcE2GDRuGN998U/Zv69atw5gxY/I8IhNGwiTdAuD999/HCSecgNra\nWvTq1QuzZ [...]
+       "text": [
+        "<matplotlib.figure.Figure at 0x104f03a50>"
+       ]
+      }
+     ],
+     "prompt_number": 15
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# This function adapted from Matplotlib Gallery's:\n",
+      "# http://matplotlib.org/examples/mplot3d/scatter3d_demo.html\n",
+      "fig = scatter_3d(bc_pc, sample_md, 'body_site',\n",
+      "                 {'gut': 'b', 'skin': 'r', 'tongue': 'g'},\n",
+      "                 'Samples colored by body site')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "metadata": {},
+       "output_type": "display_data",
+       "png": "iVBORw0KGgoAAAANSUhEUgAAAV0AAADtCAYAAAAcNaZ2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsXXmcFNXVPdV7z84OwzYDo4gsAgMDJAgRP6P4kaCJcYuK\nSjTGLxg0ahIjEY37SkyMIG644ZYYYxLEYKJxA0XZFRRlHVYZprunp9fq9/0xucXrmqrqqu7qder8\nfvNTpmuqXlVXnbrv3nPPAyxYsGDBggULFixYsGDBggULFixYsGDBggULFixYsGDBggULFixYsGDB\nQheBoPUhY4zlaiAWLFiwUCoQBEGVW225HIgFCxYsdHVYpGvBggULOYRFul0ACxYswIUXXpjvYQAA\nvvWtb+Gxxx7L+7HMvCZ1dXV48803TdlXNq5PZWUlduzYYeo+LaQPi3SziHfffRff+MY3UFNTgx49\nemDKlClYs [...]
+       "text": [
+        "<matplotlib.figure.Figure at 0x104f97410>"
+       ]
+      }
+     ],
+     "prompt_number": 16
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# And finally run stats to determine if clustering patterns are significant"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio.math.stats.distance import ANOSIM\n",
+      "anosim = ANOSIM(bc_dm, sample_md, column='subject')\n",
+      "results = anosim(999)\n",
+      "print(results.statistic)\n",
+      "\n",
+      "print(results.p_value < 0.05)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "-0.407407407407\n",
+        "False\n"
+       ]
+      }
+     ],
+     "prompt_number": 17
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "anosim = ANOSIM(bc_dm, sample_md, column='body_site')\n",
+      "results = anosim(999)\n",
+      "print(results.statistic)\n",
+      "\n",
+      "print(results.p_value < 0.1)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "1.0\n",
+        "True\n"
+       ]
+      }
+     ],
+     "prompt_number": 18
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# And these are the types of examples that we document:"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "![](skbio-bdiv-docs.png)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Finally, for fun (and to show the generalizability of scikit-bio):\n",
+      "\n",
+      "<center>\n",
+      "## Can we use the same tools to model the \"evolutionary\" relationships between human languages?"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Let's start with tuples of: \n",
+      "\n",
+      "<pre>\n",
+      "   (name of language, \n",
+      "    spelling of phrase for ordering a beer,\n",
+      "    phonetic phrase for ordering a beer)\n",
+      "</pre>\n",
+      "\n",
+      "[[Source](http://esperanto-usa.org/en/content/how-order-beer-47-languages)]"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "languages = [(\"Afrikaans\", \"'n Bier, asseblief\", \"A beer ah-suh-bleef\"),\n",
+      " (\"Basque\", \"Garagardo bat, mesedez\", \"Gara-gardo bat mese-des\"),\n",
+      " (\"Breton\", \"Ur banne bier am bo, mar plij\", \"Oor bah-ne beer am boh mar pleezh\"),\n",
+      " (\"Catalan\", \"Una cervesa, si us plau\", \"Oona servayzeh see oos plow\"),\n",
+      " (\"Croatian\", \"Jedno pivo, molim\", \"Yed-no pee-vo, mo-lim\"),\n",
+      " (\"Czech / Slovak\", \"Pivo, pros\u00edm\", \"Pee-vo, pro-seem\"),\n",
+      " (\"Danish\", \"Jeg vil gerne have en \u00f8l\", \"Yay vil geh-neh heh en url\"),\n",
+      " (\"Dutch\", \"Een bier, alsjeblieft\", \"Un beer, ahls-yer-bleeft\"),\n",
+      " (\"English\", \"One beer, please\", \"Wun beer, pleez\"),\n",
+      " (\"Esperanto\", \"Unu bieron, mi petas\", \"Oo-noo bee-airon, mee peh-tahs\"),\n",
+      " (\"Estonian\", \"\u00dcks \u00f5lu, palun\", \"Ooks ur-loo, pah-lun\"),\n",
+      " (\"Finnish\", \"Olut mulle, kiitos\", \"O-loot moolek kee-tos\"),\n",
+      " (\"French\", \"Une bi\u00e8re, s'il vous pla\u00eet\", \"Oon bee-air, seel voo pleh\"),\n",
+      " (\"German\", \"Ein Bier, bitte\", \"Ine beer, bitt-uh\"),\n",
+      " (\"Hungarian\", \"Egy poh\u00e1r s\u00f6rt k\u00e9rek\", \"Edj pohar shurt kayrek\"),\n",
+      " (\"Icelandic\", \"Einn bj\u00f3r, takk\", \"Ay-dn byohr tahk\"),\n",
+      " (\"Irish\", \"Beoir amh\u00e1in, le do thoil\", \"Byohr awoyn, lyeh doh hull\"),\n",
+      " (\"Italian\", \"Una birra, per favore\", \"Oo-na beer-ra, pair fa-vo-re\"),\n",
+      " (\"Latin\", \"Cervisiam, sodes\", \"Ker-wi-see-am, soh-dehs\"),\n",
+      " (\"Latvian\", \"Vienu alu, l\u016b-dzu\", \"Vyeh-noo ah-loo, loo dzoo\"),\n",
+      " (\"Lithuanian\", \"Pra\u0161au viena alaus\", \"Pra-shau vie-na al-lows\"),\n",
+      " (\"Maltese\", \"Wiehed birra, jekk jghogbok\", \"Wee-het bir-ra yek yoh-dzbok\"),\n",
+      " (\"Norwegian\", \"En \u00f8l, takk\", \"Ehn url tahk\"),\n",
+      " (\"Occitan\", \"Una cervesa, se vos plai\", \"Oo-no serbeh-zo se bus ply\"),\n",
+      " (\"Polish\", \"Jedno piwo, prosz\u0119\", \"Yed-no peevo proshe\"),\n",
+      " (\"Portuguese\", \"Uma cerveja, por favor\", \"Oo-ma ser-vay-ja, poor fa-vohr\"),\n",
+      " (\"Romansch Ladina\", \"\u00dcna biera, per plaschair.\", \"Oo-nuh bee-air-uh per plah-chair\"),\n",
+      " (\"Sardinian\", \"Una birra, po piaghere\", \"Oo-na beer-ra po pia-gehre\"),\n",
+      " (\"Scots Gaelic\", \"Leann, mas e do thoil e\", \"Lyawn mahs eh doh hawl eh\"),\n",
+      " (\"Slovene\", \"Eno pivo, prosim\", \"Eno pee-vo pro-seem\"),\n",
+      " (\"Spanish (Lat. Am.)\", \"Una cerveza, por favor\", \"Oo-na ser-veh-sa, por fa-vor\"),\n",
+      " (\"Spanish (Spain)\", \"Una cerveza, por favor\", \"Oo-na thair-veh-tha, por fa-vor\"),\n",
+      " (\"Strine\", \"Foster's, mate\", \"Faw-stuhz, mayt\"),\n",
+      " (\"Swedish\", \"En \u00f6l, tack\", \"Ehn irl, tahk\"),\n",
+      " (\"Twi\", \"Mame beer baako, mi pawokyew\", \"Mah-me bee-ye bah-ko mee pow-che-oo\"),\n",
+      " (\"Turkish\", \"Bir bira, l\u00fctfen\", \"Beer beer-ah luht-fen\"),\n",
+      " (\"Welsh\", \"Cwrw os gwelwch in dda\", \"Koo-roh ohs gwel-ookh-un-thah\")]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [],
+     "prompt_number": 19
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "skip"
+      }
+     },
+     "source": [
+      "# We'll build a basic nucleotide-like substitution matrix"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "language_to_pron = {e[0]: e[2] for e in languages}\n",
+      "\n",
+      "all_pron_chars = []\n",
+      "for e in language_to_pron.values():\n",
+      "    all_pron_chars.extend(e)\n",
+      "all_pron_chars = set(all_pron_chars)\n",
+      "\n",
+      "pron_substitution_matrix = {}\n",
+      "for c in all_pron_chars:\n",
+      "    row = {}.fromkeys(all_pron_chars, -2.0)\n",
+      "    row[c] = 5.0\n",
+      "    pron_substitution_matrix[c] = row"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "skip"
+      }
+     },
+     "outputs": [],
+     "prompt_number": 20
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# We can then globally align the phrases and compute distances between them"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from skbio.core.alignment.pairwise import global_pairwise_align\n",
+      "\n",
+      "alignment = global_pairwise_align(language_to_pron[\"Swedish\"],\n",
+      "                                  language_to_pron[\"Norwegian\"], \n",
+      "                                  gap_open_penalty=5, gap_extend_penalty=2,\n",
+      "                                  substitution_matrix=pron_substitution_matrix)\n",
+      "print(alignment.to_fasta())\n",
+      "print(\"Hamming distance: %1.3f\" % alignment.distances()[0,1])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        ">0\n",
+        "Ehn irl, tahk\n",
+        ">1\n",
+        "Ehn url- tahk\n",
+        "\n",
+        "Hamming distance: 0.154\n"
+       ]
+      }
+     ],
+     "prompt_number": 21
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "alignment = global_pairwise_align(language_to_pron[\"Swedish\"],\n",
+      "                                  language_to_pron[\"Icelandic\"], \n",
+      "                                  gap_open_penalty=5, gap_extend_penalty=2,\n",
+      "                                  substitution_matrix=pron_substitution_matrix)\n",
+      "print(alignment.to_fasta())\n",
+      "print(\"Hamming distance: %1.3f\" % alignment.distances()[0,1])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        ">0\n",
+        "--Ehn ---irl, tahk\n",
+        ">1\n",
+        "Ay-dn byohr-- tahk\n",
+        "\n",
+        "Hamming distance: 0.556\n"
+       ]
+      }
+     ],
+     "prompt_number": 22
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "alignment = global_pairwise_align(language_to_pron[\"Spanish (Spain)\"],\n",
+      "                                  language_to_pron[\"Italian\"], \n",
+      "                                  gap_open_penalty=5, gap_extend_penalty=2,\n",
+      "                                  substitution_matrix=pron_substitution_matrix)\n",
+      "print(alignment.to_fasta())\n",
+      "print(\"Hamming distance: %1.3f\" % alignment.distances()[0,1])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        ">0\n",
+        "Oo-na thair-veh-tha, p-or fa-vo-r-\n",
+        ">1\n",
+        "Oo-na be-----er-r-a, pair fa-vo-re\n",
+        "\n",
+        "Hamming distance: 0.353\n"
+       ]
+      }
+     ],
+     "prompt_number": 23
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# We can go further, and compute all pairwise alignments and distances"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "warnings.simplefilter('ignore')\n",
+      "from skbio.core.alignment.pairwise import global_pairwise_align\n",
+      "from skbio import DistanceMatrix\n",
+      "\n",
+      "languages = language_to_pron.keys()\n",
+      "distances = np.zeros((len(languages), len(languages)))\n",
+      "for i, language1 in enumerate(languages):\n",
+      "    language1_phrase = language_to_pron[language1]\n",
+      "    for j in range(i):\n",
+      "        language2 = languages[j]\n",
+      "        language2_phrase = language_to_pron[language2]\n",
+      "        alignment = global_pairwise_align(language1_phrase, language2_phrase,\n",
+      "                                          gap_open_penalty=5, gap_extend_penalty=2,\n",
+      "                                          substitution_matrix=pron_substitution_matrix)\n",
+      "        distances[i, j] = distances[j, i] = alignment.distances()[0,1]\n",
+      "\n",
+      "dm = DistanceMatrix(distances, languages)\n",
+      "print(dm)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "37x37 distance matrix\n",
+        "IDs:\n",
+        "Swedish, Icelandic, Estonian, Turkish, Twi, Sardinian, Romansch Ladina, Dutch, ...\n",
+        "Data:\n",
+        "[[ 0.          0.5         0.6        ...,  0.6         0.76923077\n",
+        "   0.76470588]\n",
+        " [ 0.5         0.          0.8        ...,  0.66666667  0.76923077\n",
+        "   0.76470588]\n",
+        " [ 0.6         0.8         0.         ...,  0.66666667  0.73076923\n",
+        "   0.72727273]\n",
+        " ..., \n",
+        " [ 0.6         0.66666667  0.66666667 ...,  0.          0.59375     0.61111111]\n",
+        " [ 0.76923077  0.76923077  0.73076923 ...,  0.59375     0.          0.65714286]\n",
+        " [ 0.76470588  0.76470588  0.72727273 ...,  0.61111111  0.65714286  0.        ]]\n"
+       ]
+      }
+     ],
+     "prompt_number": 24
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# And build a tree to visualize relationships"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from scipy.cluster.hierarchy import average, dendrogram, to_tree\n",
+      "\n",
+      "lm = average(dm.condensed_form())\n",
+      "\n",
+      "def format_dendrogram(tip_count):\n",
+      "    import matplotlib.pylab as plt\n",
+      "    ax = plt.gca()\n",
+      "    fig = plt.gcf()\n",
+      "    height = tip_count * 0.4\n",
+      "    if height < 3:\n",
+      "        height = 3\n",
+      "    fig.set_size_inches(7, height)\n",
+      "    font = {'family' : 'normal',\n",
+      "        'weight' : 'normal',\n",
+      "        'size'   : 18}\n",
+      "\n",
+      "    matplotlib.rc('font', **font)\n",
+      "    return ax\n",
+      "\n",
+      "format_dendrogram(dm.shape[0])\n",
+      "d = dendrogram(lm, labels=dm.ids, orientation='right',\n",
+      "               link_color_func=lambda x: 'black')\n"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "metadata": {},
+       "output_type": "display_data",
+       "png": "iVBORw0KGgoAAAANSUhEUgAAAksAAANeCAYAAAAC0UqeAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzs3XmcXfP9x/HXJQghzIj1Z4klsRS1L20x9lIlWg0VS4La\nitqVVoUSSy2hbVKtSmKnETu1RlBrkFqCkIh9S2aQDZGZ3x/v7+2cOXPumTtzZ+bcO/N+Ph73cWfO\n/Z7v+d6bcD/5fD/n+wUzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzq2C5juh0++23b5gw\nYUJHdG1mZpXpv8DGWQ/CrC0W6ohOJ0yYQENDQ8U+zj777MzH4Pfg91AuD7+H7B+VPv6GhgaA73fE\n941ZZ+iQYMnMzKxENUA9cEgnXW9wuN72bTi3HhjVrqOxsuJgyczMslCDgoxCj62AhvDoDA2Udr3O\nGqdloEfWA [...]
+       "text": [
+        "<matplotlib.figure.Figure at 0x104f88750>"
+       ]
+      }
+     ],
+     "prompt_number": 25
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Currently in pre-alpha release stage, and working on defining our scope. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "source": [
+      "# Contributors:\n",
+      "\n",
+      "Adam Robbins-Pianka (**@adamrp**) | Antonio Gonzalez (**@antgonza**) | Daniel McDonald (**@wasade**) | Evan Bolyen (**@ebolyen**) | Greg Caporaso (**@gregcaporaso**) | Jai Ram Rideout (**@ElBrogrammer**) | Jens Reeder (**@jensreeder**) | Jorge Ca\u00f1ardo Alastuey (**@Jorge-C**) | Jose Antonio Navas Molina (**@josenavas**) | Joshua Shorenstein (**@squirrelo**) | Yoshiki V\u00e1zquez Baeza (**@ElDeveloper**) | @charudatta-navare | John Chase (**@johnchase**) | Karen Schwarzberg (* [...]
+      "\n",
+      "Rob Knight (**@rob-knight**) | Gavin Huttley (**@gavin-huttley**) | Micah Hamady | Sandra Smit | Cathy Lozupone (**@clozupone**) | Mike Robeson (**@mikerobeson**) | Marcin Cieslik | Peter Maxwell | Jeremy Widmann | Zongzhi Liu | Michael Dwan | Logan Knecht (**@loganknecht**) | Andrew Cochran | Jose Carlos Clemente (**@cleme**) | Damien Coy | Levi McCracken | Andrew Butterfield | Justin Kuczynski (**@justin212k**) | Matthew Wakefield (**@genomematt**)\n",
+      "\n",
+      "![](code-sprint-2.jpg)"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import skbio\n",
+      "print(skbio.title)\n",
+      "print(skbio.art)"
+     ],
+     "language": "python",
+     "metadata": {
+      "slideshow": {
+       "slide_type": "slide"
+      }
+     },
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "\n",
+        "               _ _    _ _          _     _\n",
+        "              (_) |  (_) |        | |   (_)\n",
+        "      ___  ___ _| | ___| |_ ______| |__  _  ___\n",
+        "     / __|/ __| | |/ / | __|______| '_ \\| |/ _ \\\n",
+        "     \\__ \\ (__| |   <| | |_       | |_) | | (_) |\n",
+        "     |___/\\___|_|_|\\_\\_|\\__|      |_.__/|_|\\___/\n",
+        "\n",
+        "\n",
+        "\n",
+        "\n",
+        "           Opisthokonta\n",
+        "                   \\  Amoebozoa\n",
+        "                    \\ /\n",
+        "                     *    Euryarchaeota\n",
+        "                      \\     |_ Crenarchaeota\n",
+        "                       \\   *\n",
+        "                        \\ /\n",
+        "                         *\n",
+        "                        /\n",
+        "                       /\n",
+        "                      /\n",
+        "                     *\n",
+        "                    / \\\n",
+        "                   /   \\\n",
+        "        Proteobacteria  \\\n",
+        "                       Cyanobacteria\n",
+        "\n"
+       ]
+      }
+     ],
+     "prompt_number": 26
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/code-deletion-pr.png b/ipynbs/presentations/2014.07.09-gregcaporaso/code-deletion-pr.png
new file mode 100644
index 0000000..5bf8f65
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/code-deletion-pr.png differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/code-sprint-1.jpg b/ipynbs/presentations/2014.07.09-gregcaporaso/code-sprint-1.jpg
new file mode 100644
index 0000000..d785559
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/code-sprint-1.jpg differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/code-sprint-2.jpg b/ipynbs/presentations/2014.07.09-gregcaporaso/code-sprint-2.jpg
new file mode 100644
index 0000000..09d59ef
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/code-sprint-2.jpg differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/custom.css b/ipynbs/presentations/2014.07.09-gregcaporaso/custom.css
new file mode 100644
index 0000000..cf215cd
--- /dev/null
+++ b/ipynbs/presentations/2014.07.09-gregcaporaso/custom.css
@@ -0,0 +1,39 @@
+.reveal > .backgrounds {
+    margin-top: 10px;
+    background:linear-gradient(to bottom, white 60%, #EEE);
+    position: fixed;
+}
+
+.reveal .state-background {
+    background:url('skbio.png') 0px -5px no-repeat;
+    
+}
+
+.reveal h1 {
+    color: #259D57;
+    text-shadow: 1px 1px 1px #92ceab;
+}
+
+.reveal a, .reveal a:visited, .reveal a:active, .reveal a:hover {
+    color: #00B84D !important;
+}
+
+.reveal .controls div.navigate-right, .reveal .controls div.navigate-right.enabled, .reveal .controls div.navigate-right.enabled:hover{
+    border-left-color: #00B84D;
+}
+
+.reveal .controls div.navigate-left, .reveal .controls div.navigate-left.enabled, .reveal .controls div.navigate-left.enabled:hover {
+    border-right-color: #00B84D;
+}
+
+.reveal .controls div.navigate-up, .reveal .controls div.navigate-up.enabled, .reveal .controls div.navigate-up.enabled:hover {
+    border-bottom-color: #00B84D;
+}
+
+.reveal .controls div.navigate-down, .reveal .controls div.navigate-down.enabled, .reveal .controls div.navigate-down.enabled:hover {
+    border-top-color: #00B84D;
+}
+
+.reveal .progress span {
+    background: none repeat scroll 0% 0% #259D57;
+}
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/iab-example.png b/ipynbs/presentations/2014.07.09-gregcaporaso/iab-example.png
new file mode 100644
index 0000000..7796631
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/iab-example.png differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/logo.png b/ipynbs/presentations/2014.07.09-gregcaporaso/logo.png
new file mode 100644
index 0000000..5e246aa
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/logo.png differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/qiime-cites.png b/ipynbs/presentations/2014.07.09-gregcaporaso/qiime-cites.png
new file mode 100644
index 0000000..78bda8e
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/qiime-cites.png differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-alignment-docs.png b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-alignment-docs.png
new file mode 100644
index 0000000..c5c69db
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-alignment-docs.png differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-bdiv-docs.png b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-bdiv-docs.png
new file mode 100644
index 0000000..f5b6fc1
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-bdiv-docs.png differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-contributors.png b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-contributors.png
new file mode 100644
index 0000000..80850d1
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-contributors.png differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-docs.png b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-docs.png
new file mode 100644
index 0000000..a4df4b0
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-docs.png differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-timeline.png b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-timeline.png
new file mode 100644
index 0000000..1a05d1c
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio-timeline.png differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/skbio.png b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio.png
new file mode 100644
index 0000000..2c1c770
Binary files /dev/null and b/ipynbs/presentations/2014.07.09-gregcaporaso/skbio.png differ
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/style.css b/ipynbs/presentations/2014.07.09-gregcaporaso/style.css
new file mode 100644
index 0000000..0ea9692
--- /dev/null
+++ b/ipynbs/presentations/2014.07.09-gregcaporaso/style.css
@@ -0,0 +1,150 @@
+<style>
+
+#ipython-main-app {  
+  background:linear-gradient(to bottom, white 60%, #EEE);
+  }
+  
+#maintoolbar {
+    height: 30px;
+}
+  
+div.cell.selected {
+  border: none;
+  border-radius: 0px;
+  margin-left: -2px;
+  margin-bottom: 1px;
+  margin-top: 1px;
+  border-left: 3px solid #259D57;
+}
+
+div.out_prompt_overlay {
+    display: none !important;
+    }
+
+.rendered_html
+{
+  color: #24372C !important;
+  font-family: Ubuntu;
+  font-size: 140%;
+  line-height: 1.1;
+  margin: 0.5em 0;
+  }
+
+.title
+{
+  color: #259D57;
+  font-size: 250%;
+  font-weight:bold;
+  line-height: 1.2; 
+  margin: 10px 50px 10px;
+  }
+
+.subtitle
+{
+  color: #259D57;
+  font-size: 180%;
+  font-weight:bold;
+  line-height: 1.2; 
+  margin: 20px 50px 20px;
+  }
+
+.slide-header, p.slide-header
+{
+  color: #259D57;
+  font-size: 200%;
+  font-weight:bold;
+  margin: 0px 20px 10px;
+  page-break-before: always;
+  text-align: center;
+  }
+
+.rendered_html h1
+{
+  color: #259D57;
+  line-height: 1.2; 
+  margin: 0.15em 0em 0.5em;
+  page-break-before: always;
+  text-align: center;
+  }
+
+
+.rendered_html h2
+{ 
+  color: #259D57;
+  line-height: 1.2;
+  margin: 1.1em 0em 0.5em;
+  }
+
+.rendered_html h3
+{ 
+  font-size: 100%;
+  line-height: 1.2;
+  margin: 1.1em 0em 0.5em;
+  }
+
+.rendered_html li
+{
+  line-height: 1.8;
+  }
+
+.input_prompt, .CodeMirror-lines, .output_area
+{
+  font-family: Consolas;
+  font-size: 120%;
+  }
+
+.gap-above
+{
+  padding-top: 200px;
+  }
+
+.gap01
+{
+  padding-top: 10px;
+  }
+
+.gap05
+{
+  padding-top: 50px;
+  }
+
+.gap1
+{
+  padding-top: 100px;
+  }
+
+.gap2
+{
+  padding-top: 200px;
+  }
+
+.gap3
+{
+  padding-top: 300px;
+  }
+
+.emph
+{
+  color: #386BBC;
+  }
+
+.warn
+{
+  color: red;
+  }
+
+.center
+{
+  text-align: center;
+  }
+
+.nb_link
+{
+    padding-bottom: 0.5em;
+}
+
+a, a:visited, a:active, a:hover {
+  color: #00B84D;
+  }
+
+</style>
diff --git a/ipynbs/presentations/2014.07.09-gregcaporaso/talktools.py b/ipynbs/presentations/2014.07.09-gregcaporaso/talktools.py
new file mode 100644
index 0000000..47e0673
--- /dev/null
+++ b/ipynbs/presentations/2014.07.09-gregcaporaso/talktools.py
@@ -0,0 +1,39 @@
+"""Tools to style a talk."""
+
+from IPython.display import HTML, display, YouTubeVideo
+
+def prefix(url):
+    prefix = '' if url.startswith('http') else 'http://'
+    return prefix + url
+
+
+def simple_link(url, name=None):
+    name = url if name is None else name
+    url = prefix(url)
+    return '<a href="%s" target="_blank">%s</a>' % (url, name)
+
+
+def html_link(url, name=None):
+    return HTML(simple_link(url, name))
+
+
+# Utility functions
+def website(url, name=None, width=800, height=450):
+    html = []
+    if name:
+        html.extend(['<div class="nb_link">',
+                     simple_link(url, name),
+                     '</div>'] )
+
+    html.append('<iframe src="%s"  width="%s" height="%s">' % 
+                (prefix(url), width, height))
+    return HTML('\n'.join(html))
+
+
+def nbviewer(url, name=None, width=800, height=450):
+    return website('nbviewer.ipython.org/url/' + url, name, width, height)
+
+# Load and publish CSS
+style = HTML(open('style.css').read())
+
+display(style)
diff --git a/licenses/fastq-example-files-readme.txt b/licenses/fastq-example-files-readme.txt
new file mode 100644
index 0000000..f0c6c26
--- /dev/null
+++ b/licenses/fastq-example-files-readme.txt
@@ -0,0 +1,109 @@
+This README file describes the FASTQ example files provided as supplementary
+information to the open-access publication:
+
+P.J.A. Cock, C.J. Fields, N. Goto, M.L. Heuer and P.M. Rice (2009). The Sanger
+FASTQ file format for sequences with quality scores, and the Solexa/Illumina
+FASTQ variants.
+
+These files are provided freely and we encourage anyone writing a FASTQ parser
+to use them as part of your test suite. Permission is granted to freely
+distribute and modify the files. We request (but do not insist) that this
+README file is included, or at least a reference to the above paper. Please
+cite the above paper if appropriate. We also request (but do not insist) that
+the example files are not modified, in order that they may serve as a common
+reference.
+
+Invalid FASTQ files
+===================
+
+The archive contains the following sample FASTQ files with names of the form
+error_NAME.fastq, which all contain errors and should be rejected (if parsed
+as any of the three FASTQ variants):
+
+error_diff_ids.fastq
+error_double_qual.fastq
+error_double_seq.fastq
+error_long_qual.fastq
+error_no_qual.fastq
+error_qual_del.fastq
+error_qual_escape.fastq
+error_qual_null.fastq
+error_qual_space.fastq
+error_qual_tab.fastq
+error_qual_unit_sep.fastq
+error_qual_vtab.fastq
+error_short_qual.fastq
+error_spaces.fastq
+error_tabs.fastq
+error_trunc_at_seq.fastq
+error_trunc_at_plus.fastq
+error_trunc_at_qual.fastq
+error_trunc_in_title.fastq
+error_trunc_in_seq.fastq
+error_trunc_in_plus.fastq
+error_trunc_in_qual.fastq
+
+Of these, those with names error_qual_XXX.fastq would be valid except for the
+inclusion of spaces or non-printing ASCII characters outside the range allowed
+in the quality string. The files named error_trunc_XXX.fastq would be valid
+but for being truncated (e.g. simulating a partial copy over the network).
+
+The special cases of FASTQ files which would be valid as one variant, but not
+another, are covered below.
+
+Valid FASTQ
+===========
+
+The archive contains the following valid sample FASTQ input files for testing:
+
+longreads_original_sanger.fastq
+wrapping_original_sanger.fastq
+illumina_full_range_original_illumina.fastq
+sanger_full_range_original_sanger.fastq
+solexa_full_range_original_solexa.fastq
+misc_dna_original_sanger.fastq
+misc_rna_original_sanger.fastq
+
+These all have the form NAME_original_FORMAT.fastq, where NAME is a prefix for
+that example, and FORMAT is one of sanger, solexa or illumina indicating which
+FASTQ variant that example is using. There are three matching files called
+NAME_as_FORMAT.fastq showing how the original file should be converted into
+each of the three FASTQ variants. These converted files are standardised not
+to use line wrapping (so each record has exactly four lines), and omit the
+optional repetition of the read titles on the plus line.
+
+The file longreads_original_sanger.fastq is based on real Roche 454 reads from
+the Sanger Institute for the potato cyst nematode Globodera pallida. Ten
+of the reads have been presented as FASTQ records, wrapping the sequence and
+the quality lines at 80 characters. This means some of the quality lines start
+with "@" or "+" characters, which may cause problems with naive parsers. Also
+note that the sequence is mixed case (with upper case denoting the trimmed
+region), and furthermore the free format title lines are over 100 characters
+and encode assorted read information (and are repeated on the "+" lines).
+
+The wrapping_original_sanger.fastq is based on three real reads from the NCBI
+Short Read Archive, but has been carefully edited to use line wrapping for the
+quality lines (but not the sequence lines) such that, due to the occurrence
+of "@" and "+" on alternating lines, the file may be misinterpreted by a
+simplistic parser. While this is therefore a very artificial example, it
+remains a valid FASTQ file, and is useful for testing purposes.
+
+The sanger_full_range_original_sanger.fastq file uses PHRED scores from 0 to
+93 inclusive, covering ASCII characters from 33 (!) to 126 (~). This means it
+cannot be treated as a Solexa or Illumina 1.3+ FASTQ file, and attempting to
+parse it as such should raise an error.
+
+The solexa_full_range_original_solexa.fastq file uses Solexa scores from -5 to
+62 inclusive, covering ASCII characters from 59 (;) to 126 (~). This means it
+cannot be treated as a Illumina 1.3+ FASTQ file, and attempting to parse it as
+such should raise an error. On the basis of the quality characters, the file
+would also qualify as a valid Sanger FASTQ file.
+
+The illumina_full_range_original_illumina.fastq file uses PHRED scores from 0
+to 62 inclusive, covering ASCII characters from 64 (@) to 126 (~). On the
+basis of the quality characters, the file would also qualify as a valid Sanger
+or Solexa FASTQ file.
+
+The misc_dna_original_sanger.fastq and misc_rna_original_sanger.fastq files
+are artificial reads using the full range of IUPAC DNA or RNA letters,
+including ambiguous character codes, and both cases.
diff --git a/licenses/ipython.txt b/licenses/ipython.txt
new file mode 100644
index 0000000..59674ac
--- /dev/null
+++ b/licenses/ipython.txt
@@ -0,0 +1,74 @@
+=============================
+ The IPython licensing terms
+=============================
+
+IPython is licensed under the terms of the Modified BSD License (also known as
+New or Revised or 3-Clause BSD), as follows:
+
+- Copyright (c) 2008-2014, IPython Development Team
+- Copyright (c) 2001-2007, Fernando Perez <fernando.perez at colorado.edu>
+- Copyright (c) 2001, Janko Hauser <jhauser at zscout.de>
+- Copyright (c) 2001, Nathaniel Gray <n8gray at caltech.edu>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+Neither the name of the IPython Development Team nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+About the IPython Development Team
+----------------------------------
+
+Fernando Perez began IPython in 2001 based on code from Janko Hauser
+<jhauser at zscout.de> and Nathaniel Gray <n8gray at caltech.edu>.  Fernando is still
+the project lead.
+
+The IPython Development Team is the set of all contributors to the IPython
+project.  This includes all of the IPython subprojects. A full list with
+details is kept in the documentation directory, in the file
+``about/credits.txt``.
+
+The core team that coordinates development on GitHub can be found here:
+https://github.com/ipython/.
+
+Our Copyright Policy
+--------------------
+
+IPython uses a shared copyright model. Each contributor maintains copyright
+over their contributions to IPython. But, it is important to note that these
+contributions are typically only changes to the repositories. Thus, the IPython
+source code, in its entirety is not the copyright of any single person or
+institution.  Instead, it is the collective copyright of the entire IPython
+Development Team.  If individual contributors want to maintain a record of what
+changes/contributions they have specific copyright on, they should indicate
+their copyright in the commit message of the change, when they commit the
+change to one of the IPython repositories.
+
+With this in mind, the following banner should be used in any source code file 
+to indicate the copyright and license terms:
+
+::
+
+    # Copyright (c) IPython Development Team.
+    # Distributed under the terms of the Modified BSD License.
diff --git a/licenses/nb-slideshow-template.txt b/licenses/nb-slideshow-template.txt
new file mode 100644
index 0000000..24d72d2
--- /dev/null
+++ b/licenses/nb-slideshow-template.txt
@@ -0,0 +1,30 @@
+Licensed under the terms of the Simplified BSD license:
+http://opensource.org/licenses/BSD-3-Clause
+
+
+Copyright (c) 2013, Fernando Perez.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+Neither the name of the <ORGANIZATION> nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/licenses/numpydoc.txt b/licenses/numpydoc.txt
new file mode 100644
index 0000000..fe10d70
--- /dev/null
+++ b/licenses/numpydoc.txt
@@ -0,0 +1 @@
+numpydoc license is at scikit-bio/doc/sphinxext/numpydoc/LICENSE.txt
diff --git a/licenses/qiita.txt b/licenses/qiita.txt
new file mode 100644
index 0000000..1b041d0
--- /dev/null
+++ b/licenses/qiita.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Qiita development team
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+  Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+  Redistributions in binary form must reproduce the above copyright notice, this
+  list of conditions and the following disclaimer in the documentation and/or
+  other materials provided with the distribution.
+
+  Neither the name of the {organization} nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/licenses/scikit-learn.txt b/licenses/scikit-learn.txt
new file mode 100644
index 0000000..5a47062
--- /dev/null
+++ b/licenses/scikit-learn.txt
@@ -0,0 +1,35 @@
+------------------------------------------------------------------------------
+    The file doc/source/_static/copybutton.js has the following license:
+
+New BSD License
+
+Copyright (c) 2007–2014 The scikit-learn developers.
+All rights reserved.
+
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  a. Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+  b. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+  c. Neither the name of the Scikit-learn Developers  nor the names of
+     its contributors may be used to endorse or promote products
+     derived from this software without specific prior written
+     permission. 
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
diff --git a/licenses/scipy.txt b/licenses/scipy.txt
new file mode 100644
index 0000000..b72893b
--- /dev/null
+++ b/licenses/scipy.txt
@@ -0,0 +1,31 @@
+Copyright (c) 2001, 2002 Enthought, Inc.
+All rights reserved.
+
+Copyright (c) 2003-2012 SciPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  a. Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+  b. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+  c. Neither the name of Enthought nor the names of the SciPy Developers
+     may be used to endorse or promote products derived from this software
+     without specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/licenses/sphinx-bootstrap-theme.txt b/licenses/sphinx-bootstrap-theme.txt
new file mode 100644
index 0000000..a22b4af
--- /dev/null
+++ b/licenses/sphinx-bootstrap-theme.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2011-2014 Ryan Roemer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/licenses/ssw.txt b/licenses/ssw.txt
new file mode 100644
index 0000000..39f340f
--- /dev/null
+++ b/licenses/ssw.txt
@@ -0,0 +1,46 @@
+/* The MIT License
+
+   Copyright (c) 2012-2015 Boston College.
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be
+   included in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE.
+*/
+
+/* Contact: Mengyao Zhao <zhangmp at bc.edu> */
+
+/*
+ *  ssw.c
+ *
+ *  Created by Mengyao Zhao on 6/22/10.
+ *  Copyright 2010 Boston College. All rights reserved.
+ *  Version 0.1.4
+ *  Last revision by Mengyao Zhao on 12/07/12.
+ *
+ */
+
+/*
+ *  ssw.h
+ *
+ *  Created by Mengyao Zhao on 6/22/10.
+ *  Copyright 2010 Boston College. All rights reserved.
+ *  Version 0.1.4
+ *  Last revision by Mengyao Zhao on 01/30/13.
+ *
+ */
diff --git a/licenses/verman.txt b/licenses/verman.txt
new file mode 100644
index 0000000..4f18176
--- /dev/null
+++ b/licenses/verman.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2014, Daniel McDonald
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+  list of conditions and the following disclaimer in the documentation and/or
+  other materials provided with the distribution.
+
+* Neither the name of the {organization} nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..bcccc34
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import os
+import platform
+from setuptools import find_packages, setup
+from setuptools.extension import Extension
+
+import numpy as np
+
+__version__ = "0.2.3"
+
+classes = """
+    Development Status :: 1 - Planning
+    License :: OSI Approved :: BSD License
+    Topic :: Software Development :: Libraries
+    Topic :: Scientific/Engineering
+    Topic :: Scientific/Engineering :: Bio-Informatics
+    Programming Language :: Python
+    Programming Language :: Python :: 2
+    Programming Language :: Python :: 2.7
+    Programming Language :: Python :: 3
+    Programming Language :: Python :: 3.3
+    Programming Language :: Python :: 3.4
+    Operating System :: Unix
+    Operating System :: POSIX
+    Operating System :: MacOS :: MacOS X
+"""
+classifiers = [s.strip() for s in classes.split('\n') if s]
+
+description = ('Data structures, algorithms and educational '
+               'resources for bioinformatics.')
+
+with open('README.rst') as f:
+    long_description = f.read()
+
+# Dealing with Cython
+USE_CYTHON = os.environ.get('USE_CYTHON', False)
+ext = '.pyx' if USE_CYTHON else '.c'
+
+# There's a bug in some versions of Python 3.4 that propagates
+# -Werror=declaration-after-statement to extensions, instead of just affecting
+# the compilation of the interpreter. See http://bugs.python.org/issue21121 for
+# details. This acts as a workaround until the next Python 3 release -- thanks
+# Wolfgang Maier (wolma) for the workaround!
+ssw_extra_compile_args = ['-Wno-error=declaration-after-statement']
+
+# Users with i686 architectures have reported that adding this flag allows
+# SSW to be compiled. See https://github.com/biocore/scikit-bio/issues/409 and
+# http://stackoverflow.com/q/26211814/3776794 for details.
+if platform.machine() == 'i686':
+    ssw_extra_compile_args.append('-msse2')
+
+extensions = [
+    Extension("skbio.stats.__subsample",
+              ["skbio/stats/__subsample" + ext]),
+    Extension("skbio.alignment._ssw_wrapper",
+              ["skbio/alignment/_ssw_wrapper" + ext,
+               "skbio/alignment/_lib/ssw.c"],
+              extra_compile_args=ssw_extra_compile_args)
+]
+
+if USE_CYTHON:
+    from Cython.Build import cythonize
+    extensions = cythonize(extensions)
+
+setup(name='scikit-bio',
+      version=__version__,
+      license='BSD',
+      description=description,
+      long_description=long_description,
+      author="scikit-bio development team",
+      author_email="gregcaporaso at gmail.com",
+      maintainer="scikit-bio development team",
+      maintainer_email="gregcaporaso at gmail.com",
+      url='http://scikit-bio.org',
+      test_suite='nose.collector',
+      packages=find_packages(),
+      ext_modules=extensions,
+      include_dirs=[np.get_include()],
+      install_requires=['numpy >= 1.7', 'matplotlib >= 1.1.0',
+                        'scipy >= 0.13.0', 'pandas', 'future', 'six',
+                        'natsort', 'IPython'],
+      extras_require={'test': ["nose >= 0.10.1", "pep8", "flake8",
+                               "python-dateutil"],
+                      'doc': ["Sphinx == 1.2.2", "sphinx-bootstrap-theme"]},
+      classifiers=classifiers,
+      package_data={
+          'skbio.io.tests': ['data/*'],
+          'skbio.stats.tests': ['data/*'],
+          'skbio.stats.distance.tests': ['data/*'],
+          'skbio.stats.ordination.tests': ['data/*'],
+          'skbio.parse.sequences.tests': ['data/*'],
+          }
+      )
diff --git a/skbio/__init__.py b/skbio/__init__.py
new file mode 100644
index 0000000..dd0c1ff
--- /dev/null
+++ b/skbio/__init__.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from numpy.testing import Tester
+
+# Add skbio.io to sys.modules to prevent cycles in our imports
+import skbio.io
+# imports included for convenience
+from skbio.sequence import (
+    BiologicalSequence, NucleotideSequence, DNA, DNASequence, RNA, RNASequence,
+    Protein, ProteinSequence)
+from skbio.stats.distance import DistanceMatrix
+from skbio.alignment import (
+    local_pairwise_align_ssw, SequenceCollection, Alignment)
+from skbio.tree import (
+    TreeNode, nj)
+from skbio.parse.sequences import (
+    parse_fasta, parse_fastq, parse_qual, FastaIterator, FastqIterator,
+    SequenceIterator)
+from skbio.io import read, write
+
+skbio.io  # Stop flake8 error
+
+__all__ = ['BiologicalSequence', 'NucleotideSequence', 'DNA', 'DNASequence',
+           'RNA', 'RNASequence', 'Protein', 'ProteinSequence',
+           'DistanceMatrix', 'local_pairwise_align_ssw',
+           'SequenceCollection', 'Alignment', 'TreeNode', 'nj', 'parse_fasta',
+           'parse_fastq', 'parse_qual', 'FastaIterator',
+           'FastqIterator', 'SequenceIterator', 'read',
+           'write']
+
+test = Tester().test
+
+__credits__ = "https://github.com/biocore/scikit-bio/graphs/contributors"
+__version__ = "0.2.3"
+
+mottos = [
+    # 03/15/2014
+    "It's gonna get weird, bro.",
+    # 05/14/2014
+    "no cog yay"
+]
+motto = mottos[-1]
+
+title = r"""
+*                                                    *
+               _ _    _ _          _     _
+              (_) |  (_) |        | |   (_)
+      ___  ___ _| | ___| |_ ______| |__  _  ___
+     / __|/ __| | |/ / | __|______| '_ \| |/ _ \
+     \__ \ (__| |   <| | |_       | |_) | | (_) |
+     |___/\___|_|_|\_\_|\__|      |_.__/|_|\___/
+
+*                                                    *
+"""
+
+art = r"""
+
+           Opisthokonta
+                   \  Amoebozoa
+                    \ /
+                     *    Euryarchaeota
+                      \     |_ Crenarchaeota
+                       \   *
+                        \ /
+                         *
+                        /
+                       /
+                      /
+                     *
+                    / \
+                   /   \
+        Proteobacteria  \
+                       Cyanobacteria
+"""
+
+if __doc__ is None:
+    __doc__ = title + art
+else:
+    __doc__ = title + art + __doc__
+
+if __name__ == '__main__':
+    print(title)
+    print(art)
diff --git a/skbio/_base.py b/skbio/_base.py
new file mode 100644
index 0000000..c5444fe
--- /dev/null
+++ b/skbio/_base.py
@@ -0,0 +1,25 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.utils import with_metaclass
+
+from abc import ABCMeta, abstractmethod
+
+
+class SkbioObject(with_metaclass(ABCMeta, object)):
+    """Abstract base class defining core API common to all scikit-bio objects.
+
+    Public scikit-bio classes should subclass this class to ensure a common,
+    core API is present. All abstract methods and properties defined here must
+    be implemented in subclasses, otherwise they will not be instantiable.
+
+    """
+    @abstractmethod
+    def __str__(self):
+        pass
diff --git a/skbio/alignment/__init__.py b/skbio/alignment/__init__.py
new file mode 100644
index 0000000..ef24fb5
--- /dev/null
+++ b/skbio/alignment/__init__.py
@@ -0,0 +1,250 @@
+r"""
+Sequence collections and alignments (:mod:`skbio.alignment`)
+============================================================
+
+.. currentmodule:: skbio.alignment
+
+This module provides functionality for working with biological sequence
+collections and alignments. These can be composed of generic sequences,
+nucleotide sequences, DNA sequences, and RNA sequences. By default, input is
+not validated, except that sequence ids must be unique, but all
+constructor methods take a validate option which checks different features of
+the input based on ``SequenceCollection`` type.
+
+Data Structures
+---------------
+
+.. autosummary::
+   :toctree: generated/
+
+   SequenceCollection
+   Alignment
+   StockholmAlignment
+
+Optimized (i.e., production-ready) Alignment Algorithms
+-------------------------------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   StripedSmithWaterman
+   AlignmentStructure
+   local_pairwise_align_ssw
+
+Slow (i.e., educational-purposes only) Alignment Algorithms
+-----------------------------------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   global_pairwise_align_nucleotide
+   global_pairwise_align_protein
+   global_pairwise_align
+   local_pairwise_align_nucleotide
+   local_pairwise_align_protein
+   local_pairwise_align
+
+General functionality
+---------------------
+
+.. autosummary::
+   :toctree: generated/
+
+    make_identity_substitution_matrix
+
+Exceptions
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   SequenceCollectionError
+   AlignmentError
+   StockholmParseError
+
+Data Structure Examples
+-----------------------
+>>> from StringIO import StringIO
+>>> from skbio.alignment import SequenceCollection, Alignment
+>>> from skbio.sequence import DNA
+>>> seqs = [DNA("ACC--G-GGTA..", id="seq1"),
+...     DNA("TCC--G-GGCA..", id="seqs2")]
+>>> a1 = Alignment(seqs)
+>>> a1
+<Alignment: n=2; mean +/- std length=13.00 +/- 0.00>
+
+>>> seqs = [DNA("ACCGGG", id="seq1"),
+...     DNA("TCCGGGCA", id="seq2")]
+>>> s1 = SequenceCollection(seqs)
+>>> s1
+<SequenceCollection: n=2; mean +/- std length=7.00 +/- 1.00>
+
+>>> from skbio.parse.sequences import parse_fasta
+>>> fasta_f = StringIO('>seq1\n'
+...                    'CGATGTCGATCGATCGATCGATCAG\n'
+...                    '>seq2\n'
+...                    'CATCGATCGATCGATGCATGCATGCATG\n')
+>>> s1 = SequenceCollection.from_fasta_records(parse_fasta(fasta_f), DNA)
+>>> s1
+<SequenceCollection: n=2; mean +/- std length=26.50 +/- 1.50>
+
+>>> from skbio.sequence import RNA
+>>> from skbio.alignment import StockholmAlignment
+>>> seqs = [RNA("ACC--G-GGGU", id="seq1"),
+...     RNA("TCC--G-GGGA", id="seq2")]
+>>> gc = {'SS_cons': '(((.....)))'}
+>>> sto = StockholmAlignment(seqs, gc=gc)
+>>> print(sto)
+# STOCKHOLM 1.0
+seq1          ACC--G-GGGU
+seq2          TCC--G-GGGA
+#=GC SS_cons  (((.....)))
+//
+>>> sto.gc
+{'SS_cons': '(((.....)))'}
+
+Alignment Algorithm Examples
+----------------------------
+
+Optimized Alignment Algorithm Examples
+--------------------------------------
+Using the convenient ``local_pairwise_align_ssw`` function:
+
+>>> from skbio.alignment import local_pairwise_align_ssw
+>>> alignment = local_pairwise_align_ssw(
+...                 "ACTAAGGCTCTCTACCCCTCTCAGAGA",
+...                 "ACTAAGGCTCCTAACCCCCTTTTCTCAGA"
+...             )
+>>> print(alignment)
+>query
+ACTAAGGCTCTC-TACCC----CTCTCAGA
+>target
+ACTAAGGCTC-CTAACCCCCTTTTCTCAGA
+<BLANKLINE>
+
+Using the ``StripedSmithWaterman`` object:
+
+>>> from skbio.alignment import StripedSmithWaterman
+>>> query = StripedSmithWaterman("ACTAAGGCTCTCTACCCCTCTCAGAGA")
+>>> alignment = query("AAAAAACTCTCTAAACTCACTAAGGCTCTCTACCCCTCTTCAGAGAAGTCGA")
+>>> print(alignment)
+ACTAAGGCTC...
+ACTAAGGCTC...
+Score: 49
+Length: 28
+
+Using the ``StripedSmithWaterman`` object for multiple targets in an efficient
+way and finding the aligned sequence representations:
+
+>>> from skbio.alignment import StripedSmithWaterman
+>>> alignments = []
+>>> target_sequences = [
+...     "GCTAACTAGGCTCCCTTCTACCCCTCTCAGAGA",
+...     "GCCCAGTAGCTTCCCAATATGAGAGCATCAATTGTAGATCGGGCC",
+...     "TCTATAAGATTCCGCATGCGTTACTTATAAGATGTCTCAACGG",
+...     "TAGAGATTAATTGCCACTGCCAAAATTCTG"
+... ]
+>>> query_sequence = "ACTAAGGCTCTCTACCCCTCTCAGAGA"
+>>> query = StripedSmithWaterman(query_sequence)
+>>> for target_sequence in target_sequences:
+...     alignment = query(target_sequence)
+...     alignments.append(alignment)
+...
+>>> print(alignments[0])
+ACTAAGGCT-...
+ACT-AGGCTC...
+Score: 38
+Length: 30
+>>> print(alignments[0].aligned_query_sequence)
+ACTAAGGCT---CTCTACCCCTCTCAGAGA
+>>> print(alignments[0].aligned_target_sequence)
+ACT-AGGCTCCCTTCTACCCCTCTCAGAGA
+
+Slow Alignment Algorithm Examples
+---------------------------------
+scikit-bio also provides pure-Python implementations of Smith-Waterman and
+Needleman-Wunsch alignment. These are much slower than the methods described
+above, but serve as useful educational examples as they're simpler to
+experiment with. Functions are provided for local and global alignment of
+protein and nucleotide sequences. The ``global*`` and ``local*`` functions
+differ in the underlying algorithm that is applied (``global*`` uses Needleman-
+Wunsch while ``local*`` uses Smith-Waterman), and ``*protein`` and
+``*nucleotide`` differ in their default scoring of matches, mismatches, and
+gaps.
+
+Here we locally align a pair of protein sequences using gap open penalty
+of 11 and a gap extend penalty of 1 (in other words, it is much more
+costly to open a new gap than extend an existing one).
+
+>>> from skbio.alignment import local_pairwise_align_protein
+>>> s1 = "HEAGAWGHEE"
+>>> s2 = "PAWHEAE"
+>>> r = local_pairwise_align_protein(s1, s2, 11, 1)
+
+This returns an ``skbio.Alignment`` object. We can look at the aligned
+sequences:
+
+>>> print(str(r[0]))
+AWGHE
+>>> print(str(r[1]))
+AW-HE
+
+We can identify the start and end positions of each aligned sequence
+as follows:
+
+>>> r.start_end_positions()
+[(4, 8), (1, 4)]
+
+And we can view the score of the alignment using the ``score`` method:
+
+>>> r.score()
+25.0
+
+Similarly, we can perform global alignment of nucleotide sequences, and print
+the resulting alignment as fasta records:
+
+>>> from skbio.alignment import global_pairwise_align_nucleotide
+>>> s1 = "GCGTGCCTAAGGTATGCAAG"
+>>> s2 = "ACGTGCCTAGGTACGCAAG"
+>>> r = global_pairwise_align_nucleotide(s1, s2)
+>>> print(r.to_fasta())
+>0
+GCGTGCCTAAGGTATGCAAG
+>1
+ACGTGCCTA-GGTACGCAAG
+<BLANKLINE>
+
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from ._alignment import Alignment, SequenceCollection, StockholmAlignment
+from ._pairwise import (
+    local_pairwise_align_nucleotide, local_pairwise_align_protein,
+    local_pairwise_align, global_pairwise_align_nucleotide,
+    global_pairwise_align_protein, global_pairwise_align,
+    make_identity_substitution_matrix
+)
+from skbio.alignment._ssw_wrapper import (
+    StripedSmithWaterman, local_pairwise_align_ssw, AlignmentStructure)
+from ._exception import (SequenceCollectionError, StockholmParseError,
+                         AlignmentError)
+
+__all__ = ['Alignment', 'SequenceCollection', 'StockholmAlignment',
+           'StripedSmithWaterman', 'AlignmentStructure',
+           'local_pairwise_align_ssw', 'SequenceCollectionError',
+           'StockholmParseError', 'AlignmentError', 'global_pairwise_align',
+           'global_pairwise_align_nucleotide', 'global_pairwise_align_protein',
+           'local_pairwise_align', 'local_pairwise_align_nucleotide',
+           'local_pairwise_align_protein', 'make_identity_substitution_matrix']
+
+test = Tester().test
diff --git a/skbio/alignment/_alignment.py b/skbio/alignment/_alignment.py
new file mode 100644
index 0000000..c2dcbc8
--- /dev/null
+++ b/skbio/alignment/_alignment.py
@@ -0,0 +1,2095 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import zip, range
+from future.utils import viewkeys, viewitems
+from six import StringIO
+
+import warnings
+from collections import Counter, defaultdict, OrderedDict
+
+import numpy as np
+from scipy.stats import entropy
+
+from skbio._base import SkbioObject
+from skbio.stats.distance import DistanceMatrix
+from skbio.io.util import open_file
+from ._exception import (SequenceCollectionError, StockholmParseError,
+                         AlignmentError)
+
+
+class SequenceCollection(SkbioObject):
+    """Class for storing collections of biological sequences.
+
+    Parameters
+    ----------
+    seqs : list of `skbio.sequence.BiologicalSequence` objects
+        The `skbio.sequence.BiologicalSequence` objects to load into
+        a new `SequenceCollection` object.
+    validate : bool, optional
+        If True, runs the `is_valid` method after construction and raises
+        `SequenceCollectionError` if ``is_valid == False``.
+
+    Raises
+    ------
+    skbio.alignment.SequenceCollectionError
+        If ``validate == True`` and ``is_valid == False``.
+
+    See Also
+    --------
+    skbio.sequence.BiologicalSequence
+    skbio.sequence.NucleotideSequence
+    skbio.sequence.DNASequence
+    skbio.sequence.RNASequence
+    Alignment
+    skbio.parse.sequences
+    skbio.parse.sequences.parse_fasta
+
+    Examples
+    --------
+    >>> from skbio.alignment import SequenceCollection
+    >>> from skbio.sequence import DNA
+    >>> sequences = [DNA('ACCGT', id="seq1"),
+    ...              DNA('AACCGGT', id="seq2")]
+    >>> s1 = SequenceCollection(sequences)
+    >>> s1
+    <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
+
+    """
+    default_write_format = 'fasta'
+
+    @classmethod
+    def from_fasta_records(cls, fasta_records, seq_constructor,
+                           validate=False):
+        r"""Initialize a `SequenceCollection` object
+
+        .. note:: Deprecated in scikit-bio 0.2.0-dev
+           ``from_fasta_records`` will be removed in scikit-bio 0.3.0. It is
+           replaced by ``read``, which is a more general method for
+           deserializing FASTA-formatted files. ``read`` supports multiple file
+           formats, automatic file format detection, etc. by taking advantage
+           of scikit-bio's I/O registry system. See :mod:`skbio.io` for more
+           details.
+
+        Parameters
+        ----------
+        fasta_records : iterator of tuples
+            The records to load into a new `SequenceCollection` object. These
+            should be tuples of ``(sequence_id, sequence)``.
+        seq_constructor : skbio.sequence.BiologicalSequence
+        validate : bool, optional
+            If True, runs the `is_valid` method after construction and raises
+            `SequenceCollectionError` if ``is_valid == False``.
+
+        Returns
+        -------
+        SequenceCollection (or a derived class)
+            The new `SequenceCollection` object.
+
+        Raises
+        ------
+        skbio.alignment.SequenceCollectionError
+            If ``validate == True`` and ``is_valid == False``.
+
+        See Also
+        --------
+        skbio.sequence.BiologicalSequence
+        skbio.sequence.NucleotideSequence
+        skbio.sequence.DNASequence
+        skbio.sequence.RNASequence
+        Alignment
+        skbio.parse.sequences
+        skbio.parse.sequences.parse_fasta
+
+        Examples
+        --------
+        >>> from skbio.alignment import SequenceCollection
+        >>> from skbio.parse.sequences import parse_fasta
+        >>> from StringIO import StringIO
+        >>> from skbio.sequence import DNA
+        >>> fasta_f = StringIO('>seq1\nACCGT\n>seq2\nAACCGGT\n')
+        >>> s1 = SequenceCollection.from_fasta_records(
+        ...     parse_fasta(fasta_f), DNA)
+        >>> s1
+        <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
+
+        >>> records = [('seq1', 'ACCGT'), ('seq2', 'AACCGGT')]
+        >>> s1 = SequenceCollection.from_fasta_records(records, DNA)
+        >>> s1
+        <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
+
+        """
+        warnings.warn(
+            "SequenceCollection.from_fasta_records is deprecated and will be "
+            "removed in scikit-bio 0.3.0. Please update your code to use "
+            "SequenceCollection.read.", DeprecationWarning)
+
+        data = []
+        for seq_id, seq in fasta_records:
+            try:
+                id, description = seq_id.split(None, 1)
+            except ValueError:
+                id = seq_id.strip()
+                description = None
+            data.append(seq_constructor(seq, id=id,
+                                        description=description))
+
+        return cls(data, validate=validate)
+
+    def __init__(self, seqs, validate=False):
+        self._data = seqs
+        self._id_to_index = {}
+        for i, seq in enumerate(self._data):
+            id = seq.id
+            if id in self:
+                raise SequenceCollectionError(
+                    "All sequence ids must be unique, but "
+                    "id '%s' is present multiple times." % id)
+            else:
+                self._id_to_index[seq.id] = i
+
+        # This is bad because we're making a second pass through the sequence
+        # collection to validate. We'll want to avoid this, but it's tricky
+        # because different subclasses will want to define their own is_valid
+        # methods.
+        if validate and not self.is_valid():
+            raise SequenceCollectionError(
+                "%s failed to validate." % self.__class__.__name__)
+
+    def __contains__(self, id):
+        r"""The in operator.
+
+        Parameters
+        ----------
+        id : str
+            The id to look up in the `SequenceCollection`.
+
+        Returns
+        -------
+        bool
+            Indicates whether `id` corresponds to a sequence id
+            in the `SequenceCollection`.
+
+        .. shownumpydoc
+
+        """
+        return id in self._id_to_index
+
+    def __eq__(self, other):
+        r"""The equality operator.
+
+        Parameters
+        ----------
+        other : `SequenceCollection`
+            The `SequenceCollection` to test for equality against.
+
+        Returns
+        -------
+        bool
+            Indicates whether `self` and `other` are equal.
+
+        Notes
+        -----
+        `SequenceCollection` objects are equal if they are the same type,
+        contain the same number of sequences, and if each of the
+        `skbio.sequence.BiologicalSequence` objects, in order, are equal.
+
+        .. shownumpydoc
+
+        """
+        if self.__class__ != other.__class__:
+            return False
+        elif len(self) != len(other):
+            return False
+        else:
+            for self_seq, other_seq in zip(self, other):
+                if self_seq != other_seq:
+                    return False
+        return True
+
+    def __getitem__(self, index):
+        r"""The indexing operator.
+
+        Parameters
+        ----------
+        index : int, str
+            The position or sequence id of the
+            `skbio.sequence.BiologicalSequence` to return from the
+            `SequenceCollection`.
+
+        Returns
+        -------
+        `skbio.sequence.BiologicalSequence`
+            The `skbio.sequence.BiologicalSequence` at the specified
+            index in the `SequenceCollection`.
+
+        Examples
+        --------
+        >>> from skbio.alignment import SequenceCollection
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('ACCGT', id="seq1"),
+        ...              DNA('AACCGGT', id="seq2")]
+        >>> s1 = SequenceCollection(sequences)
+        >>> s1[0]
+        <DNASequence: ACCGT (length: 5)>
+        >>> s1["seq1"]
+        <DNASequence: ACCGT (length: 5)>
+
+        .. shownumpydoc
+
+        """
+        if isinstance(index, str):
+            return self.get_seq(index)
+        else:
+            return self._data[index]
+
+    def __iter__(self):
+        r"""The iter operator.
+
+        Returns
+        -------
+        iterator
+            `skbio.sequence.BiologicalSequence` iterator for the
+            `SequenceCollection`.
+
+        .. shownumpydoc
+
+        """
+        return iter(self._data)
+
+    def __len__(self):
+        r"""The len operator.
+
+        Returns
+        -------
+        int
+            The number of sequences in the `SequenceCollection`.
+
+        .. shownumpydoc
+
+        """
+        return self.sequence_count()
+
+    def __ne__(self, other):
+        r"""The inequality operator.
+
+        Parameters
+        ----------
+        other : `SequenceCollection`
+
+        Returns
+        -------
+        bool
+            Indicates whether self and other are not equal.
+
+        Notes
+        -----
+        See `SequenceCollection.__eq__` for a description of what it means for
+        a pair of `SequenceCollection` objects to be equal.
+
+        .. shownumpydoc
+
+        """
+        return not self.__eq__(other)
+
+    def __repr__(self):
+        r"""The repr method.
+
+        Returns
+        -------
+        str
+            Returns a string representation of the object.
+
+        Notes
+        -----
+        String representation contains the class name, the number of sequences
+        in the `SequenceCollection` (n), and the mean and standard deviation
+        sequence length.
+
+        Examples
+        --------
+        >>> from skbio.alignment import SequenceCollection
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('ACCGT', id="seq1"),
+        ...              DNA('AACCGGT', id="seq2")]
+        >>> s1 = SequenceCollection(sequences)
+        >>> print(repr(s1))
+        <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
+
+        .. shownumpydoc
+
+        """
+        cn = self.__class__.__name__
+        count, center, spread = self.distribution_stats()
+        return "<%s: n=%d; mean +/- std length=%.2f +/- %.2f>" \
+            % (cn, count, center, spread)
+
+    def __reversed__(self):
+        """The reversed method.
+
+        Returns
+        -------
+        iterator
+            `skbio.sequence.BiologicalSequence` iterator for the
+            `SequenceCollection` in reverse order.
+
+        .. shownumpydoc
+
+        """
+        return reversed(self._data)
+
+    def __str__(self):
+        r"""The str method.
+
+        Returns
+        -------
+        str
+            Fasta-formatted string of all sequences in the object.
+
+        .. shownumpydoc
+
+        """
+        fh = StringIO()
+        self.write(fh, format='fasta')
+        fasta_str = fh.getvalue()
+        fh.close()
+        return fasta_str
+
+    def distances(self, distance_fn):
+        """Compute distances between all pairs of sequences
+
+        Parameters
+        ----------
+        distance_fn : function
+            Function for computing the distance between a pair of sequences.
+            This must take two sequences as input (as
+            `skbio.sequence.BiologicalSequence` objects) and return a
+            single integer or float value.
+
+        Returns
+        -------
+        skbio.DistanceMatrix
+            Matrix containing the distances between all pairs of sequences.
+
+        Raises
+        ------
+        skbio.util.exception.BiologicalSequenceError
+            If ``len(self) != len(other)`` and ``distance_fn`` ==
+            ``scipy.spatial.distance.hamming``.
+
+        See Also
+        --------
+        skbio.DistanceMatrix
+        scipy.spatial.distance.hamming
+
+        Examples
+        --------
+        >>> from scipy.spatial.distance import hamming
+        >>> from skbio.alignment import SequenceCollection
+        >>> from skbio.sequence import DNA
+        >>> seqs = [DNA("ACCGGGTT", id="s1"),
+        ...         DNA("ACTTGGTT", id="s2"),
+        ...         DNA("ACTAGGTT", id="s3")]
+        >>> a1 = SequenceCollection(seqs)
+        >>> print(a1.distances(hamming))
+        3x3 distance matrix
+        IDs:
+        's1', 's2', 's3'
+        Data:
+        [[ 0.     0.25   0.25 ]
+         [ 0.25   0.     0.125]
+         [ 0.25   0.125  0.   ]]
+
+        """
+        sequence_count = self.sequence_count()
+        dm = np.zeros((sequence_count, sequence_count))
+        ids = []
+        for i in range(sequence_count):
+            self_i = self[i]
+            ids.append(self_i.id)
+            for j in range(i):
+                dm[i, j] = dm[j, i] = self_i.distance(self[j], distance_fn)
+        return DistanceMatrix(dm, ids)
+
+    def distribution_stats(self, center_f=np.mean, spread_f=np.std):
+        r"""Return sequence count, and center and spread of sequence lengths
+
+        Parameters
+        ----------
+        center_f : function
+            Should take a list-like object and return a single value
+            representing the center of the distribution.
+        spread_f : function
+            Should take a list-like object and return a single value
+            representing the spread of the distribution.
+
+        Returns
+        -------
+        tuple of (int, float, float)
+            The sequence count, center of length distribution, spread of length
+            distribution.
+
+        Notes
+        -----
+        Alternatives for `center_f` and `spread_f` could be median and median
+        absolute deviation.
+
+        Examples
+        --------
+        >>> from skbio.alignment import SequenceCollection
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('ACCGT', id="seq1"),
+        ...              DNA('AACCGGT', id="seq2")]
+        >>> s1 = SequenceCollection(sequences)
+        >>> s1.distribution_stats()
+        (2, 6.0, 1.0)
+
+        """
+        if self.is_empty():
+            return (0, 0.0, 0.0)
+        else:
+            sequence_count = self.sequence_count()
+            sequence_lengths = self.sequence_lengths()
+            return (sequence_count, center_f(sequence_lengths),
+                    spread_f(sequence_lengths))
+
+    def degap(self):
+        r"""Return a new `SequenceCollection` with all gap characters removed.
+
+        Returns
+        -------
+        SequenceCollection
+            A new `SequenceCollection` where
+            `skbio.sequence.BiologicalSequence.degap` has been called on
+            each sequence.
+
+        Examples
+        --------
+        >>> from skbio.alignment import SequenceCollection
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('A--CCGT.', id="seq1"),
+        ...              DNA('.AACCG-GT.', id="seq2")]
+        >>> s1 = SequenceCollection(sequences)
+        >>> s2 = s1.degap()
+        >>> s2
+        <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
+
+        """
+        return SequenceCollection([seq.degap() for seq in self])
+
+    def get_seq(self, id):
+        r"""Return a sequence from the `SequenceCollection` by its id.
+
+        Parameters
+        ----------
+        id : str
+            The id of the sequence to return.
+
+        Returns
+        -------
+        skbio.sequence.BiologicalSequence
+            The `skbio.sequence.BiologicalSequence` with `id`.
+
+        Raises
+        ------
+        KeyError
+            If `id` is not in the `SequenceCollection` object.
+
+        Examples
+        --------
+        >>> from skbio.alignment import SequenceCollection
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('A--CCGT.', id="seq1"),
+        ...              DNA('.AACCG-GT.', id="seq2")]
+        >>> s1 = SequenceCollection(sequences)
+        >>> print(s1['seq1'])
+        A--CCGT.
+
+        """
+        return self[self._id_to_index[id]]
+
+    def ids(self):
+        """Returns the `BiologicalSequence` ids
+
+        Returns
+        -------
+        list
+            The ordered list of ids for the
+            `skbio.sequence.BiologicalSequence` objects in the
+            `SequenceCollection`.
+
+        Examples
+        --------
+        >>> from skbio.alignment import SequenceCollection
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('A--CCGT.', id="seq1"),
+        ...              DNA('.AACCG-GT.', id="seq2")]
+        >>> s1 = SequenceCollection(sequences)
+        >>> print(s1.ids())
+        ['seq1', 'seq2']
+
+        """
+        return [seq.id for seq in self]
+
    def update_ids(self, ids=None, fn=None, prefix=""):
        """Update sequence IDs on the sequence collection.

        IDs can be updated by providing a sequence of new IDs (`ids`) or a
        function that maps current IDs to new IDs (`fn`).

        Default behavior (if `ids` and `fn` are not provided) is to create new
        IDs that are unique positive integers (starting at 1) cast as strings,
        optionally preceded by `prefix`. For example, ``('1', '2', '3', ...)``.

        Parameters
        ----------
        ids : sequence of str, optional
            New IDs to update on the sequence collection.
        fn : function, optional
            Function accepting a sequence of current IDs and returning a
            sequence of new IDs to update on the sequence collection.
        prefix : str, optional
            If `ids` and `fn` are both ``None``, `prefix` is prepended to each
            new integer-based ID (see description of default behavior above).

        Returns
        -------
        SequenceCollection
            New ``SequenceCollection`` (or subclass) containing sequences with
            updated IDs.
        dict
            Mapping of new IDs to old IDs.

        Raises
        ------
        SequenceCollectionError
            If both `ids` and `fn` are provided, `prefix` is provided with
            either `ids` or `fn`, or the number of new IDs does not match the
            number of sequences in the sequence collection.

        Notes
        -----
        The default behavior can be useful when writing sequences out for use
        with programs that are picky about their sequence IDs
        (e.g., RAxML [1]_).

        References
        ----------
        .. [1] RAxML Version 8: A tool for Phylogenetic Analysis and
           Post-Analysis of Large Phylogenies". In Bioinformatics, 2014

        Examples
        --------
        Define a sequence collection containing two sequences with IDs "abc"
        and "def":

        >>> from skbio import DNA, SequenceCollection
        >>> sequences = [DNA('A--CCGT.', id="abc"),
        ...              DNA('.AACCG-GT.', id="def")]
        >>> s1 = SequenceCollection(sequences)
        >>> s1.ids()
        ['abc', 'def']

        Update the IDs in the sequence collection, obtaining a new sequence
        collection with IDs that are integer-based:

        >>> s2, new_to_old_ids = s1.update_ids()
        >>> s2.ids()
        ['1', '2']

        Alternatively, we can specify a function to map the current IDs to new
        IDs. Let's define a function that appends ``'-new'`` to each ID:

        >>> def id_mapper(ids):
        ...     return [id_ + '-new' for id_ in ids]
        >>> s3, new_to_old_ids = s1.update_ids(fn=id_mapper)
        >>> s3.ids()
        ['abc-new', 'def-new']

        We can also directly update the IDs with a new sequence of IDs:

        >>> s4, new_to_old_ids = s1.update_ids(ids=['ghi', 'jkl'])
        >>> s4.ids()
        ['ghi', 'jkl']

        """
        # The three input modes (ids, fn, default prefix-based) are mutually
        # exclusive; reject ambiguous combinations up front.
        if ids is not None and fn is not None:
            raise SequenceCollectionError("ids and fn cannot both be "
                                          "provided.")
        if (ids is not None and prefix) or (fn is not None and prefix):
            raise SequenceCollectionError("prefix cannot be provided if ids "
                                          "or fn is provided.")

        # Normalize every mode into a single `fn` so one code path below
        # produces the new IDs.
        if ids is not None:
            def fn(_):
                return ids

        elif fn is None:
            # Default: sequential integer IDs (as strings), optionally
            # preceded by `prefix`, starting at 1.
            def fn(_):
                new_ids = []
                for i in range(1, len(self) + 1):
                    new_ids.append("%s%d" % (prefix, i))
                return new_ids

        old_ids = self.ids()
        new_ids = fn(old_ids)

        if len(new_ids) != len(old_ids):
            raise SequenceCollectionError(
                "Number of new IDs must be equal to the number of existing "
                "IDs (%d != %d)." % (len(new_ids), len(old_ids)))

        new_to_old_ids = dict(zip(new_ids, old_ids))

        # Copy each sequence with its replacement ID; self.__class__ keeps
        # the subclass type (e.g., Alignment) in the returned collection.
        new_seqs = []
        for new_id, seq in zip(new_ids, self):
            new_seqs.append(seq.copy(id=new_id))

        return self.__class__(new_seqs), new_to_old_ids
+
+    def int_map(self, prefix=""):
+        """Create an integer-based mapping of sequence ids
+
+        .. note:: Deprecated in scikit-bio 0.2.0-dev
+           ``SequenceCollection.int_map`` will be removed in scikit-bio 0.3.0
+           in favor of ``SequenceCollection.update_ids``, which provides a
+           generalized way of updating IDs on a ``SequenceCollection``. The
+           default behavior of ``SequenceCollection.update_ids`` matches the
+           behavior in ``int_map``, except that a new ``SequenceCollection`` is
+           returned instead of a ``dict``.
+
+        Parameters
+        ----------
+        prefix : str
+            String prefix for new integer-based ids.
+
+        Returns
+        -------
+        dict
+            Mapping of new ids to sequences.
+        dict
+            Mapping of new ids to old ids.
+
+        Notes
+        -----
+        This is useful when writing sequences out for use with programs that
+        are picky about their sequence ids (e.g., raXML).
+
+        The integer-based ids will be strings, for consistency (e.g., if prefix
+        is passed) and begin at 1.
+
+        References
+        ----------
+        RAxML Version 8: A tool for Phylogenetic Analysis and Post-Analysis of
+        Large Phylogenies". In Bioinformatics, 2014
+
+        """
+        warnings.warn(
+            "SequenceCollection.int_map is deprecated and will be removed in "
+            "scikit-bio 0.3.0. Please update your code to use "
+            "SequenceCollection.update_ids instead.", DeprecationWarning)
+
+        int_keys = []
+        int_map = []
+        for i, seq in enumerate(self):
+            k = ("%s%d" % (prefix, i+1))
+            int_map.append((k, seq))
+            int_keys.append((k, seq.id))
+        return dict(int_map), dict(int_keys)
+
+    def is_empty(self):
+        """Return True if the SequenceCollection is empty
+
+        Returns
+        -------
+        bool
+            ``True`` if `self` contains zero sequences, and ``False``
+            otherwise.
+
+        """
+        return self.sequence_count() == 0
+
    def is_valid(self):
        """Return True if the SequenceCollection is valid

        Returns
        -------
        bool
            ``True`` if `self` is valid, and ``False`` otherwise.

        Notes
        -----
        Validity is defined as having no sequences containing characters
        outside of their valid character sets.

        See Also
        --------
        skbio.sequence.BiologicalSequence.is_valid

        Examples
        --------
        >>> from skbio.alignment import SequenceCollection
        >>> from skbio.sequence import DNA, RNA
        >>> sequences = [DNA('ACCGT', id="seq1"),
        ...              DNA('AACCGGT', id="seq2")]
        >>> s1 = SequenceCollection(sequences)
        >>> print(s1.is_valid())
        True
        >>> sequences = [RNA('ACCGT', id="seq1"),
        ...              RNA('AACCGGT', id="seq2")]
        >>> s1 = SequenceCollection(sequences)
        >>> print(s1.is_valid())
        False

        """
        # Delegates to the per-sequence character-set check; subclasses can
        # override _validate_character_set to change validation semantics.
        return self._validate_character_set()
+
+    def iteritems(self):
+        """Generator of id, sequence tuples
+
+        Returns
+        -------
+        generator of tuples
+            Each tuple contains ordered
+            (`skbio.sequence.BiologicalSequence.id`,
+            `skbio.sequence.BiologicalSequence`) pairs.
+
+        """
+        for seq in self:
+            yield seq.id, seq
+
+    def lower(self):
+        """Converts all sequences to lowercase
+
+        Returns
+        -------
+        SequenceCollection
+            New `SequenceCollection` object where
+            `skbio.sequence.BiologicalSequence.lower()` has been called
+            on each sequence.
+
+        See Also
+        --------
+        skbio.sequence.BiologicalSequence.lower
+        upper
+
+        """
+        return self.__class__([seq.lower() for seq in self])
+
+    def sequence_count(self):
+        """Return the count of sequences in the `SequenceCollection`
+
+        Returns
+        -------
+        int
+            The number of sequences in the `SequenceCollection`.
+
+        See Also
+        --------
+        sequence_lengths
+        Alignment.sequence_length
+
+        """
+        return len(self._data)
+
+    def k_word_frequencies(self, k, overlapping=True):
+        """Return k-word frequencies for sequences in ``SequenceCollection``.
+
+        Parameters
+        ----------
+        k : int
+            The word length.
+        overlapping : bool, optional
+            Defines whether the k-words should be overlapping or not
+            overlapping. This is only relevant when `k` > 1.
+
+        Returns
+        -------
+        list
+            List of ``collections.defaultdict`` objects, one for each sequence
+            in the ``SequenceCollection``, representing the frequency of each
+            k-word in each sequence of the ``SequenceCollection``.
+
+        See Also
+        --------
+        Alignment.position_frequencies
+
+        Examples
+        --------
+        >>> from skbio import SequenceCollection, DNA
+        >>> sequences = [DNA('A', id="seq1"),
+        ...              DNA('AT', id="seq2"),
+        ...              DNA('TTTT', id="seq3")]
+        >>> s1 = SequenceCollection(sequences)
+        >>> for freqs in s1.k_word_frequencies(1):
+        ...     print(freqs)
+        defaultdict(<type 'float'>, {'A': 1.0})
+        defaultdict(<type 'float'>, {'A': 0.5, 'T': 0.5})
+        defaultdict(<type 'float'>, {'T': 1.0})
+        >>> for freqs in s1.k_word_frequencies(2):
+        ...     print(freqs)
+        defaultdict(<type 'float'>, {})
+        defaultdict(<type 'float'>, {'AT': 1.0})
+        defaultdict(<type 'float'>, {'TT': 1.0})
+
+        """
+        return [s.k_word_frequencies(k, overlapping) for s in self]
+
+    def sequence_lengths(self):
+        """Return lengths of the sequences in the `SequenceCollection`
+
+        Returns
+        -------
+        list
+            The ordered list of sequence lengths.
+
+        See Also
+        --------
+        sequence_count
+
+        """
+        return [len(seq) for seq in self]
+
+    def to_fasta(self):
+        """Return fasta-formatted string representing the `SequenceCollection`
+
+        .. note:: Deprecated in scikit-bio 0.2.0-dev
+           ``to_fasta`` will be removed in scikit-bio 0.3.0. It is replaced by
+           ``write``, which is a more general method for serializing
+           FASTA-formatted files. ``write`` supports multiple file formats by
+           taking advantage of scikit-bio's I/O registry system. See
+           :mod:`skbio.io` for more details.
+
+        Returns
+        -------
+        str
+            A fasta-formatted string representing the `SequenceCollection`.
+
+        See Also
+        --------
+        skbio.parse.sequences.parse_fasta
+        """
+        warnings.warn(
+            "SequenceCollection.to_fasta is deprecated and will be removed in "
+            "scikit-bio 0.3.0. Please update your code to use "
+            "SequenceCollection.write.", DeprecationWarning)
+
+        return ''.join([seq.to_fasta() for seq in self._data])
+
+    def toFasta(self):
+        """Return fasta-formatted string representing the `SequenceCollection`
+
+        .. note:: Deprecated in skbio 0.3.0
+                  `SequenceCollection.toFasta` will be removed in skbio 0.2.0,
+                  it is replaced by `SequenceCollection.to_fasta` as the latter
+                  adheres to PEP8 naming conventions. This is necessary to keep
+                  in place now as these objects are sometimes passed into
+                  code that expects a `cogent.alignment.Alignment` object
+                  (e.g., PyNAST), so we need to support the method with this
+                  name.
+
+        Returns
+        -------
+        str
+            A fasta-formatted string representing the `SequenceCollection`.
+
+        """
+        warnings.warn(
+            "SequenceCollection.toFasta() is deprecated. You should use "
+            "SequenceCollection.to_fasta().", DeprecationWarning)
+        return self.to_fasta()
+
+    def upper(self):
+        """Converts all sequences to uppercase
+
+        Returns
+        -------
+        SequenceCollection
+            New `SequenceCollection` object where `BiologicalSequence.upper()`
+            has been called on each sequence.
+
+        See Also
+        --------
+        BiologicalSequence.upper
+        lower
+
+        """
+        return self.__class__([seq.upper() for seq in self])
+
+    def _validate_character_set(self):
+        """Return ``True`` if all sequences are valid, ``False`` otherwise
+        """
+        for seq in self:
+            if not seq.is_valid():
+                return False
+        return True
+
+
+class Alignment(SequenceCollection):
+    """Class for storing alignments of biological sequences.
+
+    The ``Alignment`` class adds convenience methods to the
+    ``SequenceCollection`` class to make it easy to work with alignments of
+    biological sequences.
+
+    Parameters
+    ----------
+    seqs : list of `skbio.sequence.BiologicalSequence` objects
+        The `skbio.sequence.BiologicalSequence` objects to load into
+        a new `Alignment` object.
+    validate : bool, optional
+        If True, runs the `is_valid` method after construction and raises
+        `SequenceCollectionError` if ``is_valid == False``.
+    score : float, optional
+        The score of the alignment, if applicable (usually only if the
+        alignment was just constructed).
+    start_end_positions : iterable of two-item tuples, optional
+        The start and end positions of each input sequence in the alignment,
+        if applicable (usually only if the alignment was just constructed using
+        a local alignment algorithm). Note that these should be indexes into
+        the unaligned sequences, though the `Alignment` object itself doesn't
+        know about these.
+
+    Raises
+    ------
+    skbio.alignment.SequenceCollectionError
+        If ``validate == True`` and ``is_valid == False``.
+    skbio.alignment.AlignmentError
+        If not all the sequences have the same length.
+
+    Notes
+    -----
+    By definition, all of the sequences in an alignment must be of the same
+    length. For this reason, an alignment can be thought of as a matrix of
+    sequences (rows) by positions (columns).
+
+    See Also
+    --------
+    skbio.sequence.BiologicalSequence
+    skbio.sequence.NucleotideSequence
+    skbio.sequence.DNASequence
+    skbio.sequence.RNASequence
+    SequenceCollection
+    skbio.parse.sequences
+    skbio.parse.sequences.parse_fasta
+
+    Examples
+    --------
+    >>> from skbio.alignment import Alignment
+    >>> from skbio.sequence import DNA
+    >>> sequences = [DNA('A--CCGT', id="seq1"),
+    ...              DNA('AACCGGT', id="seq2")]
+    >>> a1 = Alignment(sequences)
+    >>> a1
+    <Alignment: n=2; mean +/- std length=7.00 +/- 0.00>
+
+    """
+
+    def __init__(self, seqs, validate=False, score=None,
+                 start_end_positions=None):
+        super(Alignment, self).__init__(seqs, validate)
+
+        if not self._validate_lengths():
+            raise AlignmentError("All sequences need to be of equal length.")
+
+        if score is not None:
+            self._score = float(score)
+        self._start_end_positions = start_end_positions
+
    def distances(self, distance_fn=None):
        """Compute distances between all pairs of sequences

        Parameters
        ----------
        distance_fn : function, optional
            Function for computing the distance between a pair of sequences.
            This must take two sequences as input (as
            `skbio.sequence.BiologicalSequence` objects) and return a
            single integer or float value. Defaults to
            `scipy.spatial.distance.hamming`.

        Returns
        -------
        skbio.DistanceMatrix
            Matrix containing the distances between all pairs of sequences.

        Raises
        ------
        skbio.util.exception.BiologicalSequenceError
            If ``len(self) != len(other)`` and ``distance_fn`` ==
            ``scipy.spatial.distance.hamming``.

        See Also
        --------
        skbio.DistanceMatrix
        scipy.spatial.distance.hamming

        Examples
        --------
        >>> from skbio.alignment import Alignment
        >>> from skbio.sequence import DNA
        >>> seqs = [DNA("A-CCGGG", id="s1"),
        ...         DNA("ATCC--G", id="s2"),
        ...         DNA("ATCCGGA", id="s3")]
        >>> a1 = Alignment(seqs)
        >>> print(a1.distances())
        3x3 distance matrix
        IDs:
        's1', 's2', 's3'
        Data:
        [[ 0.          0.42857143  0.28571429]
         [ 0.42857143  0.          0.42857143]
         [ 0.28571429  0.42857143  0.        ]]

        """
        # Delegates to SequenceCollection.distances. When distance_fn is
        # None, the None is passed through to each sequence's distance
        # method -- presumably that method defaults to hamming; confirm in
        # BiologicalSequence.distance.
        return super(Alignment, self).distances(distance_fn)
+
    def score(self):
        """Returns the score of the alignment.

        Returns
        -------
        float, None
            The score of the alignment, or ``None`` if this was not provided on
            object construction.

        Notes
        -----
        This value will often be ``None``, as it is generally only going to be
        provided on construction if the alignment itself was built within
        scikit-bio.

        """
        # NOTE(review): __init__ only assigns self._score when a score was
        # supplied, so if the attribute was never set this raises
        # AttributeError rather than returning None as documented -- confirm
        # intended behavior against __init__.
        return self._score
+
    def start_end_positions(self):
        """Returns the (start, end) positions for each aligned sequence.

        Returns
        -------
        list, None
            The list of sequence start/end positions, or ``None`` if this was
            not provided on object construction.

        Notes
        -----
        The start/end positions indicate the range of the unaligned sequences
        in the alignment. For example, if local alignment were performed on the
        sequences ACA and TACAT, depending on the specific algorithm that was
        used to perform the alignment, the start/end positions would likely be:
        ``[(0,2), (1,3)]``. This indicates that the first and last positions of
        the second sequence were not included in the alignment, and the
        aligned sequences were therefore::

            ACA
            ACA

        This value will often be ``None``, as it is generally only going to be
        provided on construction if the alignment itself was built within
        scikit-bio.

        """
        # Stored verbatim from the constructor; this class performs no
        # computation on these positions.
        return self._start_end_positions
+
+    def subalignment(self, seqs_to_keep=None, positions_to_keep=None,
+                     invert_seqs_to_keep=False,
+                     invert_positions_to_keep=False):
+        """Returns new `Alignment` that is a subset of the current `Alignment`
+
+        Parameters
+        ----------
+        seqs_to_keep : list, optional
+            A list of sequence ids to be retained in the resulting
+            `Alignment`. If this is not passed, the default will be to retain
+            all sequences.
+        positions_to_keep : list, optional
+            A list of position indices to be retained in the resulting
+            `Alignment`. If this is not passed, the default will be to retain
+            all positions.
+        invert_seqs_to_keep : bool, optional
+            If `True`, the sequences identified in `seqs_to_keep` will be
+            discarded, rather than retained.
+        invert_positions_to_keep : bool, optional
+            If `True`, the sequences identified in `positions_to_keep` will be
+            discarded, rather than retained.
+
+        Returns
+        -------
+        Alignment
+            The specified subalignment.
+
+        Examples
+        --------
+        >>> from skbio.alignment import Alignment
+        >>> from skbio.sequence import DNA
+        >>> seqs = [DNA("A-CCGGG", id="s1"),
+        ...         DNA("ATCC--G", id="s2"),
+        ...         DNA("ATCCGGA", id="s3")]
+        >>> a1 = Alignment(seqs)
+        >>> a1
+        <Alignment: n=3; mean +/- std length=7.00 +/- 0.00>
+        >>> a1.subalignment(seqs_to_keep=["s1", "s2"])
+        <Alignment: n=2; mean +/- std length=7.00 +/- 0.00>
+        >>> a1.subalignment(seqs_to_keep=["s1", "s2"],
+        ...         invert_seqs_to_keep=True)
+        <Alignment: n=1; mean +/- std length=7.00 +/- 0.00>
+        >>> a1.subalignment(positions_to_keep=[0, 2, 3, 5])
+        <Alignment: n=3; mean +/- std length=4.00 +/- 0.00>
+        >>> a1.subalignment(positions_to_keep=[0, 2, 3, 5],
+        ...         invert_positions_to_keep=True)
+        <Alignment: n=3; mean +/- std length=3.00 +/- 0.00>
+        >>> a1.subalignment(seqs_to_keep=["s1", "s2"],
+        ...         positions_to_keep=[0, 2, 3, 5])
+        <Alignment: n=2; mean +/- std length=4.00 +/- 0.00>
+
+        """
+        # if seqs_to_keep was not passed
+        if seqs_to_keep is None:
+            # and invert_seqs_to_keep is True
+            if invert_seqs_to_keep:
+                # return an empty alignment (because we're inverting the
+                # default of keeping all sequences)
+                return self.__class__([])
+            # else if invert_seqs_to_keep is False
+            else:
+                # default to returning all sequences
+                def keep_seq(i, id):
+                    return True
+        # else, if seqs_to_keep was passed
+        else:
+            seqs_to_keep = set(seqs_to_keep)
+            # and invert_seqs_to_keep is True
+            if invert_seqs_to_keep:
+                # keep only sequences that were not listed in seqs_to_keep
+                def keep_seq(i, id):
+                    return not (id in seqs_to_keep or
+                                i in seqs_to_keep)
+            # else if invert_seqs_to_keep is False
+            else:
+                # keep only sequences that were listed in seqs_to_keep
+                def keep_seq(i, id):
+                    return (id in seqs_to_keep or
+                            i in seqs_to_keep)
+
+        # if positions_to_keep was not passed
+        if positions_to_keep is None:
+            # and invert_positions_to_keep is True
+            if invert_positions_to_keep:
+                # return an empty alignment (because we're inverting the
+                # default of keeping all positions)
+                return self.__class__([])
+            # else if invert_positions_to_keep is False
+            else:
+                # default to returning all positions
+                def keep_position(pos):
+                    return True
+        # else, if positions_to_keep was passed
+        else:
+            positions_to_keep = set(positions_to_keep)
+            # and invert_positions_to_keep is True
+            if invert_positions_to_keep:
+                # keep only positions that were not listed in
+                # positions_to_keep
+                def keep_position(pos):
+                    return pos not in positions_to_keep
+            # else if invert_positions_to_keep is False
+            else:
+                # keep only sequences that were listed in positions_to_keep
+                def keep_position(pos):
+                    return pos in positions_to_keep
+
+        # prep the result object
+        result = []
+        # indices to keep
+        indices = [
+            i for i in range(self.sequence_length()) if keep_position(i)]
+        # iterate over sequences
+        for sequence_index, seq in enumerate(self):
+            # determine if we're keeping the current sequence
+            if keep_seq(sequence_index, seq.id):
+                # slice the current sequence with the indices
+                result.append(seq[indices])
+            # if we're not keeping the current sequence, move on to the next
+            else:
+                continue
+        # pack the result up in the same type of object as the current object
+        # and return it
+        return self.__class__(result)
+
+    def iter_positions(self, constructor=None):
+        """Generator of Alignment positions (i.e., columns)
+
+        Parameters
+        ----------
+        constructor : type, optional
+            Constructor function for creating the positional values. By
+            default, these will be the same type as corresponding
+            `skbio.sequence.BiologicalSequence` in the `Alignment` object, but
+            you can pass a `skbio.sequence.BiologicalSequence` class here to
+            ensure that they are all of consistent type, or ``str`` to have
+            them returned as strings.
+
+        Returns
+        -------
+        GeneratorType
+            Generator of lists of positional values in the `Alignment`
+            (effectively the transpose of the alignment).
+
+        See Also
+        --------
+        iter
+
+        Examples
+        --------
+        >>> from skbio.alignment import Alignment
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('ACCGT--', id="seq1"),
+        ...              DNA('AACCGGT', id="seq2")]
+        >>> a1 = Alignment(sequences)
+        >>> for position in a1.iter_positions():
+        ...     print(position)
+        [<DNASequence: A (length: 1)>, <DNASequence: A (length: 1)>]
+        [<DNASequence: C (length: 1)>, <DNASequence: A (length: 1)>]
+        [<DNASequence: C (length: 1)>, <DNASequence: C (length: 1)>]
+        [<DNASequence: G (length: 1)>, <DNASequence: C (length: 1)>]
+        [<DNASequence: T (length: 1)>, <DNASequence: G (length: 1)>]
+        [<DNASequence: - (length: 1)>, <DNASequence: G (length: 1)>]
+        [<DNASequence: - (length: 1)>, <DNASequence: T (length: 1)>]
+
+        >>> for position in a1.iter_positions(constructor=str):
+        ...     print(position)
+        ['A', 'A']
+        ['C', 'A']
+        ['C', 'C']
+        ['G', 'C']
+        ['T', 'G']
+        ['-', 'G']
+        ['-', 'T']
+
+        """
+        if constructor is None:
+            def constructor(s):
+                return s
+        for i in range(self.sequence_length()):
+            position = [constructor(seq[i]) for seq in self]
+            yield position
+
+    def majority_consensus(self, constructor=None):
+        """Return the majority consensus sequence for the `Alignment`
+
+        .. note:: `constructor` parameter deprecated in scikit-bio 0.2.0-dev
+           `constructor` parameter will be removed in scikit-bio 0.3.0 as its
+           most common use is to convert to ``str``, and this functionality is
+           already accessible by calling ``str`` on the returned
+           ``BiologicalSequence`` (e.g., ``str(seq)``).
+
+        Parameters
+        ----------
+        constructor : function, optional
+            Constructor function for creating the consensus sequence. By
+            default, this will be the same type as the first sequence in the
+            `Alignment`.
+
+        Returns
+        -------
+        skbio.sequence.BiologicalSequence
+            The consensus sequence of the `Alignment`. In other words, at each
+            position the most common character is chosen, and those characters
+            are combined to create a new sequence. The sequence will not have
+            its ID, description, or quality set; only the consensus sequence
+            will be set.
+
+        Notes
+        -----
+        If there are two characters that are equally abundant in the sequence
+        at a given position, the choice of which of those characters will be
+        present at that position in the result is arbitrary.
+
+        Examples
+        --------
+        >>> from skbio.alignment import Alignment
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('AC--', id="seq1"),
+        ...              DNA('AT-C', id="seq2"),
+        ...              DNA('TT-C', id="seq3")]
+        >>> a1 = Alignment(sequences)
+        >>> a1.majority_consensus()
+        <DNASequence: AT-C (length: 4)>
+
+        """
+        # handle empty Alignment case
+        if self.is_empty():
+            return ''
+
+        if constructor is None:
+            constructor = self[0].__class__
+        else:
+            warnings.warn(
+                "constructor parameter in Alignment.majority_consensus is "
+                "deprecated and will be removed in scikit-bio 0.3.0. Please "
+                "update your code to construct the desired object from the "
+                "BiologicalSequence (or subclass) that is returned by this "
+                "method.", DeprecationWarning)
+
+        result = []
+        for c in self.position_counters():
+            # Counter.most_common returns an ordered list of the
+            # n most common (sequence, count) items in Counter. Here
+            # we set n=1, and take only the character, not the count.
+            result.append(c.most_common(1)[0][0])
+
+        # TODO when constructor parameter is removed, this join call can be
+        # removed
+        result = ''.join(result)
+        return constructor(result)
+
+    def omit_gap_positions(self, maximum_gap_frequency):
+        """Returns Alignment with positions filtered based on gap frequency
+
+        Parameters
+        ----------
+        maximum_gap_frequency : float
+            The maximum fraction of the sequences that can contain a gap at a
+            given position for that position to be retained in the resulting
+            `Alignment`.
+
+        Returns
+        -------
+        Alignment
+            The subalignment containing only the positions with gaps in fewer
+            than (or equal to) `maximum_gap_frequency` fraction of the
+            sequences.
+
+        Examples
+        --------
+        >>> from skbio.alignment import Alignment
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('AC--', id="seq1"),
+        ...              DNA('AT-C', id="seq2"),
+        ...              DNA('TT-C', id="seq3")]
+        >>> a1 = Alignment(sequences)
+        >>> a2 = a1.omit_gap_positions(0.50)
+        >>> a2
+        <Alignment: n=3; mean +/- std length=3.00 +/- 0.00>
+        >>> print(a2[0])
+        AC-
+        >>> print(a2[1])
+        ATC
+        >>> print(a2[2])
+        TTC
+
+        """
+        # handle empty Alignment case
+        if self.is_empty():
+            return self.__class__([])
+
+        position_frequencies = self.position_frequencies()
+        gap_alphabet = self[0].gap_alphabet()
+
+        positions_to_keep = []
+        for i, f in enumerate(position_frequencies):
+            gap_frequency = sum([f[c] for c in gap_alphabet])
+            if gap_frequency <= maximum_gap_frequency:
+                positions_to_keep.append(i)
+        return self.subalignment(positions_to_keep=positions_to_keep)
+
+    def omit_gap_sequences(self, maximum_gap_frequency):
+        """Returns Alignment with sequences filtered based on gap frequency
+
+        Parameters
+        ----------
+        maximum_gap_frequency : float
+            The maximum fraction of the positions that can contain a gap in a
+            given sequence for that sequence to be retained in the resulting
+            `Alignment`.
+
+        Returns
+        -------
+        Alignment
+            The subalignment containing only the sequences with gaps in fewer
+            than (or equal to) `maximum_gap_frequency` fraction of the
+            positions.
+
+        Examples
+        --------
+        >>> from skbio.alignment import Alignment
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('AC--', id="seq1"),
+        ...              DNA('AT-C', id="seq2"),
+        ...              DNA('TT-C', id="seq3")]
+        >>> a1 = Alignment(sequences)
+        >>> a2 = a1.omit_gap_sequences(0.49)
+        >>> a2
+        <Alignment: n=2; mean +/- std length=4.00 +/- 0.00>
+        >>> print(a2[0])
+        AT-C
+        >>> print(a2[1])
+        TT-C
+
+        """
+        # handle empty Alignment case
+        if self.is_empty():
+            return self.__class__([])
+
+        base_frequencies = self.k_word_frequencies(k=1)
+        gap_alphabet = self[0].gap_alphabet()
+        seqs_to_keep = []
+        for seq, f in zip(self, base_frequencies):
+            gap_frequency = sum([f[c] for c in gap_alphabet])
+            if gap_frequency <= maximum_gap_frequency:
+                seqs_to_keep.append(seq.id)
+        return self.subalignment(seqs_to_keep=seqs_to_keep)
+
+    def position_counters(self):
+        """Return collections.Counter object for positions in Alignment
+
+        Returns
+        -------
+        list
+            List of ``collections.Counter`` objects, one for each position in
+            the `Alignment`.
+
+        See Also
+        --------
+        position_frequencies
+        position_entropies
+
+        Examples
+        --------
+        >>> from skbio.alignment import Alignment
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('AC--', id="seq1"),
+        ...              DNA('AT-C', id="seq2"),
+        ...              DNA('TT-C', id="seq3")]
+        >>> a1 = Alignment(sequences)
+        >>> for counter in a1.position_counters():
+        ...     print(counter)
+        Counter({'A': 2, 'T': 1})
+        Counter({'T': 2, 'C': 1})
+        Counter({'-': 3})
+        Counter({'C': 2, '-': 1})
+
+        """
+        return [Counter(p) for p in self.iter_positions(constructor=str)]
+
+    def position_frequencies(self):
+        """Return frequencies of characters for positions in Alignment
+
+        Returns
+        -------
+        list
+            List of ``collection.defaultdict`` objects, one for each position
+            in the `Alignment`, representing the frequency of each character in
+            the `Alignment` at that position.
+
+        See Also
+        --------
+        position_counters
+        position_entropies
+        k_word_frequencies
+
+        Examples
+        --------
+        >>> from skbio.alignment import Alignment
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('AC--', id="seq1"),
+        ...              DNA('AT-C', id="seq2"),
+        ...              DNA('TT-C', id="seq3")]
+        >>> a1 = Alignment(sequences)
+        >>> position_freqs = a1.position_frequencies()
+        >>> round(position_freqs[0]['A'], 3)
+        0.667
+        >>> round(position_freqs[1]['A'], 3)
+        0.0
+
+        """
+        seq_count = self.sequence_count()
+        result = []
+        for pos_counter in self.position_counters():
+            freqs = defaultdict(float)
+            for char, count in viewitems(pos_counter):
+                freqs[char] = count / seq_count
+            result.append(freqs)
+        return result
+
+    def position_entropies(self, base=None,
+                           nan_on_non_standard_chars=True):
+        """Return Shannon entropy of positions in Alignment
+
+        Parameters
+        ----------
+        base : float, optional
+            log base for entropy calculation. If not passed, default will be e
+            (i.e., natural log will be computed).
+        nan_on_non_standard_chars : bool, optional
+            if True, the entropy at positions containing characters outside of
+            the first sequence's `iupac_standard_characters` will be `np.nan`.
+            This is useful, and the default behavior, as it's not clear how a
+            gap or degenerate character should contribute to a positional
+            entropy. This issue was described in [1]_.
+
+        Returns
+        -------
+        list
+            List of floats of Shannon entropy at `Alignment` positions. Shannon
+            entropy is defined in [2]_.
+
+        See Also
+        --------
+        position_counters
+        position_frequencies
+
+        References
+        ----------
+        .. [1] Identifying DNA and protein patterns with statistically
+           significant alignments of multiple sequences.
+           Hertz GZ, Stormo GD.
+           Bioinformatics. 1999 Jul-Aug;15(7-8):563-77.
+        .. [2] A Mathematical Theory of Communication
+           CE Shannon
+           The Bell System Technical Journal (1948).
+
+        Examples
+        --------
+        >>> from skbio.alignment import Alignment
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('AC--', id="seq1"),
+        ...              DNA('AT-C', id="seq2"),
+        ...              DNA('TT-C', id="seq3")]
+        >>> a1 = Alignment(sequences)
+        >>> print(a1.position_entropies())
+        [0.63651416829481278, 0.63651416829481278, nan, nan]
+
+        """
+        result = []
+        # handle empty Alignment case
+        if self.is_empty():
+            return result
+
+        iupac_standard_characters = self[0].iupac_standard_characters()
+        for f in self.position_frequencies():
+            if (nan_on_non_standard_chars and
+                    len(viewkeys(f) - iupac_standard_characters) > 0):
+                result.append(np.nan)
+            else:
+                result.append(entropy(list(f.values()), base=base))
+        return result
+
+    def sequence_length(self):
+        """Return the number of positions in Alignment
+
+        Returns
+        -------
+        int
+            The number of positions in `Alignment`.
+
+        See Also
+        --------
+        sequence_lengths
+        sequence_count
+
+        Examples
+        --------
+        >>> from skbio.alignment import Alignment
+        >>> from skbio.sequence import DNA
+        >>> sequences = [DNA('AC--', id="seq1"),
+        ...              DNA('AT-C', id="seq2"),
+        ...              DNA('TT-C', id="seq3")]
+        >>> a1 = Alignment(sequences)
+        >>> a1.sequence_length()
+        4
+
+        """
+        # handle the empty Alignment case
+        if self.is_empty():
+            return 0
+        else:
+            return len(self._data[0])
+
    def to_phylip(self, map_labels=False, label_prefix=""):
        """Return phylip-formatted string representing the `SequenceCollection`

        .. note:: Deprecated in scikit-bio 0.2.0-dev
           ``Alignment.to_phylip`` will be removed in scikit-bio 0.3.0. It is
           replaced by ``Alignment.write``, which is a more general method for
           serializing alignments. ``Alignment.write`` supports multiple file
           formats by taking advantage of scikit-bio's I/O registry system. See
           :mod:`skbio.io` for more details.

        Parameters
        ----------
        map_labels : bool, optional
            If ``True``, sequence ids are remapped (via ``update_ids``)
            before serialization and the mapping is returned alongside the
            formatted string.
        label_prefix : str, optional
            Prefix passed to ``update_ids`` when `map_labels` is ``True``;
            ignored otherwise.

        Returns
        -------
        tuple of (str, dict)
            A phylip-formatted string representing the `Alignment`, and a
            dict mapping the ids used in the output back to the original
            sequence ids (an identity mapping when `map_labels` is
            ``False``).

        Raises
        ------
        SequenceCollectionError
            If the `Alignment` contains no sequences or no positions.

        See Also
        --------
        write

        """
        warnings.warn(
            "Alignment.to_phylip is deprecated and will be removed in "
            "scikit-bio 0.3.0. Please update your code to use "
            "Alignment.write.", DeprecationWarning)

        if self.is_empty():
            raise SequenceCollectionError("PHYLIP-formatted string can only "
                                          "be generated if there is at least "
                                          "one sequence in the Alignment.")

        sequence_length = self.sequence_length()
        if sequence_length == 0:
            raise SequenceCollectionError("PHYLIP-formatted string can only "
                                          "be generated if there is at least "
                                          "one position in the Alignment.")

        ids = self.ids()
        sequence_count = self.sequence_count()
        # PHYLIP header line: "<n sequences> <n positions>"
        result = ["%d %d" % (sequence_count, sequence_length)]
        if map_labels:
            # remap ids and build the reverse lookup used while writing rows
            _, new_id_to_old_id = self.update_ids(prefix=label_prefix)
            old_id_to_new_id = {v: k for k, v in new_id_to_old_id.items()}
        else:
            # identity mappings keep the row-writing loop uniform below
            new_id_to_old_id = {seq_id: seq_id for seq_id in ids}
            old_id_to_new_id = new_id_to_old_id

        for seq_id in ids:
            new_id = old_id_to_new_id[seq_id]
            seq = self[seq_id]
            result.append("%s %s" % (new_id, str(seq)))

        return '\n'.join(result), new_id_to_old_id
+
+    def _validate_lengths(self):
+        """Return ``True`` if all sequences same length, ``False`` otherwise
+        """
+        seq1_length = self.sequence_length()
+        for seq in self:
+            if seq1_length != len(seq):
+                return False
+        return True
+
+
+class StockholmAlignment(Alignment):
+    """Contains the metadata information in a Stockholm file alignment
+
+    Parameters
+    ----------
+    seqs : list of `skbio.sequence.BiologicalSequence` objects
+        The `skbio.sequence.BiologicalSequence` objects to load.
+    gf : dict, optional
+        GF info in the format {feature: info}
+    gs : dict of dicts, optional
+        GS info in the format {feature: {seqlabel: info}}
+    gr : dict of dicts, optional
+        GR info in the format {feature: {seqlabel: info}}
+    gc : dict, optional
+        GC info in the format {feature: info}
+
+    Notes
+    -----
+    The Stockholm format is described in [1]_ and [2]_.
+
+    If there are multiple references, include information for each R* line
+    as a list, with reference 0 information in position 0 for all lists,
+    etc. This list will be broken up into the appropriate bits for each
+    reference on string formatting.
+
+    If there are multiple trees included, use a list to store identifiers
+    and trees, with position 0 holding identifier for tree in position 0,
+    etc.
+
+    References
+    ----------
+    .. [1] http://sonnhammer.sbc.su.se/Stockholm.html
+    .. [2] http://en.wikipedia.org/wiki/Stockholm_format
+
+    Examples
+    --------
+    Assume we have a basic stockholm file with the following contents::
+
+        # STOCKHOLM 1.0
+        seq1         ACC--G-GGGU
+        seq2         TCC--G-GGGA
+        #=GC SS_cons (((.....)))
+        //
+
+    >>> from skbio.sequence import RNA
+    >>> from skbio.alignment import StockholmAlignment
+    >>> from StringIO import StringIO
+    >>> sto_in = StringIO("# STOCKHOLM 1.0\\n"
+    ...                   "seq1     ACC--G-GGGU\\nseq2     TCC--G-GGGA\\n"
+    ...                   "#=GC SS_cons (((.....)))\\n//")
+    >>> sto_records = StockholmAlignment.from_file(sto_in, RNA)
+    >>> sto = next(sto_records)
+    >>> print(sto)
+    # STOCKHOLM 1.0
+    seq1          ACC--G-GGGU
+    seq2          TCC--G-GGGA
+    #=GC SS_cons  (((.....)))
+    //
+    >>> sto.gc
+    {'SS_cons': '(((.....)))'}
+
+    We can also write out information by instantiating the StockholmAlignment
+    object and then printing it.
+
+    >>> from skbio.sequence import RNA
+    >>> from skbio.alignment import StockholmAlignment
+    >>> seqs = [RNA("ACC--G-GGGU", id="seq1"),
+    ...     RNA("TCC--G-GGGA", id="seq2")]
+    >>> gf = {
+    ... "RT": ["TITLE1",  "TITLE2"],
+    ... "RA": ["Auth1;", "Auth2;"],
+    ... "RL": ["J Mol Biol", "Cell"],
+    ... "RM": ["11469857", "12007400"]}
+    >>> sto = StockholmAlignment(seqs, gf=gf)
+    >>> print(sto)
+    # STOCKHOLM 1.0
+    #=GF RN [1]
+    #=GF RM 11469857
+    #=GF RT TITLE1
+    #=GF RA Auth1;
+    #=GF RL J Mol Biol
+    #=GF RN [2]
+    #=GF RM 12007400
+    #=GF RT TITLE2
+    #=GF RA Auth2;
+    #=GF RL Cell
+    seq1          ACC--G-GGGU
+    seq2          TCC--G-GGGA
+    //
+    """
+    def __init__(self, seqs, gf=None, gs=None, gr=None, gc=None,
+                 validate=False):
+        self.gf = gf if gf else {}
+        self.gs = gs if gs else {}
+        self.gr = gr if gr else {}
+        self.gc = gc if gc else {}
+        super(StockholmAlignment, self).__init__(seqs, validate)
+
+    def __str__(self):
+        """Parses StockholmAlignment into a string with stockholm format
+
+        Returns
+        -------
+        str
+            Stockholm formatted string containing all information in the object
+
+        Notes
+        -----
+        If references are included in GF data, the RN lines are automatically
+        generated if not provided.
+
+        """
+
+        # find length of leader info needed to make file pretty
+        # 10 comes from the characters for '#=GF ' and the feature after label
+        infolen = max(len(seq.id) for seq in self._data) + 10
+
+        GF_lines = []
+        GS_lines = []
+        GC_lines = []
+        # NOTE: EVERYTHING MUST BE COERECED TO STR in case int or float passed
+        # add GF information if applicable
+        if self.gf:
+            skipfeatures = set(("NH", "RC", "RM", "RN", "RA", "RL"))
+            for feature, value in self.gf.items():
+                # list of features to skip and parse special later
+                if feature in skipfeatures:
+                    continue
+                # list of features to parse special
+                elif feature == "TN":
+                    # trees must be in proper order of identifier then tree
+                    ident = value if isinstance(value, list) else [value]
+                    tree = self.gf["NH"] if isinstance(self.gf["NH"], list) \
+                        else [self.gf["NH"]]
+                    for ident, tree in zip(self.gf["TN"], self.gf["NH"]):
+                        GF_lines.append(' '.join(["#=GF", "TN", str(ident)]))
+                        GF_lines.append(' '.join(["#=GF", "NH", str(tree)]))
+                elif feature == "RT":
+                    # make sure each reference block stays together
+                    # set up lists to zip in case some bits are missing
+                    # create rn list if needed
+                    default_none = [0]*len(value)
+                    rn = self.gf.get("RN", ["[%i]" % x for x in
+                                     range(1, len(value)+1)])
+                    rm = self.gf.get("RM", default_none)
+                    rt = self.gf.get("RT", default_none)
+                    ra = self.gf.get("RA", default_none)
+                    rl = self.gf.get("RL", default_none)
+                    rc = self.gf.get("RC", default_none)
+                    # order: RN, RM, RT, RA, RL, RC
+                    for n, m, t, a, l, c in zip(rn, rm, rt, ra, rl, rc):
+                        GF_lines.append(' '.join(["#=GF", "RN", n]))
+                        if m:
+                            GF_lines.append(' '.join(["#=GF", "RM", str(m)]))
+                        if t:
+                            GF_lines.append(' '.join(["#=GF", "RT", str(t)]))
+                        if a:
+                            GF_lines.append(' '.join(["#=GF", "RA", str(a)]))
+                        if l:
+                            GF_lines.append(' '.join(["#=GF", "RL", str(l)]))
+                        if c:
+                            GF_lines.append(' '.join(["#=GF", "RC", str(c)]))
+                else:
+                    # normal addition for everything else
+                    if not isinstance(value, list):
+                        value = [value]
+                    for val in value:
+                        GF_lines.append(' '.join(["#=GF", feature, str(val)]))
+
+        # add GS information if applicable
+        if self.gs:
+            for feature in self.gs:
+                for seqname in self.gs[feature]:
+                    GS_lines.append(' '.join(["#=GS", seqname, feature,
+                                             str(self.gs[feature][seqname])]))
+
+        # add GC information if applicable
+        if self.gc:
+            for feature, value in viewitems(self.gc):
+                leaderinfo = ' '.join(["#=GC", feature])
+                spacer = ' ' * (infolen - len(leaderinfo))
+                GC_lines.append(spacer.join([leaderinfo,
+                                             str(self.gc[feature])]))
+
+        sto_lines = ["# STOCKHOLM 1.0"] + GF_lines + GS_lines
+        # create seq output along with GR info if applicable
+        for label, seq in self.iteritems():
+            spacer = ' ' * (infolen - len(label))
+            sto_lines.append(spacer.join([label, str(seq)]))
+            # GR info added for sequence
+            for feature in viewkeys(self.gr):
+                value = self.gr[feature][label]
+                leaderinfo = ' '.join(['#=GR', label, feature])
+                spacer = ' ' * (infolen - len(leaderinfo))
+                sto_lines.append(spacer.join([leaderinfo, value]))
+
+        sto_lines.extend(GC_lines)
+        # add final slashes to end of file
+        sto_lines.append('//')
+
+        return '\n'.join(sto_lines)
+
+    def to_file(self, out_f):
+        r"""Save the alignment to file in text format.
+
+        Parameters
+        ----------
+        out_f : file-like object or filename
+            File-like object to write serialized data to, or name of
+            file. If it's a file-like object, it must have a ``write``
+            method, and it won't be closed. Else, it is opened and
+            closed after writing.
+
+        See Also
+        --------
+        from_file
+        """
+        with open_file(out_f, 'w') as out_f:
+            out_f.write(self.__str__())
+
+    @staticmethod
+    def _parse_gf_info(lines):
+        """Takes care of parsing GF lines in stockholm plus special cases"""
+        parsed = defaultdict(list)
+        # needed for making each multi-line RT and NH one string
+        rt = []
+        nh = []
+        lastline = ""
+        for line in lines:
+            try:
+                init, feature, content = line.split(None, 2)
+            except ValueError:
+                raise StockholmParseError("Malformed GF line encountered!"
+                                          "\n%s" % line.split(None, 2))
+            if init != "#=GF":
+                raise StockholmParseError("Non-GF line encountered!")
+
+            # take care of adding multiline RT to the parsed information
+            if lastline == "RT" and feature != "RT":
+                # add rt line to the parsed dictionary
+                rtline = " ".join(rt)
+                rt = []
+                parsed["RT"].append(rtline)
+            elif feature == "RT":
+                rt.append(content)
+                lastline = feature
+                continue
+
+            # Take care of adding multiline NH to the parsed dictionary
+            elif lastline == "NH" and feature != "NH":
+                nhline = " ".join(nh)
+                nh = []
+                parsed["NH"].append(nhline)
+            elif feature == "NH":
+                nh.append(content)
+                lastline = feature
+                continue
+
+            # add current feature to the parsed information
+            parsed[feature].append(content)
+            lastline = feature
+
+        # removing unneccessary lists from parsed. Use .items() for py3 support
+        for feature, value in parsed.items():
+            # list of multi-line features to join into single string if needed
+            if feature in ["CC"]:
+                parsed[feature] = ' '.join(value)
+            elif len(parsed[feature]) == 1:
+                parsed[feature] = value[0]
+        return parsed
+
+    @staticmethod
+    def _parse_gc_info(lines, strict=False, seqlen=-1):
+        """Takes care of parsing GC lines in stockholm format"""
+        parsed = {}
+        for line in lines:
+            try:
+                init, feature, content = line.split(None, 2)
+            except ValueError:
+                raise StockholmParseError("Malformed GC line encountered!\n%s"
+                                          % line.split(None, 2))
+            if init != "#=GC":
+                raise StockholmParseError("Non-GC line encountered!")
+
+            # add current feature to the parsed information
+            if feature in parsed:
+                if strict:
+                    raise StockholmParseError("Should not have multiple lines "
+                                              "with the same feature: %s" %
+                                              feature)
+            else:
+                parsed[feature] = [content]
+
+        # removing unneccessary lists from parsed. Use .items() for py3 support
+        for feature, value in parsed.items():
+            parsed[feature] = ''.join(value)
+            if strict:
+                if len(value) != seqlen:
+                    raise StockholmParseError("GC must have exactly one char "
+                                              "per position in alignment!")
+
+        return parsed
+
+    @staticmethod
+    def _parse_gs_gr_info(lines, strict=False, seqlen=-1):
+        """Takes care of parsing GS and GR lines in stockholm format"""
+        parsed = {}
+        parsetype = ""
+        for line in lines:
+            try:
+                init, label, feature, content = line.split(None, 3)
+            except ValueError:
+                raise StockholmParseError("Malformed GS/GR line encountered!"
+                                          "\n%s" % line.split(None, 3))
+            if parsetype == "":
+                parsetype = init
+            elif init != parsetype:
+                    raise StockholmParseError("Non-GS/GR line encountered!")
+
+            # parse each line, taking into account interleaved format
+            if feature in parsed and label in parsed[feature]:
+                # interleaved format, so need list of content
+                parsed[feature][label].append(content)
+            else:
+                parsed[feature] = {label: [content]}
+
+        # join all the crazy lists created during parsing
+        for feature in parsed:
+            for label, content in parsed[feature].items():
+                parsed[feature][label] = ''.join(content)
+                if strict:
+                    if len(parsed[feature][label]) != seqlen:
+                        raise StockholmParseError("GR must have exactly one "
+                                                  "char per position in the "
+                                                  "alignment!")
+        return parsed
+
@classmethod
def from_file(cls, infile, seq_constructor, strict=False):
    r"""yields StockholmAlignment objects from a stockholm file.

    Parameters
    ----------
    infile : open file object
        An open stockholm file.

    seq_constructor : BiologicalSequence object
        The BiologicalSequence subclass that corresponds to what the
        stockholm file holds. See skbio.sequence

    strict : bool (optional)
        Turns on strict parsing of GR and GC lines to ensure one char per
        position. Default: False

    Returns
    -------
    Iterator of StockholmAlignment objects

    Raises
    ------
    skbio.alignment.StockholmParseError
        If any lines are found that don't conform to stockholm format
    """
    # make sure the mandatory first line is correct
    line = infile.readline()
    if not line.startswith("# STOCKHOLM 1.0"):
        raise StockholmParseError("Incorrect header found")
    gs_lines = []
    gf_lines = []
    gr_lines = []
    gc_lines = []
    # OrderedDict used so sequences maintain same order as in file
    seqs = OrderedDict()
    for line in infile:
        line = line.strip()
        if line == "" or line.startswith("# S"):
            # skip blank lines or secondary headers
            continue
        elif line == "//":
            # parse the record since we are at its end
            # build the sequence list for alignment construction
            seqs = [seq_constructor(seq, id=_id) for _id, seq in
                    viewitems(seqs)]
            # get length of sequences in the alignment.
            # BUG FIX: was ``len(seqs[0][1])``, which measures a single
            # indexed position of the first sequence (always length 1)
            # rather than the sequence itself, so the strict GR/GC length
            # checks failed for any real alignment.
            # NOTE(review): a record with no sequence lines would raise
            # IndexError here — confirm whether such input can occur.
            seqlen = len(seqs[0])

            # parse information lines
            gf = cls._parse_gf_info(gf_lines)
            gs = cls._parse_gs_gr_info(gs_lines)
            gr = cls._parse_gs_gr_info(gr_lines, strict, seqlen)
            gc = cls._parse_gc_info(gc_lines, strict, seqlen)

            # yield the actual stockholm object
            yield cls(seqs, gf, gs, gr, gc)

            # reset all storage variables for the next record in the file
            gs_lines = []
            gf_lines = []
            gr_lines = []
            gc_lines = []
            seqs = OrderedDict()
        # add the metadata lines to the proper lists
        elif line.startswith("#=GF"):
            gf_lines.append(line)
        elif line.startswith("#=GS"):
            gs_lines.append(line)
        elif line.startswith("#=GR"):
            gr_lines.append(line)
        elif line.startswith("#=GC"):
            gc_lines.append(line)
        else:
            lineinfo = line.split()
            # assume sequence since nothing else in format is left
            # in case of interleaved format, need to do check
            if lineinfo[0] in seqs:
                sequence = seqs[lineinfo[0]]
                seqs[lineinfo[0]] = ''.join([sequence, lineinfo[1]])
            else:
                seqs[lineinfo[0]] = lineinfo[1]
diff --git a/skbio/alignment/_exception.py b/skbio/alignment/_exception.py
new file mode 100644
index 0000000..0577b8a
--- /dev/null
+++ b/skbio/alignment/_exception.py
@@ -0,0 +1,26 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from skbio.io import FileFormatError
+
+
class SequenceCollectionError(Exception):
    """Raised when validation of a sequence collection fails."""
+
+
class AlignmentError(SequenceCollectionError):
    """Raised when validation of an alignment fails."""
+
+
class StockholmParseError(FileFormatError):
    """Raised when a Stockholm formatted file cannot be parsed."""
diff --git a/skbio/alignment/_lib/__init__.py b/skbio/alignment/_lib/__init__.py
new file mode 100644
index 0000000..610d868
--- /dev/null
+++ b/skbio/alignment/_lib/__init__.py
@@ -0,0 +1,10 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
# Hook up numpy's test runner so this subpackage's tests can be run via
# ``skbio.alignment._lib.test()``.
# NOTE(review): numpy.testing.Tester was deprecated and later removed from
# numpy (>= 1.25); this hook only works with numpy versions contemporary
# with this release.
from numpy.testing import Tester
test = Tester().test
diff --git a/skbio/alignment/_lib/ssw.c b/skbio/alignment/_lib/ssw.c
new file mode 100644
index 0000000..9b2398a
--- /dev/null
+++ b/skbio/alignment/_lib/ssw.c
@@ -0,0 +1,861 @@
+/* The MIT License
+
+   Copyright (c) 2012-2015 Boston College.
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be
+   included in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE.    
+*/
+
+/* Contact: Mengyao Zhao <zhangmp at bc.edu> */
+
+/*
+ *  ssw.c
+ *
+ *  Created by Mengyao Zhao on 6/22/10.
+ *  Copyright 2010 Boston College. All rights reserved.
+ *  Version 0.1.4
+ *  Last revision by Mengyao Zhao on 12/07/12.
+ *
+ */
+
+#include <emmintrin.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include "ssw.h"
+
/* Branch-prediction hints; compile away to a plain (x) on non-GCC compilers. */
#ifdef __GNUC__
#define LIKELY(x) __builtin_expect((x),1)
#define UNLIKELY(x) __builtin_expect((x),0)
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif

/* Convert the coordinate in the scoring matrix into the coordinate in one line of the band. */
#define set_u(u, w, i, j) { int x=(i)-(w); x=x>0?x:0; (u)=(j)-x+1; }

/* Convert the coordinate in the direction matrix into the coordinate in one line of the band.
   p selects one of the 3 direction entries (0 = E, 1 = F, 2 = H) stored per cell. */
#define set_d(u, w, i, j, p) { int x=(i)-(w); x=x>0?x:0; x=(j)-x; (u)=x*3+p; }

/*! @function
  @abstract  Round an integer to the next closest power-2 integer.
  @param  x  integer to be rounded (in place)
  @discussion x will be modified.
 */
#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
+
/* Score and 0-based end coordinates of one local alignment. */
typedef struct {
    uint16_t score;
    int32_t ref;     //0-based position
    int32_t read;    //alignment ending position on read, 0-based
} alignment_end;

/* CIGAR encoded as 32-bit entries: (run length << 4) | op code
   (see banded_sw: 0 = M, 1 = I, 2 = D). */
typedef struct {
    uint32_t* seq;
    int32_t length;
} cigar;

/* Pre-computed query profiles plus the inputs they were built from. */
struct _profile{
    __m128i* profile_byte;  // 0: none
    __m128i* profile_word;  // 0: none
    const int8_t* read;
    const int8_t* mat;      /* scoring matrix, n x n */
    int32_t readLen;
    int32_t n;              /* edge length of the square scoring matrix */
    uint8_t bias;           /* shift added so 8-bit scores stay non-negative */
};
+
+/* Generate query profile rearrange query sequence & calculate the weight of match/mismatch. */
/* Generate the 8-bit query profile: rearrange the query sequence and
   pre-compute the biased match/mismatch weight for every (nucleotide,
   read position) pair.  Caller frees the returned buffer.
   NOTE(review): the malloc result is not checked for NULL. */
__m128i* qP_byte (const int8_t* read_num,
                  const int8_t* mat,
                  const int32_t readLen,
                  const int32_t n,  /* the edge length of the square matrix mat */
                  uint8_t bias) {

    int32_t segLen = (readLen + 15) / 16; /* Split the 128 bit register into 16 pieces.
                                     Each piece is 8 bit. Split the read into 16 segments.
                                     Calculate 16 segments in parallel.
                                   */
    __m128i* vProfile = (__m128i*)malloc(n * segLen * sizeof(__m128i));
    int8_t* t = (int8_t*)vProfile;
    int32_t nt, i, j, segNum;

    /* Generate query profile rearrange query sequence & calculate the weight of match/mismatch */
    for (nt = 0; LIKELY(nt < n); nt ++) {
        for (i = 0; i < segLen; i ++) {
            j = i;
            for (segNum = 0; LIKELY(segNum < 16) ; segNum ++) {
                /* positions past the read end get the neutral value `bias` */
                *t++ = j>= readLen ? bias : mat[nt * n + read_num[j]] + bias;
                j += segLen;
            }
        }
    }
    return vProfile;
}
+
/* Striped Smith-Waterman
   Record the highest score of each reference position.
   Return the alignment score and ending position of the best alignment, 2nd best alignment, etc.
   Gap begin and gap extension are different.
   weight_match > 0, all other weights < 0.
   The returned positions are 0-based.
   Caller owns the returned 2-element array (bests[0] = best,
   bests[1] = second best outside the maskLen window) and must free() it.
 */
alignment_end* sw_sse2_byte (const int8_t* ref,
                             int8_t ref_dir,    // 0: forward ref; 1: reverse ref
                             int32_t refLen,
                             int32_t readLen,
                             const uint8_t weight_gapO, /* will be used as - */
                             const uint8_t weight_gapE, /* will be used as - */
                             __m128i* vProfile,
                             uint8_t terminate, /* the best alignment score: used to terminate
                                                   the matrix calculation when locating the
                                                   alignment beginning point. If this score
                                                   is set to 0, it will not be used */
                             uint8_t bias,  /* Shift 0 point to a positive value. */
                             int32_t maskLen) {

/* Horizontal maximum of the 16 unsigned bytes in vm, left in m. */
#define max16(m, vm) (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 8)); \
                      (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 4)); \
                      (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 2)); \
                      (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 1)); \
                      (m) = _mm_extract_epi16((vm), 0)

    uint8_t max = 0;                             /* the max alignment score */
    int32_t end_read = readLen - 1;
    int32_t end_ref = -1; /* 0_based best alignment ending point; Initialized as isn't aligned -1. */
    int32_t segLen = (readLen + 15) / 16; /* number of segment */

    /* array to record the largest score of each reference position */
    uint8_t* maxColumn = (uint8_t*) calloc(refLen, 1);

    /* array to record the alignment read ending position of the largest score of each reference position */
    int32_t* end_read_column = (int32_t*) calloc(refLen, sizeof(int32_t));

    /* Define 16 byte 0 vector. */
    __m128i vZero = _mm_set1_epi32(0);

    __m128i* pvHStore = (__m128i*) calloc(segLen, sizeof(__m128i));
    __m128i* pvHLoad = (__m128i*) calloc(segLen, sizeof(__m128i));
    __m128i* pvE = (__m128i*) calloc(segLen, sizeof(__m128i));
    __m128i* pvHmax = (__m128i*) calloc(segLen, sizeof(__m128i));

    int32_t i, j;
    /* 16 byte insertion begin vector */
    __m128i vGapO = _mm_set1_epi8(weight_gapO);

    /* 16 byte insertion extension vector */
    __m128i vGapE = _mm_set1_epi8(weight_gapE);

    /* 16 byte bias vector */
    __m128i vBias = _mm_set1_epi8(bias);

    __m128i vMaxScore = vZero; /* Trace the highest score of the whole SW matrix. */
    __m128i vMaxMark = vZero; /* Trace the highest score till the previous column. */
    __m128i vTemp;
    int32_t edge, begin = 0, end = refLen, step = 1;
//  int32_t distance = readLen * 2 / 3;
//  int32_t distance = readLen / 2;
//  int32_t distance = readLen;

    /* outer loop to process the reference sequence */
    if (ref_dir == 1) {
        begin = refLen - 1;
        end = -1;
        step = -1;
    }
    for (i = begin; LIKELY(i != end); i += step) {
        int32_t cmp;
        __m128i e = vZero, vF = vZero, vMaxColumn = vZero; /* Initialize F value to 0.
                               Any errors to vH values will be corrected in the Lazy_F loop.
                             */
//      max16(maxColumn[i], vMaxColumn);
//      fprintf(stderr, "middle[%d]: %d\n", i, maxColumn[i]);

        __m128i vH = pvHStore[segLen - 1];
        vH = _mm_slli_si128 (vH, 1); /* Shift the 128-bit value in vH left by 1 byte. */
        __m128i* vP = vProfile + ref[i] * segLen; /* Right part of the vProfile */

        /* Swap the 2 H buffers. */
        __m128i* pv = pvHLoad;
        pvHLoad = pvHStore;
        pvHStore = pv;

        /* inner loop to process the query sequence */
        for (j = 0; LIKELY(j < segLen); ++j) {
            vH = _mm_adds_epu8(vH, _mm_load_si128(vP + j));
            vH = _mm_subs_epu8(vH, vBias); /* vH will be always > 0 */
    //  max16(maxColumn[i], vH);
    //  fprintf(stderr, "H[%d]: %d\n", i, maxColumn[i]);
//  int8_t* t;
//  int32_t ti;
//for (t = (int8_t*)&vH, ti = 0; ti < 16; ++ti) fprintf(stderr, "%d\t", *t++);

            /* Get max from vH, vE and vF. */
            e = _mm_load_si128(pvE + j);
            vH = _mm_max_epu8(vH, e);
            vH = _mm_max_epu8(vH, vF);
            vMaxColumn = _mm_max_epu8(vMaxColumn, vH);

    //  max16(maxColumn[i], vMaxColumn);
    //  fprintf(stderr, "middle[%d]: %d\n", i, maxColumn[i]);
//  for (t = (int8_t*)&vMaxColumn, ti = 0; ti < 16; ++ti) fprintf(stderr, "%d\t", *t++);

            /* Save vH values. */
            _mm_store_si128(pvHStore + j, vH);

            /* Update vE value. */
            vH = _mm_subs_epu8(vH, vGapO); /* saturation arithmetic, result >= 0 */
            e = _mm_subs_epu8(e, vGapE);
            e = _mm_max_epu8(e, vH);
            _mm_store_si128(pvE + j, e);

            /* Update vF value. */
            vF = _mm_subs_epu8(vF, vGapE);
            vF = _mm_max_epu8(vF, vH);

            /* Load the next vH. */
            vH = _mm_load_si128(pvHLoad + j);
        }

        /* Lazy_F loop: has been revised to disallow adjacent insertion and then deletion, so don't update E(i, j), learn from SWPS3 */
        /* reset pointers to the start of the saved data */
        j = 0;
        vH = _mm_load_si128 (pvHStore + j);

        /*  the computed vF value is for the given column.  since */
        /*  we are at the end, we need to shift the vF value over */
        /*  to the next column. */
        vF = _mm_slli_si128 (vF, 1);
        vTemp = _mm_subs_epu8 (vH, vGapO);
        vTemp = _mm_subs_epu8 (vF, vTemp);
        vTemp = _mm_cmpeq_epi8 (vTemp, vZero);
        cmp  = _mm_movemask_epi8 (vTemp);

        /* keep re-sweeping the column until no lane's F value can still
           improve the stored H values */
        while (cmp != 0xffff)
        {
            vH = _mm_max_epu8 (vH, vF);
            vMaxColumn = _mm_max_epu8(vMaxColumn, vH);
            _mm_store_si128 (pvHStore + j, vH);
            vF = _mm_subs_epu8 (vF, vGapE);
            j++;
            if (j >= segLen)
            {
                j = 0;
                vF = _mm_slli_si128 (vF, 1);
            }
            vH = _mm_load_si128 (pvHStore + j);

            vTemp = _mm_subs_epu8 (vH, vGapO);
            vTemp = _mm_subs_epu8 (vF, vTemp);
            vTemp = _mm_cmpeq_epi8 (vTemp, vZero);
            cmp  = _mm_movemask_epi8 (vTemp);
        }

        vMaxScore = _mm_max_epu8(vMaxScore, vMaxColumn);
        vTemp = _mm_cmpeq_epi8(vMaxMark, vMaxScore);
        cmp = _mm_movemask_epi8(vTemp);
        if (cmp != 0xffff) {
            uint8_t temp;
            vMaxMark = vMaxScore;
            max16(temp, vMaxScore);
            vMaxScore = vMaxMark;

            if (LIKELY(temp > max)) {
                max = temp;
                if (max + bias >= 255) break;   //overflow: caller should retry with the 16-bit kernel
                end_ref = i;

                /* Store the column with the highest alignment score in order to trace the alignment ending position on read. */
                for (j = 0; LIKELY(j < segLen); ++j) pvHmax[j] = pvHStore[j];
            }
        }

        /* Record the max score of current column. */
        max16(maxColumn[i], vMaxColumn);
//      fprintf(stderr, "maxColumn[%d]: %d\n", i, maxColumn[i]);
        if (maxColumn[i] == terminate) break;
    }

    /* Trace the alignment ending position on read. */
    uint8_t *t = (uint8_t*)pvHmax;
    int32_t column_len = segLen * 16;
    for (i = 0; LIKELY(i < column_len); ++i, ++t) {
        int32_t temp;
        if (*t == max) {
            /* un-stripe the index: i / 16 is the segment offset, i % 16 the segment number */
            temp = i / 16 + i % 16 * segLen;
            if (temp < end_read) end_read = temp;
        }
    }

    free(pvHmax);
    free(pvE);
    free(pvHLoad);
    free(pvHStore);

    /* Find the most possible 2nd best alignment. */
    alignment_end* bests = (alignment_end*) calloc(2, sizeof(alignment_end));
    bests[0].score = max + bias >= 255 ? 255 : max;
    bests[0].ref = end_ref;
    bests[0].read = end_read;

    bests[1].score = 0;
    bests[1].ref = 0;
    bests[1].read = 0;

    /* scan the columns outside the maskLen window around the best end point */
    edge = (end_ref - maskLen) > 0 ? (end_ref - maskLen) : 0;
    for (i = 0; i < edge; i ++) {
//          fprintf (stderr, "maxColumn[%d]: %d\n", i, maxColumn[i]);
        if (maxColumn[i] > bests[1].score) {
            bests[1].score = maxColumn[i];
            bests[1].ref = i;
        }
    }
    edge = (end_ref + maskLen) > refLen ? refLen : (end_ref + maskLen);
    for (i = edge + 1; i < refLen; i ++) {
//          fprintf (stderr, "refLen: %d\tmaxColumn[%d]: %d\n", refLen, i, maxColumn[i]);
        if (maxColumn[i] > bests[1].score) {
            bests[1].score = maxColumn[i];
            bests[1].ref = i;
        }
    }

    free(maxColumn);
    free(end_read_column);
    return bests;
}
+
/* Generate the 16-bit query profile; same layout as qP_byte but with
   8 lanes per register and, unlike the byte version, no bias is added.
   Caller frees the returned buffer. */
__m128i* qP_word (const int8_t* read_num,
                  const int8_t* mat,
                  const int32_t readLen,
                  const int32_t n) {

    int32_t segLen = (readLen + 7) / 8; /* split the read into 8 segments processed in parallel */
    __m128i* vProfile = (__m128i*)malloc(n * segLen * sizeof(__m128i));
    int16_t* t = (int16_t*)vProfile;
    int32_t nt, i, j;
    int32_t segNum;

    /* Generate query profile rearrange query sequence & calculate the weight of match/mismatch */
    for (nt = 0; LIKELY(nt < n); nt ++) {
        for (i = 0; i < segLen; i ++) {
            j = i;
            for (segNum = 0; LIKELY(segNum < 8) ; segNum ++) {
                /* positions past the read end get a neutral 0 */
                *t++ = j>= readLen ? 0 : mat[nt * n + read_num[j]];
                j += segLen;
            }
        }
    }
    return vProfile;
}
+
/* 16-bit (word) flavour of the striped Smith-Waterman kernel; used when the
   8-bit kernel would overflow.  Semantics mirror sw_sse2_byte: returns a
   2-element alignment_end array (best, second best) that the caller frees. */
alignment_end* sw_sse2_word (const int8_t* ref,
                             int8_t ref_dir,    // 0: forward ref; 1: reverse ref
                             int32_t refLen,
                             int32_t readLen,
                             const uint8_t weight_gapO, /* will be used as - */
                             const uint8_t weight_gapE, /* will be used as - */
                             __m128i* vProfile,
                             uint16_t terminate,
                             int32_t maskLen) {

/* Horizontal maximum of the 8 signed words in vm, left in m. */
#define max8(m, vm) (vm) = _mm_max_epi16((vm), _mm_srli_si128((vm), 8)); \
                    (vm) = _mm_max_epi16((vm), _mm_srli_si128((vm), 4)); \
                    (vm) = _mm_max_epi16((vm), _mm_srli_si128((vm), 2)); \
                    (m) = _mm_extract_epi16((vm), 0)

    uint16_t max = 0;                            /* the max alignment score */
    int32_t end_read = readLen - 1;
    int32_t end_ref = 0; /* 1_based best alignment ending point; Initialized as isn't aligned - 0. */
    int32_t segLen = (readLen + 7) / 8; /* number of segment */

    /* array to record the largest score of each reference position */
    uint16_t* maxColumn = (uint16_t*) calloc(refLen, 2);

    /* array to record the alignment read ending position of the largest score of each reference position */
    int32_t* end_read_column = (int32_t*) calloc(refLen, sizeof(int32_t));

    /* Define 16 byte 0 vector. */
    __m128i vZero = _mm_set1_epi32(0);

    __m128i* pvHStore = (__m128i*) calloc(segLen, sizeof(__m128i));
    __m128i* pvHLoad = (__m128i*) calloc(segLen, sizeof(__m128i));
    __m128i* pvE = (__m128i*) calloc(segLen, sizeof(__m128i));
    __m128i* pvHmax = (__m128i*) calloc(segLen, sizeof(__m128i));

    int32_t i, j, k;
    /* 16 byte insertion begin vector */
    __m128i vGapO = _mm_set1_epi16(weight_gapO);

    /* 16 byte insertion extension vector */
    __m128i vGapE = _mm_set1_epi16(weight_gapE);

    /* running maxima (no bias vector is needed in the 16-bit kernel) */
    __m128i vMaxScore = vZero; /* Trace the highest score of the whole SW matrix. */
    __m128i vMaxMark = vZero; /* Trace the highest score till the previous column. */
    __m128i vTemp;
    int32_t edge, begin = 0, end = refLen, step = 1;

    /* outer loop to process the reference sequence */
    if (ref_dir == 1) {
        begin = refLen - 1;
        end = -1;
        step = -1;
    }
    for (i = begin; LIKELY(i != end); i += step) {
        int32_t cmp;
        __m128i e = vZero, vF = vZero; /* Initialize F value to 0.
                               Any errors to vH values will be corrected in the Lazy_F loop.
                             */
        __m128i vH = pvHStore[segLen - 1];
        vH = _mm_slli_si128 (vH, 2); /* Shift the 128-bit value in vH left by 2 byte. */

        /* Swap the 2 H buffers. */
        __m128i* pv = pvHLoad;

        __m128i vMaxColumn = vZero; /* vMaxColumn is used to record the max values of column i. */

        __m128i* vP = vProfile + ref[i] * segLen; /* Right part of the vProfile */
        pvHLoad = pvHStore;
        pvHStore = pv;

        /* inner loop to process the query sequence */
        for (j = 0; LIKELY(j < segLen); j ++) {
            vH = _mm_adds_epi16(vH, _mm_load_si128(vP + j));

            /* Get max from vH, vE and vF. */
            e = _mm_load_si128(pvE + j);
            vH = _mm_max_epi16(vH, e);
            vH = _mm_max_epi16(vH, vF);
            vMaxColumn = _mm_max_epi16(vMaxColumn, vH);

            /* Save vH values. */
            _mm_store_si128(pvHStore + j, vH);

            /* Update vE value. */
            vH = _mm_subs_epu16(vH, vGapO); /* saturation arithmetic, result >= 0 */
            e = _mm_subs_epu16(e, vGapE);
            e = _mm_max_epi16(e, vH);
            _mm_store_si128(pvE + j, e);

            /* Update vF value. */
            vF = _mm_subs_epu16(vF, vGapE);
            vF = _mm_max_epi16(vF, vH);

            /* Load the next vH. */
            vH = _mm_load_si128(pvHLoad + j);
        }

        /* Lazy_F loop: has been revised to disallow adjacent insertion and then deletion, so don't update E(i, j), learn from SWPS3 */
        for (k = 0; LIKELY(k < 8); ++k) {
            vF = _mm_slli_si128 (vF, 2);
            for (j = 0; LIKELY(j < segLen); ++j) {
                vH = _mm_load_si128(pvHStore + j);
                vH = _mm_max_epi16(vH, vF);
                _mm_store_si128(pvHStore + j, vH);
                vH = _mm_subs_epu16(vH, vGapO);
                vF = _mm_subs_epu16(vF, vGapE);
                /* stop as soon as no lane's F can still improve H */
                if (UNLIKELY(! _mm_movemask_epi8(_mm_cmpgt_epi16(vF, vH)))) goto end;
            }
        }

end:
        vMaxScore = _mm_max_epi16(vMaxScore, vMaxColumn);
        vTemp = _mm_cmpeq_epi16(vMaxMark, vMaxScore);
        cmp = _mm_movemask_epi8(vTemp);
        if (cmp != 0xffff) {
            uint16_t temp;
            vMaxMark = vMaxScore;
            max8(temp, vMaxScore);
            vMaxScore = vMaxMark;

            if (LIKELY(temp > max)) {
                max = temp;
                end_ref = i;
                for (j = 0; LIKELY(j < segLen); ++j) pvHmax[j] = pvHStore[j];
            }
        }

        /* Record the max score of current column. */
        max8(maxColumn[i], vMaxColumn);
        if (maxColumn[i] == terminate) break;
    }

    /* Trace the alignment ending position on read. */
    uint16_t *t = (uint16_t*)pvHmax;
    int32_t column_len = segLen * 8;
    for (i = 0; LIKELY(i < column_len); ++i, ++t) {
        int32_t temp;
        if (*t == max) {
            /* un-stripe the index: i / 8 is the segment offset, i % 8 the segment number */
            temp = i / 8 + i % 8 * segLen;
            if (temp < end_read) end_read = temp;
        }
    }

    free(pvHmax);
    free(pvE);
    free(pvHLoad);
    free(pvHStore);

    /* Find the most possible 2nd best alignment. */
    alignment_end* bests = (alignment_end*) calloc(2, sizeof(alignment_end));
    bests[0].score = max;
    bests[0].ref = end_ref;
    bests[0].read = end_read;

    bests[1].score = 0;
    bests[1].ref = 0;
    bests[1].read = 0;

    edge = (end_ref - maskLen) > 0 ? (end_ref - maskLen) : 0;
    for (i = 0; i < edge; i ++) {
        if (maxColumn[i] > bests[1].score) {
            bests[1].score = maxColumn[i];
            bests[1].ref = i;
        }
    }
    edge = (end_ref + maskLen) > refLen ? refLen : (end_ref + maskLen);
    /* NOTE(review): the byte kernel starts this scan at edge + 1; the
       asymmetry (here the column at `edge` itself is included) looks
       unintentional — confirm against upstream SSW. */
    for (i = edge; i < refLen; i ++) {
        if (maxColumn[i] > bests[1].score) {
            bests[1].score = maxColumn[i];
            bests[1].ref = i;
        }
    }

    free(maxColumn);
    free(end_read_column);
    return bests;
}
+
/* Banded Smith-Waterman used to reconstruct the CIGAR of an alignment whose
   score and end positions are already known.  The band width is doubled
   until the banded score reaches `score`, then the direction matrix is
   traced back.  Returns a heap-allocated cigar whose entries encode
   (run length << 4) | op, op: 0 = M, 1 = I, 2 = D; caller frees
   result->seq and result.  Direction codes written below: 1 = diagonal (M),
   2/3 = E path (I), 4/5 = F path (D). */
cigar* banded_sw (const int8_t* ref,
                 const int8_t* read,
                 int32_t refLen,
                 int32_t readLen,
                 int32_t score,
                 const uint32_t weight_gapO,  /* will be used as - */
                 const uint32_t weight_gapE,  /* will be used as - */
                 int32_t band_width,
                 const int8_t* mat, /* pointer to the weight matrix */
                 int32_t n) {

    uint32_t *c = (uint32_t*)malloc(16 * sizeof(uint32_t)), *c1;
    int32_t i, j, e, f, temp1, temp2, s = 16, s1 = 8, s2 = 1024, l, max = 0;
    int32_t width, width_d, *h_b, *e_b, *h_c;
    int8_t *direction, *direction_line;
    cigar* result = (cigar*)malloc(sizeof(cigar));
    h_b = (int32_t*)malloc(s1 * sizeof(int32_t));
    e_b = (int32_t*)malloc(s1 * sizeof(int32_t));
    h_c = (int32_t*)malloc(s1 * sizeof(int32_t));
    direction = (int8_t*)malloc(s2 * sizeof(int8_t));

    do {
        width = band_width * 2 + 3, width_d = band_width * 2 + 1;
        /* grow the three band-row buffers to the next power of two >= width */
        while (width >= s1) {
            ++s1;
            kroundup32(s1);
            h_b = (int32_t*)realloc(h_b, s1 * sizeof(int32_t));
            e_b = (int32_t*)realloc(e_b, s1 * sizeof(int32_t));
            h_c = (int32_t*)realloc(h_c, s1 * sizeof(int32_t));
        }
        /* grow the direction matrix (3 entries per cell: E, F, H) */
        while (width_d * readLen * 3 >= s2) {
            ++s2;
            kroundup32(s2);
            if (s2 < 0) {
                fprintf(stderr, "Alignment score and position are not consensus.\n");
                exit(1);
            }
            direction = (int8_t*)realloc(direction, s2 * sizeof(int8_t));
        }
        direction_line = direction;
        for (j = 1; LIKELY(j < width - 1); j ++) h_b[j] = 0;
        for (i = 0; LIKELY(i < readLen); i ++) {
            int32_t beg = 0, end = refLen - 1, u = 0, edge;
            j = i - band_width; beg = beg > j ? beg : j; // band start
            j = i + band_width; end = end < j ? end : j; // band end
            edge = end + 1 < width - 1 ? end + 1 : width - 1;
            f = h_b[0] = e_b[0] = h_b[edge] = e_b[edge] = h_c[0] = 0;
            direction_line = direction + width_d * i * 3;

            for (j = beg; LIKELY(j <= end); j ++) {
                int32_t b, e1, f1, d, de, df, dh;
                set_u(u, band_width, i, j); set_u(e, band_width, i - 1, j);
                set_u(b, band_width, i, j - 1); set_u(d, band_width, i - 1, j - 1);
                set_d(de, band_width, i, j, 0);
                set_d(df, band_width, i, j, 1);
                set_d(dh, band_width, i, j, 2);

                /* E (gap in ref): open from H above or extend E above */
                temp1 = i == 0 ? -weight_gapO : h_b[e] - weight_gapO;
                temp2 = i == 0 ? -weight_gapE : e_b[e] - weight_gapE;
                e_b[u] = temp1 > temp2 ? temp1 : temp2;
                direction_line[de] = temp1 > temp2 ? 3 : 2;

                /* F (gap in read): open from H on the left or extend F */
                temp1 = h_c[b] - weight_gapO;
                temp2 = f - weight_gapE;
                f = temp1 > temp2 ? temp1 : temp2;
                direction_line[df] = temp1 > temp2 ? 5 : 4;

                /* H: best of E, F (clamped at 0) and the diagonal move */
                e1 = e_b[u] > 0 ? e_b[u] : 0;
                f1 = f > 0 ? f : 0;
                temp1 = e1 > f1 ? e1 : f1;
                temp2 = h_b[d] + mat[ref[j] * n + read[i]];
                h_c[u] = temp1 > temp2 ? temp1 : temp2;

                if (h_c[u] > max) max = h_c[u];

                if (temp1 <= temp2) direction_line[dh] = 1;
                else direction_line[dh] = e1 > f1 ? direction_line[de] : direction_line[df];
            }
            for (j = 1; j <= u; j ++) h_b[j] = h_c[j];
        }
        band_width *= 2;
    } while (LIKELY(max < score));
    band_width /= 2;

    // trace back
    i = readLen - 1;
    j = refLen - 1;
    e = 0;  // Count the number of M, D or I.
    l = 0;  // record length of current cigar
    f = max = 0; // M
    temp2 = 2;  // h
    while (LIKELY(i > 0)) {
        set_d(temp1, band_width, i, j, temp2);
        switch (direction_line[temp1]) {
            case 1:
                --i;
                --j;
                temp2 = 2;
                direction_line -= width_d * 3;
                f = 0;  // M
                break;
            case 2:
                --i;
                temp2 = 0;  // e
                direction_line -= width_d * 3;
                f = 1;  // I
                break;
            case 3:
                --i;
                temp2 = 2;
                direction_line -= width_d * 3;
                f = 1;  // I
                break;
            case 4:
                --j;
                temp2 = 1;
                f = 2;  // D
                break;
            case 5:
                --j;
                temp2 = 2;
                f = 2;  // D
                break;
            default:
                fprintf(stderr, "Trace back error: %d.\n", direction_line[temp1 - 1]);
                return 0;
        }
        /* run-length encode: extend the current op or flush it into c */
        if (f == max) ++e;
        else {
            ++l;
            while (l >= s) {
                ++s;
                kroundup32(s);
                c = (uint32_t*)realloc(c, s * sizeof(uint32_t));
            }
            c[l - 1] = e<<4|max;
            max = f;
            e = 1;
        }
    }
    /* flush the final run; a non-M final op is followed by the leading 1M */
    if (f == 0) {
        ++l;
        while (l >= s) {
            ++s;
            kroundup32(s);
            c = (uint32_t*)realloc(c, s * sizeof(uint32_t));
        }
        c[l - 1] = (e+1)<<4;
    }else {
        l += 2;
        while (l >= s) {
            ++s;
            kroundup32(s);
            c = (uint32_t*)realloc(c, s * sizeof(uint32_t));
        }
        c[l - 2] = e<<4|f;
        c[l - 1] = 16;  // 1M
    }

    // reverse cigar
    c1 = (uint32_t*)malloc(l * sizeof(uint32_t));
    s = 0;
    e = l - 1;
    while (LIKELY(s <= e)) {
        c1[s] = c[e];
        c1[e] = c[s];
        ++ s;
        -- e;
    }
    result->seq = c1;
    result->length = l;

    free(direction);
    free(h_c);
    free(e_b);
    free(h_b);
    free(c);
    return result;
}
+
/* Return a newly allocated reversed copy of seq[0..end].
 * end is the 0-based alignment ending position (inclusive), so the copy
 * holds end + 1 elements.  The caller owns the buffer and must free() it. */
int8_t* seq_reverse(const int8_t* seq, int32_t end) /* end is 0-based alignment ending position */
{
    int8_t* reverse = (int8_t*)calloc(end + 1, sizeof(int8_t));
    int32_t front;
    /* Swap-copy from both ends toward the middle. */
    for (front = 0; front <= end; ++front, --end) {
        reverse[front] = seq[end];
        reverse[end] = seq[front];
    }
    return reverse;
}
+        
+s_profile* ssw_init (const int8_t* read, const int32_t readLen, const int8_t* mat, const int32_t n, const int8_t score_size) {
+    s_profile* p = (s_profile*)calloc(1, sizeof(struct _profile));
+    p->profile_byte = 0;
+    p->profile_word = 0;
+    p->bias = 0;
+    
+    if (score_size == 0 || score_size == 2) {
+        /* Find the bias to use in the substitution matrix */
+        int32_t bias = 0, i;
+        for (i = 0; i < n*n; i++) if (mat[i] < bias) bias = mat[i];
+        bias = abs(bias);
+
+        p->bias = bias;
+        p->profile_byte = qP_byte (read, mat, readLen, n, bias);
+    }
+    if (score_size == 1 || score_size == 2) p->profile_word = qP_word (read, mat, readLen, n);
+    p->read = read;
+    p->mat = mat;
+    p->readLen = readLen;
+    p->n = n;
+    return p;
+}
+
+void init_destroy (s_profile* p) {
+    free(p->profile_byte);
+    free(p->profile_word);
+    free(p);
+}
+
+s_align* ssw_align (const s_profile* prof, 
+                    const int8_t* ref, 
+                    int32_t refLen, 
+                    const uint8_t weight_gapO, 
+                    const uint8_t weight_gapE, 
+                    const uint8_t flag, //  (from high to low) bit 5: return the best alignment beginning position; 6: if (ref_end1 - ref_begin1 <= filterd) && (read_end1 - read_begin1 <= filterd), return cigar; 7: if max score >= filters, return cigar; 8: always return cigar; if 6 & 7 are both setted, only return cigar when both filter fulfilled
+                    const uint16_t filters,
+                    const int32_t filterd,
+                    const int32_t maskLen) {
+
+    alignment_end* bests = 0, *bests_reverse = 0;
+    __m128i* vP = 0;
+    int32_t word = 0, band_width = 0, readLen = prof->readLen;
+    int8_t* read_reverse = 0;
+    cigar* path;
+    s_align* r = (s_align*)calloc(1, sizeof(s_align));
+    r->ref_begin1 = -1;
+    r->read_begin1 = -1;
+    r->cigar = 0;
+    r->cigarLen = 0;
+    if (maskLen < 15) {
+        fprintf(stderr, "When maskLen < 15, the function ssw_align doesn't return 2nd best alignment information.\n");
+    }
+
+    // Find the alignment scores and ending positions
+    if (prof->profile_byte) {
+        bests = sw_sse2_byte(ref, 0, refLen, readLen, weight_gapO, weight_gapE, prof->profile_byte, -1, prof->bias, maskLen);
+        if (prof->profile_word && bests[0].score == 255) {
+            free(bests);
+            bests = sw_sse2_word(ref, 0, refLen, readLen, weight_gapO, weight_gapE, prof->profile_word, -1, maskLen);
+            word = 1;
+        } else if (bests[0].score == 255) {
+            fprintf(stderr, "Please set 2 to the score_size parameter of the function ssw_init, otherwise the alignment results will be incorrect.\n");
+            return 0;
+        }
+    }else if (prof->profile_word) {
+        bests = sw_sse2_word(ref, 0, refLen, readLen, weight_gapO, weight_gapE, prof->profile_word, -1, maskLen);
+        word = 1;
+    }else {
+        fprintf(stderr, "Please call the function ssw_init before ssw_align.\n");
+        return 0;
+    }
+    r->score1 = bests[0].score;
+    r->ref_end1 = bests[0].ref;
+    r->read_end1 = bests[0].read;
+    if (maskLen >= 15) {
+        r->score2 = bests[1].score;
+        r->ref_end2 = bests[1].ref;
+    } else {
+        r->score2 = 0;
+        r->ref_end2 = -1;
+    }
+    free(bests);
+    if (flag == 0 || (flag == 2 && r->score1 < filters)) goto end;
+
+    // Find the beginning position of the best alignment.
+    read_reverse = seq_reverse(prof->read, r->read_end1);
+    if (word == 0) {
+        vP = qP_byte(read_reverse, prof->mat, r->read_end1 + 1, prof->n, prof->bias);
+        bests_reverse = sw_sse2_byte(ref, 1, r->ref_end1 + 1, r->read_end1 + 1, weight_gapO, weight_gapE, vP, r->score1, prof->bias, maskLen);
+    } else {
+        vP = qP_word(read_reverse, prof->mat, r->read_end1 + 1, prof->n);
+        bests_reverse = sw_sse2_word(ref, 1, r->ref_end1 + 1, r->read_end1 + 1, weight_gapO, weight_gapE, vP, r->score1, maskLen);
+    }
+    free(vP);
+    free(read_reverse);
+    r->ref_begin1 = bests_reverse[0].ref;
+    r->read_begin1 = r->read_end1 - bests_reverse[0].read;
+    free(bests_reverse);
+    if ((7&flag) == 0 || ((2&flag) != 0 && r->score1 < filters) || ((4&flag) != 0 && (r->ref_end1 - r->ref_begin1 > filterd || r->read_end1 - r->read_begin1 > filterd))) goto end;
+
+    // Generate cigar.
+    refLen = r->ref_end1 - r->ref_begin1 + 1;
+    readLen = r->read_end1 - r->read_begin1 + 1;
+    band_width = abs(refLen - readLen) + 1;
+    path = banded_sw(ref + r->ref_begin1, prof->read + r->read_begin1, refLen, readLen, r->score1, weight_gapO, weight_gapE, band_width, prof->mat, prof->n);
+    if (path == 0) r = 0;
+    else {
+        r->cigar = path->seq;
+        r->cigarLen = path->length;
+        free(path);
+    }
+    
+end: 
+    return r;
+}
+
+void align_destroy (s_align* a) {
+    free(a->cigar);
+    free(a);
+}
diff --git a/skbio/alignment/_lib/ssw.h b/skbio/alignment/_lib/ssw.h
new file mode 100644
index 0000000..49a7594
--- /dev/null
+++ b/skbio/alignment/_lib/ssw.h
@@ -0,0 +1,130 @@
+/*
+ *  ssw.h
+ *
+ *  Created by Mengyao Zhao on 6/22/10.
+ *  Copyright 2010 Boston College. All rights reserved.
+ *  Version 0.1.4
+ *  Last revision by Mengyao Zhao on 01/30/13.
+ *
+ */
+
+#ifndef SSW_H
+#define SSW_H
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <emmintrin.h>
+
+/*! @typedef    structure of the query profile  */
+struct _profile;
+typedef struct _profile s_profile;
+
+/*! @typedef    structure of the alignment result
+    @field  score1  the best alignment score
+    @field  score2  sub-optimal alignment score
+    @field  ref_begin1  0-based best alignment beginning position on reference; ref_begin1 = -1 when the best alignment beginning 
+                        position is not available
+    @field  ref_end1    0-based best alignment ending position on reference
+    @field  read_begin1 0-based best alignment beginning position on read; read_begin1 = -1 when the best alignment beginning 
+                        position is not available
+    @field  read_end1   0-based best alignment ending position on read
+    @field  read_end2   0-based sub-optimal alignment ending position on read
+    @field  cigar   best alignment cigar; stored the same as that in BAM format, high 28 bits: length, low 4 bits: M/I/D (0/1/2); 
+                    cigar = 0 when the best alignment path is not available
+    @field  cigarLen    length of the cigar string; cigarLen = 0 when the best alignment path is not available
+*/
/* Alignment result; field semantics per the block comment above. */
typedef struct {
    uint16_t score1;     /* best alignment score */
    uint16_t score2;     /* sub-optimal alignment score */
    int32_t ref_begin1;  /* 0-based best-alignment start on reference; -1 when unavailable */
    int32_t ref_end1;    /* 0-based best-alignment end on reference */
    int32_t read_begin1; /* 0-based best-alignment start on read; -1 when unavailable */
    int32_t read_end1;   /* 0-based best-alignment end on read */
    int32_t ref_end2;    /* 0-based sub-optimal alignment end on reference */
    uint32_t* cigar;     /* BAM-style cigar: high 28 bits length, low 4 bits op M/I/D (0/1/2); 0 when unavailable */
    int32_t cigarLen;    /* number of cigar operations; 0 when unavailable */
} s_align;
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+/*! @function   Create the query profile using the query sequence.
+    @param  read    pointer to the query sequence; the query sequence needs to be numbers
+    @param  readLen length of the query sequence
+    @param  mat pointer to the substitution matrix; mat needs to be corresponding to the read sequence
+    @param  n   the square root of the number of elements in mat (mat has n*n elements)
+    @param  score_size  estimated Smith-Waterman score; if your estimated best alignment score is surely < 255 please set 0; if 
+                        your estimated best alignment score >= 255, please set 1; if you don't know, please set 2 
+    @return pointer to the query profile structure
+    @note   example for parameter read and mat:
+            If the query sequence is: ACGTATC, the sequence that read points to can be: 1234142
+            Then if the penalty for match is 2 and for mismatch is -2, the substitution matrix of parameter mat will be:
+            //A  C  G  T  
+              2 -2 -2 -2 //A
+             -2  2 -2 -2 //C
+             -2 -2  2 -2 //G
+             -2 -2 -2  2 //T
+            mat is the pointer to the array {2, -2, -2, -2, -2, 2, -2, -2, -2, -2, 2, -2, -2, -2, -2, 2}
+*/
+s_profile* ssw_init (const int8_t* read, const int32_t readLen, const int8_t* mat, const int32_t n, const int8_t score_size);
+
+/*! @function   Release the memory allocated by function ssw_init.
+    @param  p   pointer to the query profile structure  
+*/
+void init_destroy (s_profile* p);
+
+// @function    ssw alignment.
+/*! @function   Do Striped Smith-Waterman alignment.
+    @param  prof    pointer to the query profile structure
+    @param  ref pointer to the target sequence; the target sequence needs to be numbers and corresponding to the mat parameter of
+                function ssw_init
+    @param  refLen  length of the target sequence
+    @param  weight_gapO the absolute value of gap open penalty  
+    @param  weight_gapE the absolute value of gap extension penalty
    @param  flag    bitwise FLAG; (from high to low) bit 5: when set to 1, function ssw_align will return the best alignment
                    beginning position; bit 6: when set to 1, if (ref_end1 - ref_begin1 < filterd && read_end1 - read_begin1
                    < filterd), the function will return the best alignment beginning position and cigar regardless of bit 5;
                    bit 7: when set to 1, if the best alignment score >= filters, the function will return the best alignment
                    beginning position and cigar regardless of bit 5; bit 8: when set to 1, the function will always return the
                    best alignment beginning position and cigar regardless of bits 5, 6 and 7. When flag == 0, only the optimal
                    and sub-optimal scores and the optimal alignment ending position will be returned.
    @param  filters score filter: used when bit 7 of flag is set to 1 and bit 8 is set to 0 (please see the
                    description of the flag parameter for detailed usage)
    @param  filterd distance filter: used when bit 6 of flag is set to 1 and bit 8 is set to 0 (please see
                    the description of the flag parameter for detailed usage)
+    @param  maskLen The distance between the optimal and suboptimal alignment ending position >= maskLen. We suggest to use 
+                    readLen/2, if you don't have special concerns. Note: maskLen has to be >= 15, otherwise this function will NOT 
+                    return the suboptimal alignment information. Detailed description of maskLen: After locating the optimal
+                    alignment ending position, the suboptimal alignment score can be heuristically found by checking the second 
+                    largest score in the array that contains the maximal score of each column of the SW matrix. In order to avoid 
+                    picking the scores that belong to the alignments sharing the partial best alignment, SSW C library masks the 
+                    reference loci nearby (mask length = maskLen) the best alignment ending position and locates the second largest 
+                    score from the unmasked elements.
+    @return pointer to the alignment result structure 
    @note   Regardless of how the parameter flag is set, this function will at least return the optimal and sub-optimal alignment
            scores, and the optimal alignment ending positions on the target and query sequences. If bits 6 and 7 of the flag are
            both set while bit 8 is not, the function will return a cigar only when both criteria are fulfilled. All returned
            positions use 0-based coordinates.
+*/
+s_align* ssw_align (const s_profile* prof, 
+                    const int8_t* ref, 
+                    int32_t refLen, 
+                    const uint8_t weight_gapO, 
+                    const uint8_t weight_gapE, 
+                    const uint8_t flag, 
+                    const uint16_t filters,
+                    const int32_t filterd,
+                    const int32_t maskLen);
+
+/*! @function   Release the memory allocated by function ssw_align.
+    @param  a   pointer to the alignment result structure
+*/
+void align_destroy (s_align* a);
+
+#ifdef __cplusplus
+}
+#endif  // __cplusplus
+
+#endif  // SSW_H
\ No newline at end of file
diff --git a/skbio/alignment/_pairwise.py b/skbio/alignment/_pairwise.py
new file mode 100644
index 0000000..9fa3c87
--- /dev/null
+++ b/skbio/alignment/_pairwise.py
@@ -0,0 +1,903 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from warnings import warn
+from itertools import product
+
+import numpy as np
+from future.builtins import range, zip
+from six import string_types
+
+from skbio.alignment import Alignment
+from skbio.sequence import BiologicalSequence
+from skbio.util import EfficiencyWarning
+
# This is temporary: blosum50 does not exist in skbio yet as per
# issue 161. When the issue is resolved, this should be removed in favor
# of an import.
# BLOSUM 50 amino-acid substitution matrix as a nested dict:
# blosum50[a][b] is the score for substituting residue a with residue b
# (used as the default by local/global_pairwise_align_protein).  Keys cover
# the 20 standard residues plus the ambiguity codes B, Z, X and '*'.
blosum50 = \
    {
        '*': {'*': 1, 'A': -5, 'C': -5, 'B': -5, 'E': -5, 'D': -5, 'G': -5,
              'F': -5, 'I': -5, 'H': -5, 'K': -5, 'M': -5, 'L': -5,
              'N': -5, 'Q': -5, 'P': -5, 'S': -5, 'R': -5, 'T': -5,
              'W': -5, 'V': -5, 'Y': -5, 'X': -5, 'Z': -5},
        'A': {'*': -5, 'A': 5, 'C': -1, 'B': -2, 'E': -1, 'D': -2, 'G': 0,
              'F': -3, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -2,
              'N': -1, 'Q': -1, 'P': -1, 'S': 1, 'R': -2, 'T': 0, 'W': -3,
              'V': 0, 'Y': -2, 'X': -1, 'Z': -1},
        'C': {'*': -5, 'A': -1, 'C': 13, 'B': -3, 'E': -3, 'D': -4,
              'G': -3, 'F': -2, 'I': -2, 'H': -3, 'K': -3, 'M': -2,
              'L': -2, 'N': -2, 'Q': -3, 'P': -4, 'S': -1, 'R': -4,
              'T': -1, 'W': -5, 'V': -1, 'Y': -3, 'X': -1, 'Z': -3},
        'B': {'*': -5, 'A': -2, 'C': -3, 'B': 6, 'E': 1, 'D': 6, 'G': -1,
              'F': -4, 'I': -4, 'H': 0, 'K': 0, 'M': -3, 'L': -4, 'N': 5,
              'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -5, 'V': -3,
              'Y': -3, 'X': -1, 'Z': 1},
        'E': {'*': -5, 'A': -1, 'C': -3, 'B': 1, 'E': 6, 'D': 2, 'G': -3,
              'F': -3, 'I': -4, 'H': 0, 'K': 1, 'M': -2, 'L': -3, 'N': 0,
              'Q': 2, 'P': -1, 'S': -1, 'R': 0, 'T': -1, 'W': -3, 'V': -3,
              'Y': -2, 'X': -1, 'Z': 5},
        'D': {'*': -5, 'A': -2, 'C': -4, 'B': 6, 'E': 2, 'D': 8, 'G': -1,
              'F': -5, 'I': -4, 'H': -1, 'K': -1, 'M': -4, 'L': -4, 'N': 2,
              'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -5, 'V': -4,
              'Y': -3, 'X': -1, 'Z': 1},
        'G': {'*': -5, 'A': 0, 'C': -3, 'B': -1, 'E': -3, 'D': -1, 'G': 8,
              'F': -4, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4, 'N': 0,
              'Q': -2, 'P': -2, 'S': 0, 'R': -3, 'T': -2, 'W': -3, 'V': -4,
              'Y': -3, 'X': -1, 'Z': -2},
        'F': {'*': -5, 'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -5,
              'G': -4, 'F': 8, 'I': 0, 'H': -1, 'K': -4, 'M': 0, 'L': 1,
              'N': -4, 'Q': -4, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 1,
              'V': -1, 'Y': 4, 'X': -1, 'Z': -4},
        'I': {'*': -5, 'A': -1, 'C': -2, 'B': -4, 'E': -4, 'D': -4,
              'G': -4, 'F': 0, 'I': 5, 'H': -4, 'K': -3, 'M': 2, 'L': 2,
              'N': -3, 'Q': -3, 'P': -3, 'S': -3, 'R': -4, 'T': -1,
              'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -3},
        'H': {'*': -5, 'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2,
              'F': -1, 'I': -4, 'H': 10, 'K': 0, 'M': -1, 'L': -3, 'N': 1,
              'Q': 1, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -3, 'V': -4,
              'Y': 2, 'X': -1, 'Z': 0},
        'K': {'*': -5, 'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2,
              'F': -4, 'I': -3, 'H': 0, 'K': 6, 'M': -2, 'L': -3, 'N': 0,
              'Q': 2, 'P': -1, 'S': 0, 'R': 3, 'T': -1, 'W': -3, 'V': -3,
              'Y': -2, 'X': -1, 'Z': 1},
        'M': {'*': -5, 'A': -1, 'C': -2, 'B': -3, 'E': -2, 'D': -4,
              'G': -3, 'F': 0, 'I': 2, 'H': -1, 'K': -2, 'M': 7, 'L': 3,
              'N': -2, 'Q': 0, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -1,
              'V': 1, 'Y': 0, 'X': -1, 'Z': -1},
        'L': {'*': -5, 'A': -2, 'C': -2, 'B': -4, 'E': -3, 'D': -4,
              'G': -4, 'F': 1, 'I': 2, 'H': -3, 'K': -3, 'M': 3, 'L': 5,
              'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -1,
              'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
        'N': {'*': -5, 'A': -1, 'C': -2, 'B': 5, 'E': 0, 'D': 2, 'G': 0,
              'F': -4, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -4, 'N': 7,
              'Q': 0, 'P': -2, 'S': 1, 'R': -1, 'T': 0, 'W': -4, 'V': -3,
              'Y': -2, 'X': -1, 'Z': 0},
        'Q': {'*': -5, 'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2,
              'F': -4, 'I': -3, 'H': 1, 'K': 2, 'M': 0, 'L': -2, 'N': 0,
              'Q': 7, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -1, 'V': -3,
              'Y': -1, 'X': -1, 'Z': 4},
        'P': {'*': -5, 'A': -1, 'C': -4, 'B': -2, 'E': -1, 'D': -1,
              'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -3,
              'L': -4, 'N': -2, 'Q': -1, 'P': 10, 'S': -1, 'R': -3,
              'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': -1},
        'S': {'*': -5, 'A': 1, 'C': -1, 'B': 0, 'E': -1, 'D': 0, 'G': 0,
              'F': -3, 'I': -3, 'H': -1, 'K': 0, 'M': -2, 'L': -3, 'N': 1,
              'Q': 0, 'P': -1, 'S': 5, 'R': -1, 'T': 2, 'W': -4, 'V': -2,
              'Y': -2, 'X': -1, 'Z': 0},
        'R': {'*': -5, 'A': -2, 'C': -4, 'B': -1, 'E': 0, 'D': -2, 'G': -3,
              'F': -3, 'I': -4, 'H': 0, 'K': 3, 'M': -2, 'L': -3, 'N': -1,
              'Q': 1, 'P': -3, 'S': -1, 'R': 7, 'T': -1, 'W': -3, 'V': -3,
              'Y': -1, 'X': -1, 'Z': 0},
        'T': {'*': -5, 'A': 0, 'C': -1, 'B': 0, 'E': -1, 'D': -1, 'G': -2,
              'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': 0,
              'Q': -1, 'P': -1, 'S': 2, 'R': -1, 'T': 5, 'W': -3, 'V': 0,
              'Y': -2, 'X': -1, 'Z': -1},
        'W': {'*': -5, 'A': -3, 'C': -5, 'B': -5, 'E': -3, 'D': -5,
              'G': -3, 'F': 1, 'I': -3, 'H': -3, 'K': -3, 'M': -1, 'L': -2,
              'N': -4, 'Q': -1, 'P': -4, 'S': -4, 'R': -3, 'T': -3,
              'W': 15, 'V': -3, 'Y': 2, 'X': -1, 'Z': -2},
        'V': {'*': -5, 'A': 0, 'C': -1, 'B': -3, 'E': -3, 'D': -4, 'G': -4,
              'F': -1, 'I': 4, 'H': -4, 'K': -3, 'M': 1, 'L': 1, 'N': -3,
              'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 5,
              'Y': -1, 'X': -1, 'Z': -3},
        'Y': {'*': -5, 'A': -2, 'C': -3, 'B': -3, 'E': -2, 'D': -3,
              'G': -3, 'F': 4, 'I': -1, 'H': 2, 'K': -2, 'M': 0, 'L': -1,
              'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -1, 'T': -2, 'W': 2,
              'V': -1, 'Y': 8, 'X': -1, 'Z': -2},
        'X': {'*': -5, 'A': -1, 'C': -1, 'B': -1, 'E': -1, 'D': -1,
              'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1,
              'L': -1, 'N': -1, 'Q': -1, 'P': -1, 'S': -1, 'R': -1,
              'T': -1, 'W': -1, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
        'Z': {'*': -5, 'A': -1, 'C': -3, 'B': 1, 'E': 5, 'D': 1, 'G': -2,
              'F': -4, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3, 'N': 0,
              'Q': 4, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -2, 'V': -3,
              'Y': -2, 'X': -1, 'Z': 5}}
+
+
def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
                                    gap_extend_penalty=2,
                                    match_score=2, mismatch_score=-3,
                                    substitution_matrix=None):
    """Locally align exactly two nucleotide seqs with Smith-Waterman

    Parameters
    ----------
    seq1 : str or BiologicalSequence
        The first unaligned sequence.
    seq2 : str or BiologicalSequence
        The second unaligned sequence.
    gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    match_score : int or float, optional
        The score to add for a match between a pair of bases (this is added
        to the previous best alignment score, so is typically positive).
    mismatch_score : int or float, optional
        The score to add for a mismatch between a pair of bases (this is
        added to the previous best alignment score, so is typically
        negative).
    substitution_matrix : 2D dict (or similar), optional
        Lookup for substitution scores (these values are added to the
        previous best alignment score). If provided, this overrides
        ``match_score`` and ``mismatch_score``.

    Returns
    -------
    skbio.Alignment
        ``Alignment`` object containing the aligned sequences as well as
        details about the alignment.

    See Also
    --------
    local_pairwise_align
    local_pairwise_align_protein
    skbio.alignment.local_pairwise_align_ssw
    global_pairwise_align
    global_pairwise_align_protein
    global_pairwise_align_nucleotide

    Notes
    -----
    Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and
    ``gap_extend_penalty`` parameters are derived from the NCBI BLAST
    Server [1]_.

    References
    ----------
    .. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi

    """
    # Use the substitution matrix provided by the user, or derive an
    # identity matrix from match_score/mismatch_score when none is given.
    # (The original code carried a dead ``else: pass`` branch here.)
    if substitution_matrix is None:
        substitution_matrix = \
            make_identity_substitution_matrix(match_score, mismatch_score)

    return local_pairwise_align(seq1, seq2, gap_open_penalty,
                                gap_extend_penalty, substitution_matrix)
+
+
def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
                                 gap_extend_penalty=1,
                                 substitution_matrix=None):
    """Locally align exactly two protein seqs with Smith-Waterman

    Parameters
    ----------
    seq1 : str or BiologicalSequence
        First unaligned sequence.
    seq2 : str or BiologicalSequence
        Second unaligned sequence.
    gap_open_penalty : int or float, optional
        Penalty for opening a gap (subtracted from the previous best
        alignment score, so typically positive).
    gap_extend_penalty : int or float, optional
        Penalty for extending a gap (subtracted from the previous best
        alignment score, so typically positive).
    substitution_matrix : 2D dict (or similar), optional
        Lookup of substitution scores (added to the previous best
        alignment score); defaults to BLOSUM 50.

    Returns
    -------
    skbio.Alignment
        ``Alignment`` object containing the aligned sequences and details
        about the alignment.

    See Also
    --------
    local_pairwise_align
    local_pairwise_align_nucleotide
    skbio.alignment.local_pairwise_align_ssw
    global_pairwise_align
    global_pairwise_align_protein
    global_pairwise_align_nucleotide

    Notes
    -----
    The default ``gap_open_penalty`` and ``gap_extend_penalty`` values are
    derived from the NCBI BLAST Server [1]_.

    The BLOSUM (blocks substitution matrices) amino acid substitution
    matrices were originally defined in [2]_.

    References
    ----------
    .. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
    .. [2] Amino acid substitution matrices from protein blocks.
       S Henikoff and J G Henikoff.
       Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.

    """
    # Fall back to the module-level BLOSUM 50 matrix when the caller does
    # not supply a substitution matrix.
    matrix = blosum50 if substitution_matrix is None else substitution_matrix
    return local_pairwise_align(seq1, seq2, gap_open_penalty,
                                gap_extend_penalty, matrix)
+
+
def local_pairwise_align(seq1, seq2, gap_open_penalty,
                         gap_extend_penalty, substitution_matrix):
    """Locally align exactly two seqs with Smith-Waterman

    Parameters
    ----------
    seq1 : str or BiologicalSequence
        First unaligned sequence.
    seq2 : str or BiologicalSequence
        Second unaligned sequence.
    gap_open_penalty : int or float
        Penalty for opening a gap (subtracted from the previous best
        alignment score, so typically positive).
    gap_extend_penalty : int or float
        Penalty for extending a gap (subtracted from the previous best
        alignment score, so typically positive).
    substitution_matrix : 2D dict (or similar)
        Lookup of substitution scores (added to the previous best
        alignment score).

    Returns
    -------
    skbio.Alignment
        ``Alignment`` object containing the aligned sequences and details
        about the alignment.

    See Also
    --------
    local_pairwise_align_protein
    local_pairwise_align_nucleotide
    skbio.alignment.local_pairwise_align_ssw
    global_pairwise_align
    global_pairwise_align_protein
    global_pairwise_align_nucleotide

    Notes
    -----
    The algorithm was originally described in [1]_. The scikit-bio
    implementation was validated against the EMBOSS water web server [2]_.

    References
    ----------
    .. [1] Identification of common molecular subsequences.
       Smith TF, Waterman MS.
       J Mol Biol. 1981 Mar 25;147(1):195-7.
    .. [2] http://www.ebi.ac.uk/Tools/psa/emboss_water/

    """
    warn("You're using skbio's python implementation of Smith-Waterman "
         "alignment. This will be very slow (e.g., thousands of times slower) "
         "than skbio.alignment.local_pairwise_align_ssw.",
         EfficiencyWarning)

    seq1 = _coerce_alignment_input_type(seq1, disallow_alignment=True)
    seq2 = _coerce_alignment_input_type(seq2, disallow_alignment=True)

    score_mtx, traceback_mtx = _compute_score_and_traceback_matrices(
        seq1, seq2, gap_open_penalty, gap_extend_penalty,
        substitution_matrix, new_alignment_score=0.0,
        init_matrices_f=_init_matrices_sw)

    # A local alignment ends wherever the score matrix is maximal.
    best_row, best_col = np.unravel_index(np.argmax(score_mtx),
                                          score_mtx.shape)

    aligned1, aligned2, score, seq1_start, seq2_start = _traceback(
        traceback_mtx, score_mtx, seq1, seq2, best_row, best_col)

    # Matrix indices are offset by one relative to sequence positions.
    start_end_positions = [(seq1_start, best_col - 1),
                           (seq2_start, best_row - 1)]

    return Alignment(aligned1 + aligned2, score=score,
                     start_end_positions=start_end_positions)
+
+
def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
                                     gap_extend_penalty=2,
                                     match_score=1, mismatch_score=-2,
                                     substitution_matrix=None,
                                     penalize_terminal_gaps=False):
    """Globally align pair of nuc. seqs or alignments with Needleman-Wunsch

    Parameters
    ----------
    seq1 : str, BiologicalSequence, or Alignment
        The first unaligned sequence(s).
    seq2 : str, BiologicalSequence, or Alignment
        The second unaligned sequence(s).
    gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    match_score : int or float, optional
        The score to add for a match between a pair of bases (this is added
        to the previous best alignment score, so is typically positive).
    mismatch_score : int or float, optional
        The score to add for a mismatch between a pair of bases (this is
        added to the previous best alignment score, so is typically
        negative).
    substitution_matrix : 2D dict (or similar), optional
        Lookup for substitution scores (these values are added to the
        previous best alignment score). If provided, this overrides
        ``match_score`` and ``mismatch_score``.
    penalize_terminal_gaps : bool, optional
        If True, will continue to penalize gaps even after one sequence has
        been aligned through its end. This behavior is true Needleman-Wunsch
        alignment, but results in (biologically irrelevant) artifacts when
        the sequences being aligned are of different length. This is ``False``
        by default, which is very likely to be the behavior you want in all or
        nearly all cases.

    Returns
    -------
    skbio.Alignment
        ``Alignment`` object containing the aligned sequences as well as
        details about the alignment.

    See Also
    --------
    local_pairwise_align
    local_pairwise_align_protein
    local_pairwise_align_nucleotide
    skbio.alignment.local_pairwise_align_ssw
    global_pairwise_align
    global_pairwise_align_protein

    Notes
    -----
    Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and
    ``gap_extend_penalty`` parameters are derived from the NCBI BLAST
    Server [1]_.

    This function can be used to align either a pair of sequences, a pair of
    alignments, or a sequence and an alignment.

    References
    ----------
    .. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi

    """
    # Use the substitution matrix provided by the user, or derive an
    # identity matrix from match_score/mismatch_score when none is given.
    # (The original code carried a dead ``else: pass`` branch here.)
    if substitution_matrix is None:
        substitution_matrix = \
            make_identity_substitution_matrix(match_score, mismatch_score)

    return global_pairwise_align(seq1, seq2, gap_open_penalty,
                                 gap_extend_penalty, substitution_matrix,
                                 penalize_terminal_gaps=penalize_terminal_gaps)
+
+
def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
                                  gap_extend_penalty=1,
                                  substitution_matrix=None,
                                  penalize_terminal_gaps=False):
    """Globally align pair of protein seqs or alignments with Needleman-Wunsch

    Parameters
    ----------
    seq1 : str, BiologicalSequence, or Alignment
        The first unaligned sequence(s).
    seq2 : str, BiologicalSequence, or Alignment
        The second unaligned sequence(s).
    gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from the previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from the previous
        best alignment score, so is typically positive).
    substitution_matrix: 2D dict (or similar), optional
        Lookup for substitution scores (these values are added to the
        previous best alignment score); default is BLOSUM 50.
    penalize_terminal_gaps: bool, optional
        If True, will continue to penalize gaps even after one sequence has
        been aligned through its end. This behavior is true Needleman-Wunsch
        alignment, but results in (biologically irrelevant) artifacts when
        the sequences being aligned are of different length. This is ``False``
        by default, which is very likely to be the behavior you want in all or
        nearly all cases.

    Returns
    -------
    skbio.Alignment
        ``Alignment`` object containing the aligned sequences as well as
        details about the alignment.

    See Also
    --------
    local_pairwise_align
    local_pairwise_align_protein
    local_pairwise_align_nucleotide
    skbio.alignment.local_pairwise_align_ssw
    global_pairwise_align
    global_pairwise_align_nucleotide

    Notes
    -----
    Default ``gap_open_penalty`` and ``gap_extend_penalty`` parameters are
    derived from the NCBI BLAST Server [1]_.

    The BLOSUM (blocks substitution matrices) amino acid substitution matrices
    were originally defined in [2]_.

    This function can be used to align either a pair of sequences, a pair of
    alignments, or a sequence and an alignment.

    References
    ----------
    .. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
    .. [2] Amino acid substitution matrices from protein blocks.
       S Henikoff and J G Henikoff.
       Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.

    """
    if substitution_matrix is None:
        # BLOSUM 50 is the conventional default for protein alignment here.
        substitution_matrix = blosum50

    return global_pairwise_align(
        seq1, seq2, gap_open_penalty, gap_extend_penalty,
        substitution_matrix, penalize_terminal_gaps=penalize_terminal_gaps)
+
+
def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
                          substitution_matrix, penalize_terminal_gaps=False):
    """Globally align a pair of seqs or alignments with Needleman-Wunsch

    Parameters
    ----------
    seq1 : str, BiologicalSequence, or Alignment
        The first unaligned sequence(s).
    seq2 : str, BiologicalSequence, or Alignment
        The second unaligned sequence(s).
    gap_open_penalty : int or float
        Penalty for opening a gap (this is subtracted from the previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float
        Penalty for extending a gap (this is subtracted from the previous
        best alignment score, so is typically positive).
    substitution_matrix: 2D dict (or similar)
        Lookup for substitution scores (these values are added to the
        previous best alignment score).
    penalize_terminal_gaps: bool, optional
        If True, will continue to penalize gaps even after one sequence has
        been aligned through its end. This behavior is true Needleman-Wunsch
        alignment, but results in (biologically irrelevant) artifacts when
        the sequences being aligned are of different length. This is ``False``
        by default, which is very likely to be the behavior you want in all or
        nearly all cases.

    Returns
    -------
    skbio.Alignment
        ``Alignment`` object containing the aligned sequences as well as
        details about the alignment.

    See Also
    --------
    local_pairwise_align
    local_pairwise_align_protein
    local_pairwise_align_nucleotide
    skbio.alignment.local_pairwise_align_ssw
    global_pairwise_align_protein
    global_pairwise_align_nucleotide

    Notes
    -----
    This algorithm (in a slightly more basic form) was originally described
    in [1]_. The scikit-bio implementation was validated against the
    EMBOSS needle web server [2]_.

    This function can be used to align either a pair of sequences, a pair of
    alignments, or a sequence and an alignment.

    References
    ----------
    .. [1] A general method applicable to the search for similarities in
       the amino acid sequence of two proteins.
       Needleman SB, Wunsch CD.
       J Mol Biol. 1970 Mar;48(3):443-53.
    .. [2] http://www.ebi.ac.uk/Tools/psa/emboss_needle/

    """
    warn("You're using skbio's python implementation of Needleman-Wunsch "
         "alignment. This is known to be very slow (e.g., thousands of times "
         "slower than a native C implementation). We'll be adding a faster "
         "version soon (see https://github.com/biocore/scikit-bio/issues/254 "
         "to track progress on this).", EfficiencyWarning)

    # Normalize str/BiologicalSequence/Alignment inputs to Alignment objects.
    aln1 = _coerce_alignment_input_type(seq1, disallow_alignment=False)
    aln2 = _coerce_alignment_input_type(seq2, disallow_alignment=False)

    # With terminal gap penalties disabled, the first row/column of the
    # score matrix stays zero instead of accumulating gap penalties.
    init_matrices_f = (_init_matrices_nw if penalize_terminal_gaps
                      else _init_matrices_nw_no_terminal_gap_penalty)

    score_matrix, traceback_matrix = _compute_score_and_traceback_matrices(
        aln1, aln2, gap_open_penalty, gap_extend_penalty,
        substitution_matrix, new_alignment_score=-np.inf,
        init_matrices_f=init_matrices_f,
        penalize_terminal_gaps=penalize_terminal_gaps)

    # Global alignment always traces back from the bottom-right cell.
    end_row = traceback_matrix.shape[0] - 1
    end_col = traceback_matrix.shape[1] - 1

    aligned1, aligned2, score, seq1_start, seq2_start = _traceback(
        traceback_matrix, score_matrix, aln1, aln2, end_row, end_col)

    start_end_positions = [(seq1_start, end_col - 1),
                           (seq2_start, end_row - 1)]

    return Alignment(aligned1 + aligned2, score=score,
                     start_end_positions=start_end_positions)
+
+
def make_identity_substitution_matrix(match_score, mismatch_score,
                                      alphabet='ACGTU'):
    """Generate substitution matrix where all matches are scored equally

    Parameters
    ----------
    match_score : int, float
        The score that should be assigned for all matches. This value is
        typically positive.
    mismatch_score : int, float
        The score that should be assigned for all mismatches. This value is
        typically negative.
    alphabet : iterable of str, optional
        The characters that should be included in the substitution matrix.

    Returns
    -------
    dict of dicts
        All characters in alphabet are keys in both dictionaries, so that any
        pair of characters can be looked up to get their match or mismatch
        score.

    """
    warn("make_identity_substitution_matrix is deprecated and will soon be "
         "replaced, though at the time of this writing the new name has not "
         "been finalized. Updates will be posted to issue #161: "
         "https://github.com/biocore/scikit-bio/issues/161",
         DeprecationWarning)

    # Every (c1, c2) pair scores match_score on the diagonal and
    # mismatch_score everywhere else.
    return {c1: {c2: (match_score if c1 == c2 else mismatch_score)
                 for c2 in alphabet}
            for c1 in alphabet}
+
+
+# Functions from here allow for generalized (global or local) alignment. I
+# will likely want to put these in a single object to make the naming a little
+# less clunky.
+
+
def _coerce_alignment_input_type(seq, disallow_alignment):
    """Coerce a str, BiologicalSequence, or Alignment into an Alignment.

    Raises TypeError for unsupported input types, or — when
    ``disallow_alignment`` is True — when ``seq`` is already an Alignment.
    """
    if isinstance(seq, string_types):
        return Alignment([BiologicalSequence(seq)])
    if isinstance(seq, BiologicalSequence):
        return Alignment([seq])
    if isinstance(seq, Alignment):
        if disallow_alignment:
            # Aligning a pair of alignments, or an alignment and a sequence,
            # is not supported for local alignment: there is no clear use
            # case, and it's also not exactly clear how this would work.
            raise TypeError("Aligning alignments is not currently supported "
                            "with the aligner function that you're calling.")
        return seq
    raise TypeError(
        "Unsupported type provided to aligner: %r." % type(seq))
+
+
# Integer codes stored in traceback matrices to record, for each cell, which
# move produced its best score: a match/mismatch (diagonal), a gap in one of
# the alignments (vertical/horizontal), the end of an alignment, or a cell
# that has not been filled in yet.
_traceback_encoding = {'match': 1, 'vertical-gap': 2, 'horizontal-gap': 3,
                       'uninitialized': -1, 'alignment-end': 0}
+
+
+def _get_seq_id(seq, default_id):
+    try:
+        result = seq.id
+    except AttributeError:
+        result = default_id
+    else:
+        if result is None or result.strip() == "":
+            result = default_id
+    return result
+
+
def _init_matrices_sw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
    """Create score and traceback matrices for Smith-Waterman alignment.

    ``aln1`` corresponds to the horizontal axis of the matrices and ``aln2``
    to the vertical axis, giving shape
    ``(aln2.sequence_length() + 1, aln1.sequence_length() + 1)``. The gap
    penalty parameters are unused here, but are accepted so that all
    ``init_matrices_f`` callbacks share a common signature.
    """
    shape = (aln2.sequence_length()+1, aln1.sequence_length()+1)
    score_matrix = np.zeros(shape)
    # Use the builtin ``int``: the ``np.int`` alias was deprecated in NumPy
    # 1.20 and removed in 1.24, where it raises AttributeError.
    traceback_matrix = np.zeros(shape, dtype=int)
    traceback_matrix += _traceback_encoding['uninitialized']
    # In local alignment, an alignment may end anywhere along the first row
    # or column (i.e., before either sequence has been consumed).
    traceback_matrix[0, :] = _traceback_encoding['alignment-end']
    traceback_matrix[:, 0] = _traceback_encoding['alignment-end']
    return score_matrix, traceback_matrix
+
+
def _init_matrices_nw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
    """Create score and traceback matrices for Needleman-Wunsch alignment.

    The first row and column are pre-filled with affine terminal gap
    penalties (and the corresponding gap traceback codes), so that terminal
    gaps are penalized during global alignment.
    """
    shape = (aln2.sequence_length()+1, aln1.sequence_length()+1)
    score_matrix = np.zeros(shape)
    # Use the builtin ``int``: the ``np.int`` alias was deprecated in NumPy
    # 1.20 and removed in 1.24, where it raises AttributeError.
    traceback_matrix = np.zeros(shape, dtype=int)
    traceback_matrix += _traceback_encoding['uninitialized']
    traceback_matrix[0, 0] = _traceback_encoding['alignment-end']

    # cache some values for quicker access
    vgap = _traceback_encoding['vertical-gap']
    hgap = _traceback_encoding['horizontal-gap']

    # first column: affine penalty for a terminal gap (vertical moves)
    for i in range(1, shape[0]):
        score_matrix[i, 0] = -gap_open_penalty - ((i-1) * gap_extend_penalty)
        traceback_matrix[i, 0] = vgap

    # first row: affine penalty for a terminal gap (horizontal moves)
    for i in range(1, shape[1]):
        score_matrix[0, i] = -gap_open_penalty - ((i-1) * gap_extend_penalty)
        traceback_matrix[0, i] = hgap

    return score_matrix, traceback_matrix
+
+
def _init_matrices_nw_no_terminal_gap_penalty(
        aln1, aln2, gap_open_penalty, gap_extend_penalty):
    """Create Needleman-Wunsch matrices that do not penalize terminal gaps.

    Like ``_init_matrices_nw``, but the first row and column scores are left
    at zero (only the gap traceback codes are recorded), so gaps before the
    start of either sequence carry no penalty. The gap penalty parameters
    are unused, but accepted so all ``init_matrices_f`` callbacks share a
    common signature.
    """
    shape = (aln2.sequence_length()+1, aln1.sequence_length()+1)
    score_matrix = np.zeros(shape)
    # Use the builtin ``int``: the ``np.int`` alias was deprecated in NumPy
    # 1.20 and removed in 1.24, where it raises AttributeError.
    traceback_matrix = np.zeros(shape, dtype=int)
    traceback_matrix += _traceback_encoding['uninitialized']
    traceback_matrix[0, 0] = _traceback_encoding['alignment-end']

    # cache some values for quicker access
    vgap = _traceback_encoding['vertical-gap']
    hgap = _traceback_encoding['horizontal-gap']

    # record gap directions along the borders, but leave scores at zero
    for i in range(1, shape[0]):
        traceback_matrix[i, 0] = vgap

    for i in range(1, shape[1]):
        traceback_matrix[0, i] = hgap

    return score_matrix, traceback_matrix
+
+
def _compute_substitution_score(aln1_chars, aln2_chars, substitution_matrix,
                                gap_substitution_score):
    """Average pairwise substitution score between two alignment columns.

    Every character in ``aln1_chars`` is scored against every character in
    ``aln2_chars``. Pairs involving a gap contribute
    ``gap_substitution_score``; all other pairs are looked up in
    ``substitution_matrix``. The total is divided by the number of pairs, so
    the result is the mean per-pair score. Raises ValueError when a
    character is missing from the substitution matrix.
    """
    total = 0
    for c1, c2 in product(aln1_chars, aln2_chars):
        if BiologicalSequence.is_gap(c1) or BiologicalSequence.is_gap(c2):
            total += gap_substitution_score
            continue
        try:
            total += substitution_matrix[c1][c2]
        except KeyError:
            offending_chars = [
                c for c in (c1, c2) if c not in substitution_matrix]
            raise ValueError(
                "One of the sequences contains a character that is "
                "not contained in the substitution matrix. Are you "
                "using an appropriate substitution matrix for your "
                "sequence type (e.g., a nucleotide substitution "
                "matrix does not make sense for aligning protein "
                "sequences)? Does your sequence contain invalid "
                "characters? The offending character(s) is: "
                " %s." % ', '.join(offending_chars))
    return total / (len(aln1_chars) * len(aln2_chars))
+
+
def _compute_score_and_traceback_matrices(
        aln1, aln2, gap_open_penalty, gap_extend_penalty, substitution_matrix,
        new_alignment_score=-np.inf, init_matrices_f=_init_matrices_nw,
        penalize_terminal_gaps=True, gap_substitution_score=0):
    """Return dynamic programming (score) and traceback matrices.

    ``aln1`` corresponds to the horizontal axis of the matrices and ``aln2``
    to the vertical axis. ``new_alignment_score`` is the score assigned to
    beginning a new alignment at any cell: ``-inf`` (the default) forbids
    it, which yields global alignment behavior, while ``0`` would allow it,
    which yields local (Smith-Waterman-style) behavior.
    ``init_matrices_f`` builds the starting matrices (e.g., with or without
    terminal gap penalties pre-filled), and ``gap_substitution_score`` is
    the per-pair score used when either column being compared contains a
    gap character.

    A note on the ``penalize_terminal_gaps`` parameter. When this value is
    ``False``, this function is no longer true Smith-Waterman/Needleman-Wunsch
    scoring, but when ``True`` it can result in biologically irrelevant
    artifacts in Needleman-Wunsch (global) alignments. Specifically, if one
    sequence is longer than the other (e.g., if aligning a primer sequence to
    an amplification product, or searching for a gene in a genome) the shorter
    sequence will have a long gap inserted. The parameter is ``True`` by
    default (so that this function computes the score and traceback matrices as
    described by the original authors) but the global alignment wrappers pass
    ``False`` by default, so that the global alignment API returns the result
    that users are most likely to be looking for.

    """
    aln1_length = aln1.sequence_length()
    aln2_length = aln2.sequence_length()
    # cache some values for quicker/simpler access
    aend = _traceback_encoding['alignment-end']
    match = _traceback_encoding['match']
    vgap = _traceback_encoding['vertical-gap']
    hgap = _traceback_encoding['horizontal-gap']

    # pair the new-alignment score with its traceback code, so it can
    # compete directly with the other (score, direction) candidates below
    new_alignment_score = (new_alignment_score, aend)

    # Initialize a matrix to use for scoring the alignment and for tracing
    # back the best alignment
    score_matrix, traceback_matrix = init_matrices_f(
        aln1, aln2, gap_open_penalty, gap_extend_penalty)

    # Iterate over the characters in aln2 (which corresponds to the vertical
    # sequence in the matrix)
    for aln2_pos, aln2_chars in enumerate(aln2.iter_positions(str), 1):
        # Iterate over the characters in aln1 (which corresponds to the
        # horizontal sequence in the matrix)
        for aln1_pos, aln1_chars in enumerate(aln1.iter_positions(str), 1):
            # compute the score for a match/mismatch
            substitution_score = _compute_substitution_score(
                aln1_chars, aln2_chars, substitution_matrix,
                gap_substitution_score)

            diag_score = \
                (score_matrix[aln2_pos-1, aln1_pos-1] + substitution_score,
                 match)

            # compute the score for adding a gap in aln2 (vertical)
            if not penalize_terminal_gaps and (aln1_pos == aln1_length):
                # we've reached the end of aln1, so adding vertical gaps
                # (which become gaps in aln1) should no longer
                # be penalized (if penalize_terminal_gaps == False)
                up_score = (score_matrix[aln2_pos-1, aln1_pos], vgap)
            elif traceback_matrix[aln2_pos-1, aln1_pos] == vgap:
                # gap extend, because the cell above was also a gap
                up_score = \
                    (score_matrix[aln2_pos-1, aln1_pos] - gap_extend_penalty,
                     vgap)
            else:
                # gap open, because the cell above was not a gap
                up_score = \
                    (score_matrix[aln2_pos-1, aln1_pos] - gap_open_penalty,
                     vgap)

            # compute the score for adding a gap in aln1 (horizontal)
            if not penalize_terminal_gaps and (aln2_pos == aln2_length):
                # we've reached the end of aln2, so adding horizontal gaps
                # (which become gaps in aln2) should no longer
                # be penalized (if penalize_terminal_gaps == False)
                left_score = (score_matrix[aln2_pos, aln1_pos-1], hgap)
            elif traceback_matrix[aln2_pos, aln1_pos-1] == hgap:
                # gap extend, because the cell to the left was also a gap
                left_score = \
                    (score_matrix[aln2_pos, aln1_pos-1] - gap_extend_penalty,
                     hgap)
            else:
                # gap open, because the cell to the left was not a gap
                left_score = \
                    (score_matrix[aln2_pos, aln1_pos-1] - gap_open_penalty,
                     hgap)

            # identify the largest score, and use that information to populate
            # the score and traceback matrices
            best_score = _first_largest([new_alignment_score, left_score,
                                         diag_score, up_score])
            score_matrix[aln2_pos, aln1_pos] = best_score[0]
            traceback_matrix[aln2_pos, aln1_pos] = best_score[1]

    return score_matrix, traceback_matrix
+
+
def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
               start_col, gap_character='-'):
    """Follow a traceback matrix to recover the aligned sequences.

    Starting at cell ``(start_row, start_col)``, walk the traceback matrix
    until an alignment-end cell is reached, collecting aligned characters
    (in reverse) for every sequence in ``aln1`` and ``aln2``.
    ``gap_character`` is inserted wherever one alignment has a gap relative
    to the other. Returns a tuple of
    ``(aligned_seqs1, aligned_seqs2, best_score, aln1_start, aln2_start)``,
    where the aligned sequences are lists of ``BiologicalSequence`` and the
    start values are the (column, row) positions where the traceback ended.
    """
    # cache some values for simpler access
    aend = _traceback_encoding['alignment-end']
    match = _traceback_encoding['match']
    vgap = _traceback_encoding['vertical-gap']
    hgap = _traceback_encoding['horizontal-gap']

    # initialize the result alignments; characters are collected in reverse
    # order and flipped at the end
    aln1_sequence_count = aln1.sequence_count()
    aligned_seqs1 = [[] for e in range(aln1_sequence_count)]

    aln2_sequence_count = aln2.sequence_count()
    aligned_seqs2 = [[] for e in range(aln2_sequence_count)]

    current_row = start_row
    current_col = start_col

    best_score = score_matrix[current_row, current_col]
    current_value = None

    while current_value != aend:
        current_value = traceback_matrix[current_row, current_col]

        if current_value == match:
            for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
                aligned_seq.append(str(input_seq[current_col-1]))
            for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
                aligned_seq.append(str(input_seq[current_row-1]))
            current_row -= 1
            current_col -= 1
        elif current_value == vgap:
            # bug fix: honor the gap_character parameter, which was
            # previously accepted but ignored in favor of a hard-coded '-'
            for aligned_seq in aligned_seqs1:
                aligned_seq.append(gap_character)
            for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
                aligned_seq.append(str(input_seq[current_row-1]))
            current_row -= 1
        elif current_value == hgap:
            for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
                aligned_seq.append(str(input_seq[current_col-1]))
            for aligned_seq in aligned_seqs2:
                aligned_seq.append(gap_character)
            current_col -= 1
        elif current_value == aend:
            # loop condition will now terminate the walk
            continue
        else:
            raise ValueError(
                "Invalid value in traceback matrix: %s" % current_value)

    # reverse the collected characters and wrap each in a BiologicalSequence,
    # preserving input ids (or falling back to positional ids)
    for i in range(aln1_sequence_count):
        aligned_seq = ''.join(aligned_seqs1[i][::-1])
        seq_id = _get_seq_id(aln1[i], str(i))
        aligned_seqs1[i] = BiologicalSequence(aligned_seq, id=seq_id)

    for i in range(aln2_sequence_count):
        aligned_seq = ''.join(aligned_seqs2[i][::-1])
        seq_id = _get_seq_id(aln2[i], str(i + aln1_sequence_count))
        aligned_seqs2[i] = BiologicalSequence(aligned_seq, id=seq_id)

    return (aligned_seqs1, aligned_seqs2, best_score,
            current_col, current_row)
+
+
+def _first_largest(scores):
+    """ Similar to max, but returns the first element achieving the high score
+
+    If max receives a tuple, it will break a tie for the highest value
+    of entry[i] with entry[i+1]. We don't want that here - to better match
+    with the results of other tools, we want to be able to define which
+    entry is returned in the case of a tie.
+    """
+    result = scores[0]
+    for score, direction in scores[1:]:
+        if score > result[0]:
+            result = (score, direction)
+    return result
diff --git a/skbio/alignment/_ssw_wrapper.c b/skbio/alignment/_ssw_wrapper.c
new file mode 100644
index 0000000..6ede366
--- /dev/null
+++ b/skbio/alignment/_ssw_wrapper.c
@@ -0,0 +1,14406 @@
+/* Generated by Cython 0.20.2 on Thu Sep  4 14:19:20 2014 */
+
+#define PY_SSIZE_T_CLEAN
+#ifndef CYTHON_USE_PYLONG_INTERNALS
+#ifdef PYLONG_BITS_IN_DIGIT
+#define CYTHON_USE_PYLONG_INTERNALS 0
+#else
+#include "pyconfig.h"
+#ifdef PYLONG_BITS_IN_DIGIT
+#define CYTHON_USE_PYLONG_INTERNALS 1
+#else
+#define CYTHON_USE_PYLONG_INTERNALS 0
+#endif
+#endif
+#endif
+#include "Python.h"
+#ifndef Py_PYTHON_H
+    #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02040000
+    #error Cython requires Python 2.4+.
+#else
+#define CYTHON_ABI "0_20_2"
+#include <stddef.h> /* For offsetof */
+#ifndef offsetof
+#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+  #ifndef __stdcall
+    #define __stdcall
+  #endif
+  #ifndef __cdecl
+    #define __cdecl
+  #endif
+  #ifndef __fastcall
+    #define __fastcall
+  #endif
+#endif
+#ifndef DL_IMPORT
+  #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+  #define DL_EXPORT(t) t
+#endif
+#ifndef PY_LONG_LONG
+  #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+  #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+#define CYTHON_COMPILING_IN_PYPY 1
+#define CYTHON_COMPILING_IN_CPYTHON 0
+#else
+#define CYTHON_COMPILING_IN_PYPY 0
+#define CYTHON_COMPILING_IN_CPYTHON 1
+#endif
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600
+#define Py_OptimizeFlag 0
+#endif
+#if PY_VERSION_HEX < 0x02050000
+  typedef int Py_ssize_t;
+  #define PY_SSIZE_T_MAX INT_MAX
+  #define PY_SSIZE_T_MIN INT_MIN
+  #define PY_FORMAT_SIZE_T ""
+  #define CYTHON_FORMAT_SSIZE_T ""
+  #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+  #define PyInt_AsSsize_t(o)   __Pyx_PyInt_As_int(o)
+  #define PyNumber_Index(o)    ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \
+                                (PyErr_Format(PyExc_TypeError, \
+                                              "expected index value, got %.200s", Py_TYPE(o)->tp_name), \
+                                 (PyObject*)0))
+  #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \
+                                  !PyComplex_Check(o))
+  #define PyIndex_Check __Pyx_PyIndex_Check
+  #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
+  #define __PYX_BUILD_PY_SSIZE_T "i"
+#else
+  #define __PYX_BUILD_PY_SSIZE_T "n"
+  #define CYTHON_FORMAT_SSIZE_T "z"
+  #define __Pyx_PyIndex_Check PyIndex_Check
+#endif
+#if PY_VERSION_HEX < 0x02060000
+  #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
+  #define Py_TYPE(ob)   (((PyObject*)(ob))->ob_type)
+  #define Py_SIZE(ob)   (((PyVarObject*)(ob))->ob_size)
+  #define PyVarObject_HEAD_INIT(type, size) \
+          PyObject_HEAD_INIT(type) size,
+  #define PyType_Modified(t)
+  typedef struct {
+     void *buf;
+     PyObject *obj;
+     Py_ssize_t len;
+     Py_ssize_t itemsize;
+     int readonly;
+     int ndim;
+     char *format;
+     Py_ssize_t *shape;
+     Py_ssize_t *strides;
+     Py_ssize_t *suboffsets;
+     void *internal;
+  } Py_buffer;
+  #define PyBUF_SIMPLE 0
+  #define PyBUF_WRITABLE 0x0001
+  #define PyBUF_FORMAT 0x0004
+  #define PyBUF_ND 0x0008
+  #define PyBUF_STRIDES (0x0010 | PyBUF_ND)
+  #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
+  #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
+  #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
+  #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
+  #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE)
+  #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE)
+  typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
+  typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
+#endif
+#if PY_MAJOR_VERSION < 3
+  #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+          PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+  #define __Pyx_DefaultClassType PyClass_Type
+#else
+  #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+          PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+  #define __Pyx_DefaultClassType PyType_Type
+#endif
+#if PY_VERSION_HEX < 0x02060000
+  #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict")
+#endif
+#if PY_MAJOR_VERSION >= 3
+  #define Py_TPFLAGS_CHECKTYPES 0
+  #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
+  #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#if PY_VERSION_HEX < 0x02060000
+  #define Py_TPFLAGS_HAVE_VERSION_TAG 0
+#endif
+#if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT)
+  #define Py_TPFLAGS_IS_ABSTRACT 0
+#endif
+#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
+  #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
+  #define CYTHON_PEP393_ENABLED 1
+  #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ? \
+                                              0 : _PyUnicode_Ready((PyObject *)(op)))
+  #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_LENGTH(u)
+  #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+  #define __Pyx_PyUnicode_KIND(u)         PyUnicode_KIND(u)
+  #define __Pyx_PyUnicode_DATA(u)         PyUnicode_DATA(u)
+  #define __Pyx_PyUnicode_READ(k, d, i)   PyUnicode_READ(k, d, i)
+#else
+  #define CYTHON_PEP393_ENABLED 0
+  #define __Pyx_PyUnicode_READY(op)       (0)
+  #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_SIZE(u)
+  #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
+  #define __Pyx_PyUnicode_KIND(u)         (sizeof(Py_UNICODE))
+  #define __Pyx_PyUnicode_DATA(u)         ((void*)PyUnicode_AS_UNICODE(u))
+  #define __Pyx_PyUnicode_READ(k, d, i)   ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+  #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)
+  #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)
+#else
+  #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)
+  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
+      PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#define __Pyx_PyString_FormatSafe(a, b)  ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyUnicode_FormatSafe(a, b)  ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if PY_MAJOR_VERSION >= 3
+  #define __Pyx_PyString_Format(a, b)  PyUnicode_Format(a, b)
+#else
+  #define __Pyx_PyString_Format(a, b)  PyString_Format(a, b)
+#endif
+#if PY_MAJOR_VERSION >= 3
+  #define PyBaseString_Type            PyUnicode_Type
+  #define PyStringObject               PyUnicodeObject
+  #define PyString_Type                PyUnicode_Type
+  #define PyString_Check               PyUnicode_Check
+  #define PyString_CheckExact          PyUnicode_CheckExact
+#endif
+#if PY_VERSION_HEX < 0x02060000
+  #define PyBytesObject                PyStringObject
+  #define PyBytes_Type                 PyString_Type
+  #define PyBytes_Check                PyString_Check
+  #define PyBytes_CheckExact           PyString_CheckExact
+  #define PyBytes_FromString           PyString_FromString
+  #define PyBytes_FromStringAndSize    PyString_FromStringAndSize
+  #define PyBytes_FromFormat           PyString_FromFormat
+  #define PyBytes_DecodeEscape         PyString_DecodeEscape
+  #define PyBytes_AsString             PyString_AsString
+  #define PyBytes_AsStringAndSize      PyString_AsStringAndSize
+  #define PyBytes_Size                 PyString_Size
+  #define PyBytes_AS_STRING            PyString_AS_STRING
+  #define PyBytes_GET_SIZE             PyString_GET_SIZE
+  #define PyBytes_Repr                 PyString_Repr
+  #define PyBytes_Concat               PyString_Concat
+  #define PyBytes_ConcatAndDel         PyString_ConcatAndDel
+#endif
+#if PY_MAJOR_VERSION >= 3
+  #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+  #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+  #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \
+                                         PyString_Check(obj) || PyUnicode_Check(obj))
+  #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
+#endif
+#if PY_VERSION_HEX < 0x02060000
+  #define PySet_Check(obj)             PyObject_TypeCheck(obj, &PySet_Type)
+  #define PyFrozenSet_Check(obj)       PyObject_TypeCheck(obj, &PyFrozenSet_Type)
+#endif
+#ifndef PySet_CheckExact
+  #define PySet_CheckExact(obj)        (Py_TYPE(obj) == &PySet_Type)
+#endif
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#if PY_MAJOR_VERSION >= 3
+  #define PyIntObject                  PyLongObject
+  #define PyInt_Type                   PyLong_Type
+  #define PyInt_Check(op)              PyLong_Check(op)
+  #define PyInt_CheckExact(op)         PyLong_CheckExact(op)
+  #define PyInt_FromString             PyLong_FromString
+  #define PyInt_FromUnicode            PyLong_FromUnicode
+  #define PyInt_FromLong               PyLong_FromLong
+  #define PyInt_FromSize_t             PyLong_FromSize_t
+  #define PyInt_FromSsize_t            PyLong_FromSsize_t
+  #define PyInt_AsLong                 PyLong_AsLong
+  #define PyInt_AS_LONG                PyLong_AS_LONG
+  #define PyInt_AsSsize_t              PyLong_AsSsize_t
+  #define PyInt_AsUnsignedLongMask     PyLong_AsUnsignedLongMask
+  #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+  #define PyNumber_Int                 PyNumber_Long
+#endif
+#if PY_MAJOR_VERSION >= 3
+  #define PyBoolObject                 PyLongObject
+#endif
+#if PY_VERSION_HEX < 0x030200A4
+  typedef long Py_hash_t;
+  #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+  #define __Pyx_PyInt_AsHash_t   PyInt_AsLong
+#else
+  #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+  #define __Pyx_PyInt_AsHash_t   PyInt_AsSsize_t
+#endif
+#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
+  #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
+  #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
+  #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
+#else
+  #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
+        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
+        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
+            (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
+  #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
+        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
+        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
+            (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
+  #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
+        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
+        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
+            (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
+#endif
+#if PY_MAJOR_VERSION >= 3
+  #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
+#endif
+#if PY_VERSION_HEX < 0x02050000
+  #define __Pyx_GetAttrString(o,n)   PyObject_GetAttrString((o),((char *)(n)))
+  #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
+  #define __Pyx_DelAttrString(o,n)   PyObject_DelAttrString((o),((char *)(n)))
+#else
+  #define __Pyx_GetAttrString(o,n)   PyObject_GetAttrString((o),(n))
+  #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
+  #define __Pyx_DelAttrString(o,n)   PyObject_DelAttrString((o),(n))
+#endif
+#if PY_VERSION_HEX < 0x02050000
+  #define __Pyx_NAMESTR(n) ((char *)(n))
+  #define __Pyx_DOCSTR(n)  ((char *)(n))
+#else
+  #define __Pyx_NAMESTR(n) (n)
+  #define __Pyx_DOCSTR(n)  (n)
+#endif
+#ifndef CYTHON_INLINE
+  #if defined(__GNUC__)
+    #define CYTHON_INLINE __inline__
+  #elif defined(_MSC_VER)
+    #define CYTHON_INLINE __inline
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_INLINE inline
+  #else
+    #define CYTHON_INLINE
+  #endif
+#endif
+#ifndef CYTHON_RESTRICT
+  #if defined(__GNUC__)
+    #define CYTHON_RESTRICT __restrict__
+  #elif defined(_MSC_VER) && _MSC_VER >= 1400
+    #define CYTHON_RESTRICT __restrict
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_RESTRICT restrict
+  #else
+    #define CYTHON_RESTRICT
+  #endif
+#endif
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+  /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
+   a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
+   a quiet NaN. */
+  float value;
+  memset(&value, 0xFF, sizeof(value));  /* all-ones bit pattern: exponent all 1s, mantissa nonzero -> NaN */
+  return value;
+}
+#endif
+#ifdef __cplusplus
+template<typename T>
+void __Pyx_call_destructor(T* x) {
+    x->~T();  /* explicit in-place destructor call: destroys the object without freeing its memory */
+}
+#endif
+
+
+#if PY_MAJOR_VERSION >= 3
+  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)
+  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)
+#else
+  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_Divide(x,y)
+  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceDivide(x,y)
+#endif
+
+#ifndef __PYX_EXTERN_C
+  #ifdef __cplusplus
+    #define __PYX_EXTERN_C extern "C"
+  #else
+    #define __PYX_EXTERN_C extern
+  #endif
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+#define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#define __PYX_HAVE__skbio__alignment___ssw_wrapper
+#define __PYX_HAVE_API__skbio__alignment___ssw_wrapper
+#include "string.h"
+#include "stdio.h"
+#include "pythread.h"
+#include "stdlib.h"
+#include "numpy/arrayobject.h"
+#include "numpy/ufuncobject.h"
+#include "_lib/ssw.h"
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#ifdef PYREX_WITHOUT_ASSERTIONS
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+#     define CYTHON_UNUSED __attribute__ ((__unused__))
+#   else
+#     define CYTHON_UNUSED
+#   endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+#   define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+#   define CYTHON_UNUSED
+# endif
+#endif
+typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
+                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (    \
+    (sizeof(type) < sizeof(Py_ssize_t))  ||             \
+    (sizeof(type) > sizeof(Py_ssize_t) &&               \
+          likely(v < (type)PY_SSIZE_T_MAX ||            \
+                 v == (type)PY_SSIZE_T_MAX)  &&         \
+          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||       \
+                                v == (type)PY_SSIZE_T_MIN)))  ||  \
+    (sizeof(type) == sizeof(Py_ssize_t) &&              \
+          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||        \
+                               v == (type)PY_SSIZE_T_MAX)))  )
+static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString        PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if PY_MAJOR_VERSION < 3
+    #define __Pyx_PyStr_FromString        __Pyx_PyBytes_FromString
+    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+    #define __Pyx_PyStr_FromString        __Pyx_PyUnicode_FromString
+    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyObject_AsSString(s)    ((signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s)    ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromUString(s)  __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromUString(s)   __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromUString(s)   __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromUString(s)     __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((const char*)s)
+#if PY_MAJOR_VERSION < 3
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
+{
+    const Py_UNICODE *u_end = u;  /* scan pointer; advances one past the terminating NUL */
+    while (*u_end++) ;
+    return (size_t)(u_end - u - 1);  /* -1 because u_end overshot the terminator */
+}
+#else
+#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen  /* Python 3 provides this natively */
+#endif
+#define __Pyx_PyUnicode_FromUnicode(u)       PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode            PyUnicode_AsUnicode
+#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
+#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#else
+#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#endif
+#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+static int __Pyx_sys_getdefaultencoding_not_ascii;  /* set to 1 when sys.getdefaultencoding() is a non-ascii superset of ascii */
+static int __Pyx_init_sys_getdefaultencoding_params(void) {  /* probe sys.getdefaultencoding(); returns 0 on success, -1 with an exception set on failure */
+    PyObject* sys;
+    PyObject* default_encoding = NULL;
+    PyObject* ascii_chars_u = NULL;
+    PyObject* ascii_chars_b = NULL;
+    const char* default_encoding_c;
+    sys = PyImport_ImportModule("sys");
+    if (!sys) goto bad;
+    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+    Py_DECREF(sys);
+    if (!default_encoding) goto bad;
+    default_encoding_c = PyBytes_AsString(default_encoding);  /* borrowed buffer, valid while default_encoding is alive */
+    if (!default_encoding_c) goto bad;
+    if (strcmp(default_encoding_c, "ascii") == 0) {
+        __Pyx_sys_getdefaultencoding_not_ascii = 0;  /* fast path: exactly ascii, nothing more to verify */
+    } else {
+        char ascii_chars[128];
+        int c;
+        for (c = 0; c < 128; c++) {
+            ascii_chars[c] = c;
+        }
+        __Pyx_sys_getdefaultencoding_not_ascii = 1;
+        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+        if (!ascii_chars_u) goto bad;
+        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);  /* round-trip all 128 ascii code points through the default encoding */
+        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {  /* encoding must map ascii to itself byte-for-byte */
+            PyErr_Format(
+                PyExc_ValueError,
+                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+                default_encoding_c);
+            goto bad;
+        }
+        Py_DECREF(ascii_chars_u);
+        Py_DECREF(ascii_chars_b);
+    }
+    Py_DECREF(default_encoding);
+    return 0;
+bad:
+    Py_XDECREF(default_encoding);  /* XDECREF: any of these may still be NULL depending on where we jumped from */
+    Py_XDECREF(ascii_chars_u);
+    Py_XDECREF(ascii_chars_b);
+    return -1;
+}
+#endif
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+static char* __PYX_DEFAULT_STRING_ENCODING;  /* heap copy of sys.getdefaultencoding(); lives for the process lifetime */
+static int __Pyx_init_sys_getdefaultencoding_params(void) {  /* cache the default encoding name; returns 0 on success, -1 with an exception set on failure */
+    PyObject* sys;
+    PyObject* default_encoding = NULL;
+    char* default_encoding_c;
+    sys = PyImport_ImportModule("sys");
+    if (!sys) goto bad;
+    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+    Py_DECREF(sys);
+    if (!default_encoding) goto bad;
+    default_encoding_c = PyBytes_AsString(default_encoding);  /* borrowed buffer, valid while default_encoding is alive */
+    if (!default_encoding_c) goto bad;
+    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);  /* +1 for the NUL terminator: without it the strcpy below overflows by one byte */
+    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+    Py_DECREF(default_encoding);
+    return 0;
+bad:
+    Py_XDECREF(default_encoding);
+    return -1;
+}
+#endif
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__)     && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+  #define likely(x)   __builtin_expect(!!(x), 1)
+  #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+  #define likely(x)   (x)
+  #define unlikely(x) (x)
+#endif /* __GNUC__ */
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+#if !defined(CYTHON_CCOMPLEX)
+  #if defined(__cplusplus)
+    #define CYTHON_CCOMPLEX 1
+  #elif defined(_Complex_I)
+    #define CYTHON_CCOMPLEX 1
+  #else
+    #define CYTHON_CCOMPLEX 0
+  #endif
+#endif
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    #include <complex>
+  #else
+    #include <complex.h>
+  #endif
+#endif
+#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
+  #undef _Complex_I
+  #define _Complex_I 1.0fj
+#endif
+
+
+static const char *__pyx_f[] = {
+  "_ssw_wrapper.pyx",
+  "__init__.pxd",
+  "type.pxd",
+  "bool.pxd",
+  "complex.pxd",
+};
+#define IS_UNSIGNED(type) (((type) -1) > 0)
+struct __Pyx_StructField_;
+#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
+typedef struct {
+  const char* name; /* for error messages only */
+  struct __Pyx_StructField_* fields;
+  size_t size;     /* sizeof(type) */
+  size_t arraysize[8]; /* length of array in each dimension */
+  int ndim;
+  char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */
+  char is_unsigned;
+  int flags;
+} __Pyx_TypeInfo;
+typedef struct __Pyx_StructField_ {
+  __Pyx_TypeInfo* type;
+  const char* name;
+  size_t offset;
+} __Pyx_StructField;
+typedef struct {
+  __Pyx_StructField* field;
+  size_t parent_offset;
+} __Pyx_BufFmt_StackElem;
+typedef struct {
+  __Pyx_StructField root;
+  __Pyx_BufFmt_StackElem* head;
+  size_t fmt_offset;
+  size_t new_count, enc_count;
+  size_t struct_alignment;
+  int is_complex;
+  char enc_type;
+  char new_packmode;
+  char enc_packmode;
+  char is_valid_array;
+} __Pyx_BufFmt_Context;
+
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":723
+ * # in Cython to enable them only on the right systems.
+ * 
+ * ctypedef npy_int8       int8_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_int16      int16_t
+ * ctypedef npy_int32      int32_t
+ */
+typedef npy_int8 __pyx_t_5numpy_int8_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":724
+ * 
+ * ctypedef npy_int8       int8_t
+ * ctypedef npy_int16      int16_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_int32      int32_t
+ * ctypedef npy_int64      int64_t
+ */
+typedef npy_int16 __pyx_t_5numpy_int16_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725
+ * ctypedef npy_int8       int8_t
+ * ctypedef npy_int16      int16_t
+ * ctypedef npy_int32      int32_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_int64      int64_t
+ * #ctypedef npy_int96      int96_t
+ */
+typedef npy_int32 __pyx_t_5numpy_int32_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
+ * ctypedef npy_int16      int16_t
+ * ctypedef npy_int32      int32_t
+ * ctypedef npy_int64      int64_t             # <<<<<<<<<<<<<<
+ * #ctypedef npy_int96      int96_t
+ * #ctypedef npy_int128     int128_t
+ */
+typedef npy_int64 __pyx_t_5numpy_int64_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":730
+ * #ctypedef npy_int128     int128_t
+ * 
+ * ctypedef npy_uint8      uint8_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_uint16     uint16_t
+ * ctypedef npy_uint32     uint32_t
+ */
+typedef npy_uint8 __pyx_t_5numpy_uint8_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":731
+ * 
+ * ctypedef npy_uint8      uint8_t
+ * ctypedef npy_uint16     uint16_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_uint32     uint32_t
+ * ctypedef npy_uint64     uint64_t
+ */
+typedef npy_uint16 __pyx_t_5numpy_uint16_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732
+ * ctypedef npy_uint8      uint8_t
+ * ctypedef npy_uint16     uint16_t
+ * ctypedef npy_uint32     uint32_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_uint64     uint64_t
+ * #ctypedef npy_uint96     uint96_t
+ */
+typedef npy_uint32 __pyx_t_5numpy_uint32_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
+ * ctypedef npy_uint16     uint16_t
+ * ctypedef npy_uint32     uint32_t
+ * ctypedef npy_uint64     uint64_t             # <<<<<<<<<<<<<<
+ * #ctypedef npy_uint96     uint96_t
+ * #ctypedef npy_uint128    uint128_t
+ */
+typedef npy_uint64 __pyx_t_5numpy_uint64_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":737
+ * #ctypedef npy_uint128    uint128_t
+ * 
+ * ctypedef npy_float32    float32_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_float64    float64_t
+ * #ctypedef npy_float80    float80_t
+ */
+typedef npy_float32 __pyx_t_5numpy_float32_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":738
+ * 
+ * ctypedef npy_float32    float32_t
+ * ctypedef npy_float64    float64_t             # <<<<<<<<<<<<<<
+ * #ctypedef npy_float80    float80_t
+ * #ctypedef npy_float128   float128_t
+ */
+typedef npy_float64 __pyx_t_5numpy_float64_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":747
+ * # The int types are mapped a bit surprising --
+ * # numpy.int corresponds to 'l' and numpy.long to 'q'
+ * ctypedef npy_long       int_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_longlong   long_t
+ * ctypedef npy_longlong   longlong_t
+ */
+typedef npy_long __pyx_t_5numpy_int_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":748
+ * # numpy.int corresponds to 'l' and numpy.long to 'q'
+ * ctypedef npy_long       int_t
+ * ctypedef npy_longlong   long_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_longlong   longlong_t
+ * 
+ */
+typedef npy_longlong __pyx_t_5numpy_long_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749
+ * ctypedef npy_long       int_t
+ * ctypedef npy_longlong   long_t
+ * ctypedef npy_longlong   longlong_t             # <<<<<<<<<<<<<<
+ * 
+ * ctypedef npy_ulong      uint_t
+ */
+typedef npy_longlong __pyx_t_5numpy_longlong_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
+ * ctypedef npy_longlong   longlong_t
+ * 
+ * ctypedef npy_ulong      uint_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_ulonglong  ulong_t
+ * ctypedef npy_ulonglong  ulonglong_t
+ */
+typedef npy_ulong __pyx_t_5numpy_uint_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
+ * 
+ * ctypedef npy_ulong      uint_t
+ * ctypedef npy_ulonglong  ulong_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_ulonglong  ulonglong_t
+ * 
+ */
+typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753
+ * ctypedef npy_ulong      uint_t
+ * ctypedef npy_ulonglong  ulong_t
+ * ctypedef npy_ulonglong  ulonglong_t             # <<<<<<<<<<<<<<
+ * 
+ * ctypedef npy_intp       intp_t
+ */
+typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
+ * ctypedef npy_ulonglong  ulonglong_t
+ * 
+ * ctypedef npy_intp       intp_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_uintp      uintp_t
+ * 
+ */
+typedef npy_intp __pyx_t_5numpy_intp_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
+ * 
+ * ctypedef npy_intp       intp_t
+ * ctypedef npy_uintp      uintp_t             # <<<<<<<<<<<<<<
+ * 
+ * ctypedef npy_double     float_t
+ */
+typedef npy_uintp __pyx_t_5numpy_uintp_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
+ * ctypedef npy_uintp      uintp_t
+ * 
+ * ctypedef npy_double     float_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_double     double_t
+ * ctypedef npy_longdouble longdouble_t
+ */
+typedef npy_double __pyx_t_5numpy_float_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
+ * 
+ * ctypedef npy_double     float_t
+ * ctypedef npy_double     double_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_longdouble longdouble_t
+ * 
+ */
+typedef npy_double __pyx_t_5numpy_double_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760
+ * ctypedef npy_double     float_t
+ * ctypedef npy_double     double_t
+ * ctypedef npy_longdouble longdouble_t             # <<<<<<<<<<<<<<
+ * 
+ * ctypedef npy_cfloat      cfloat_t
+ */
+typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    typedef ::std::complex< float > __pyx_t_float_complex;
+  #else
+    typedef float _Complex __pyx_t_float_complex;
+  #endif
+#else
+    typedef struct { float real, imag; } __pyx_t_float_complex;
+#endif
+
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    typedef ::std::complex< double > __pyx_t_double_complex;
+  #else
+    typedef double _Complex __pyx_t_double_complex;
+  #endif
+#else
+    typedef struct { double real, imag; } __pyx_t_double_complex;
+#endif
+
+
+/*--- Type declarations ---*/
+struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
+struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
+ * ctypedef npy_longdouble longdouble_t
+ * 
+ * ctypedef npy_cfloat      cfloat_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_cdouble     cdouble_t
+ * ctypedef npy_clongdouble clongdouble_t
+ */
+typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
+ * 
+ * ctypedef npy_cfloat      cfloat_t
+ * ctypedef npy_cdouble     cdouble_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_clongdouble clongdouble_t
+ * 
+ */
+typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764
+ * ctypedef npy_cfloat      cfloat_t
+ * ctypedef npy_cdouble     cdouble_t
+ * ctypedef npy_clongdouble clongdouble_t             # <<<<<<<<<<<<<<
+ * 
+ * ctypedef npy_cdouble     complex_t
+ */
+typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
+ * ctypedef npy_clongdouble clongdouble_t
+ * 
+ * ctypedef npy_cdouble     complex_t             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object PyArray_MultiIterNew1(a):
+ */
+typedef npy_cdouble __pyx_t_5numpy_complex_t;
+
+/* "skbio/alignment/_ssw_wrapper.pyx":74
+ * 
+ * 
+ * cdef class AlignmentStructure:             # <<<<<<<<<<<<<<
+ *     """Wraps the result of an alignment c struct so it is accessible to Python
+ * 
+ */
+struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure {
+  PyObject_HEAD
+  struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_vtab;
+  s_align *p;
+  PyObject *read_sequence;
+  PyObject *reference_sequence;
+  int index_starts_at;
+  PyObject *_cigar_string;
+};
+
+
+/* "skbio/alignment/_ssw_wrapper.pyx":416
+ *         return tuples
+ * 
+ * cdef class StripedSmithWaterman:             # <<<<<<<<<<<<<<
+ *     """Performs a striped (banded) Smith Waterman Alignment.
+ * 
+ */
+struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman {
+  PyObject_HEAD
+  struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_vtab;
+  s_profile *profile;
+  __pyx_t_5numpy_uint8_t gap_open_penalty;
+  __pyx_t_5numpy_uint8_t gap_extend_penalty;
+  __pyx_t_5numpy_uint8_t bit_flag;
+  __pyx_t_5numpy_uint16_t score_filter;
+  __pyx_t_5numpy_int32_t distance_filter;
+  __pyx_t_5numpy_int32_t mask_length;
+  PyObject *read_sequence;
+  int index_starts_at;
+  PyBoolObject *is_protein;
+  PyBoolObject *suppress_sequences;
+  PyArrayObject *__pyx___KEEP_IT_IN_SCOPE_read;
+  PyArrayObject *__pyx___KEEP_IT_IN_SCOPE_matrix;
+};
+
+
+
+/* "skbio/alignment/_ssw_wrapper.pyx":74
+ * 
+ * 
+ * cdef class AlignmentStructure:             # <<<<<<<<<<<<<<
+ *     """Wraps the result of an alignment c struct so it is accessible to Python
+ * 
+ */
+
+struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure {
+  PyObject *(*__pyx___constructor)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *, s_align *);
+};
+static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
+
+
+/* "skbio/alignment/_ssw_wrapper.pyx":416
+ *         return tuples
+ * 
+ * cdef class StripedSmithWaterman:             # <<<<<<<<<<<<<<
+ *     """Performs a striped (banded) Smith Waterman Alignment.
+ * 
+ */
+
+struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman {
+  PyArrayObject *(*_seq_converter)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *);
+  PyArrayObject *(*_build_match_matrix)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *, PyObject *);
+  PyArrayObject *(*_convert_dict2d_to_matrix)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *);
+};
+static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
+#ifndef CYTHON_REFNANNY
+  #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+  typedef struct {
+    void (*INCREF)(void*, PyObject*, int);
+    void (*DECREF)(void*, PyObject*, int);
+    void (*GOTREF)(void*, PyObject*, int);
+    void (*GIVEREF)(void*, PyObject*, int);
+    void* (*SetupContext)(const char*, int, const char*);
+    void (*FinishContext)(void**);
+  } __Pyx_RefNannyAPIStruct;
+  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
+  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+          if (acquire_gil) { \
+              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
+              PyGILState_Release(__pyx_gilstate_save); \
+          } else { \
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
+          }
+#else
+  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+          __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+#endif
+  #define __Pyx_RefNannyFinishContext() \
+          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+  #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+  #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+  #define __Pyx_GOTREF(r)  __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+  #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+  #define __Pyx_XINCREF(r)  do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+  #define __Pyx_XDECREF(r)  do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+  #define __Pyx_XGOTREF(r)  do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+  #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+  #define __Pyx_RefNannyDeclarations
+  #define __Pyx_RefNannySetupContext(name, acquire_gil)
+  #define __Pyx_RefNannyFinishContext()
+  #define __Pyx_INCREF(r) Py_INCREF(r)
+  #define __Pyx_DECREF(r) Py_DECREF(r)
+  #define __Pyx_GOTREF(r)
+  #define __Pyx_GIVEREF(r)
+  #define __Pyx_XINCREF(r) Py_XINCREF(r)
+  #define __Pyx_XDECREF(r) Py_XDECREF(r)
+  #define __Pyx_XGOTREF(r)
+  #define __Pyx_XGIVEREF(r)
+#endif /* CYTHON_REFNANNY */
+#define __Pyx_XDECREF_SET(r, v) do {                            \
+        PyObject *tmp = (PyObject *) r;                         \
+        r = v; __Pyx_XDECREF(tmp);                              \
+    } while (0)
+#define __Pyx_DECREF_SET(r, v) do {                             \
+        PyObject *tmp = (PyObject *) r;                         \
+        r = v; __Pyx_DECREF(tmp);                               \
+    } while (0)
+#define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {  /* attribute lookup that dispatches straight through the type's slots */
+    PyTypeObject* tp = Py_TYPE(obj);
+    if (likely(tp->tp_getattro))
+        return tp->tp_getattro(obj, attr_name);  /* common case: call the type's getattro slot directly */
+#if PY_MAJOR_VERSION < 3
+    if (likely(tp->tp_getattr))
+        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));  /* legacy char*-based slot, Py2 only */
+#endif
+    return PyObject_GetAttr(obj, attr_name);  /* fall back to the generic API */
+}
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/
+
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
+
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/
+
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
+    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
+    const char* function_name); /*proto*/
+
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /*proto*/
+
+#if PY_MAJOR_VERSION < 3
+#define __Pyx_PyString_Join __Pyx_PyBytes_Join
+#define __Pyx_PyBaseString_Join(s, v) (PyUnicode_CheckExact(s) ? PyUnicode_Join(s, v) : __Pyx_PyBytes_Join(s, v))
+#else
+#define __Pyx_PyString_Join PyUnicode_Join
+#define __Pyx_PyBaseString_Join PyUnicode_Join
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+    #if PY_MAJOR_VERSION < 3
+    #define __Pyx_PyBytes_Join _PyString_Join
+    #else
+    #define __Pyx_PyBytes_Join _PyBytes_Join
+    #endif
+#else
+static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* values); /*proto*/
+#endif
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {  /* fast append for list comprehensions; returns 0 on success, -1 on error */
+    PyListObject* L = (PyListObject*) list;
+    Py_ssize_t len = Py_SIZE(list);
+    if (likely(L->allocated > len)) {  /* spare capacity: store without reallocating */
+        Py_INCREF(x);
+        PyList_SET_ITEM(list, len, x);  /* steals the reference taken just above */
+        Py_SIZE(list) = len+1;
+        return 0;
+    }
+    return PyList_Append(list, x);  /* no capacity: let CPython grow the list */
+}
+#else
+#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
+#endif
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
+        PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+        PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
+        int has_cstart, int has_cstop, int wraparound);
+
+#if CYTHON_COMPILING_IN_CPYTHON
+/* Cython-generated fast list.append().  Takes the inline fast path only when
+ * there is spare capacity AND the list is more than half full (a heuristic
+ * that keeps PyList_Append's growth pattern for small/shrinking lists).
+ * Returns 0 on success, -1 on error (only the fallback can fail).
+ * NOTE(review): the single `&` between the two likely() tests is intentional
+ * upstream Cython code -- both operands are 0/1, so bitwise AND is equivalent
+ * to && here and avoids a second branch; it is not an operator typo.
+ * NOTE(review): `Py_SIZE(list) = len+1` writes ob_size directly; see the
+ * companion note on __Pyx_ListComp_Append about newer CPython. */
+static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
+    PyListObject* L = (PyListObject*) list;
+    Py_ssize_t len = Py_SIZE(list);
+    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
+        Py_INCREF(x);                   /* the list takes its own reference */
+        PyList_SET_ITEM(list, len, x);  /* raw store, no bounds/error checks */
+        Py_SIZE(list) = len+1;          /* publish the new element count */
+        return 0;
+    }
+    return PyList_Append(list, x);
+}
+#else
+/* Non-CPython: no access to PyListObject internals, use the public API. */
+#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
+#endif
+
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/
+
+#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
+    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
+    __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \
+    (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \
+               __Pyx_GetItemInt_Generic(o, to_py_func(i))))
+#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
+    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
+    __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
+    (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+                                                              int wraparound, int boundscheck);
+#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
+    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
+    __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
+    (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+                                                              int wraparound, int boundscheck);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
+                                                     int is_list, int wraparound, int boundscheck);
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
+
+static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/
+
+static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/
+
+#include <string.h>
+
+static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/
+
+static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/
+
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
+#else
+#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
+#endif
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
+
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
+
+static CYTHON_INLINE int  __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
+    __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
+static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
+
+static void __Pyx_RaiseBufferFallbackError(void); /*proto*/
+
+static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* proto */
+
+#if PY_MAJOR_VERSION >= 3
+/* Cython-generated d[key] lookup for Python 3: uses PyDict_GetItemWithError
+ * so real errors raised during hashing/comparison propagate, and raises
+ * KeyError itself when the key is simply absent.  Returns a NEW reference
+ * on success, NULL with an exception set on failure. */
+static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
+    PyObject *value;
+    value = PyDict_GetItemWithError(d, key);
+    if (unlikely(!value)) {
+        if (!PyErr_Occurred()) {
+            /* Missing key (not an error from __hash__/__eq__): raise
+             * KeyError.  The key is wrapped in a 1-tuple via PyTuple_Pack so
+             * that a tuple key survives intact as KeyError.args[0] instead of
+             * being splatted into multiple args. */
+            PyObject* args = PyTuple_Pack(1, key);
+            if (likely(args))
+                PyErr_SetObject(PyExc_KeyError, args);
+            Py_XDECREF(args);
+        }
+        return NULL;
+    }
+    Py_INCREF(value);  /* PyDict_GetItemWithError returns a borrowed ref */
+    return value;
+}
+#else
+    /* Python 2: PyObject_GetItem already raises KeyError on a missing key. */
+    #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
+#endif
+
+static void __Pyx_RaiseBufferIndexError(int axis); /*proto*/
+
+#define __Pyx_BufPtrCContig1d(type, buf, i0, s0) ((type)buf + i0)
+static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value); /*proto*/
+
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
+
+static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /*proto*/
+
+static PyObject *__Pyx_GetNameInClass(PyObject *nmspace, PyObject *name); /*proto*/
+
+typedef struct {
+  Py_ssize_t shape, strides, suboffsets;
+} __Pyx_Buf_DimInfo;
+typedef struct {
+  size_t refcount;
+  Py_buffer pybuffer;
+} __Pyx_Buffer;
+typedef struct {
+  __Pyx_Buffer *rcbuffer;
+  char *data;
+  __Pyx_Buf_DimInfo diminfo[8];
+} __Pyx_LocalBuf_ND;
+
+#if PY_MAJOR_VERSION < 3
+    static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
+    static void __Pyx_ReleaseBuffer(Py_buffer *view);
+#else
+    #define __Pyx_GetBuffer PyObject_GetBuffer
+    #define __Pyx_ReleaseBuffer PyBuffer_Release
+#endif
+
+
+static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/
+
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_uint16(npy_uint16 value);
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int32(npy_int32 value);
+
+static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *);
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+static CYTHON_INLINE npy_uint8 __Pyx_PyInt_As_npy_uint8(PyObject *);
+
+static CYTHON_INLINE npy_uint16 __Pyx_PyInt_As_npy_uint16(PyObject *);
+
+static CYTHON_INLINE npy_int8 __Pyx_PyInt_As_npy_int8(PyObject *);
+
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    #define __Pyx_CREAL(z) ((z).real())
+    #define __Pyx_CIMAG(z) ((z).imag())
+  #else
+    #define __Pyx_CREAL(z) (__real__(z))
+    #define __Pyx_CIMAG(z) (__imag__(z))
+  #endif
+#else
+    #define __Pyx_CREAL(z) ((z).real)
+    #define __Pyx_CIMAG(z) ((z).imag)
+#endif
+#if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX
+    #define __Pyx_SET_CREAL(z,x) ((z).real(x))
+    #define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
+#else
+    #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
+    #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
+#endif
+
+static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
+
+#if CYTHON_CCOMPLEX
+    #define __Pyx_c_eqf(a, b)   ((a)==(b))
+    #define __Pyx_c_sumf(a, b)  ((a)+(b))
+    #define __Pyx_c_difff(a, b) ((a)-(b))
+    #define __Pyx_c_prodf(a, b) ((a)*(b))
+    #define __Pyx_c_quotf(a, b) ((a)/(b))
+    #define __Pyx_c_negf(a)     (-(a))
+  #ifdef __cplusplus
+    #define __Pyx_c_is_zerof(z) ((z)==(float)0)
+    #define __Pyx_c_conjf(z)    (::std::conj(z))
+    #if 1
+        #define __Pyx_c_absf(z)     (::std::abs(z))
+        #define __Pyx_c_powf(a, b)  (::std::pow(a, b))
+    #endif
+  #else
+    #define __Pyx_c_is_zerof(z) ((z)==0)
+    #define __Pyx_c_conjf(z)    (conjf(z))
+    #if 1
+        #define __Pyx_c_absf(z)     (cabsf(z))
+        #define __Pyx_c_powf(a, b)  (cpowf(a, b))
+    #endif
+ #endif
+#else
+    static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
+    static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
+    #if 1
+        static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
+        static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
+    #endif
+#endif
+
+static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
+
+#if CYTHON_CCOMPLEX
+    #define __Pyx_c_eq(a, b)   ((a)==(b))
+    #define __Pyx_c_sum(a, b)  ((a)+(b))
+    #define __Pyx_c_diff(a, b) ((a)-(b))
+    #define __Pyx_c_prod(a, b) ((a)*(b))
+    #define __Pyx_c_quot(a, b) ((a)/(b))
+    #define __Pyx_c_neg(a)     (-(a))
+  #ifdef __cplusplus
+    #define __Pyx_c_is_zero(z) ((z)==(double)0)
+    #define __Pyx_c_conj(z)    (::std::conj(z))
+    #if 1
+        #define __Pyx_c_abs(z)     (::std::abs(z))
+        #define __Pyx_c_pow(a, b)  (::std::pow(a, b))
+    #endif
+  #else
+    #define __Pyx_c_is_zero(z) ((z)==0)
+    #define __Pyx_c_conj(z)    (conj(z))
+    #if 1
+        #define __Pyx_c_abs(z)     (cabs(z))
+        #define __Pyx_c_pow(a, b)  (cpow(a, b))
+    #endif
+ #endif
+#else
+    static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
+    static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
+    #if 1
+        static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
+        static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
+    #endif
+#endif
+
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
+
+static int __Pyx_check_binary_version(void);
+
+#if !defined(__Pyx_PyIdentifier_FromString)
+#if PY_MAJOR_VERSION < 3
+  #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
+#else
+  #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
+#endif
+#endif
+
+static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
+
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);  /*proto*/
+
+typedef struct {
+    int code_line;
+    PyCodeObject* code_object;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+    int count;
+    int max_count;
+    __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+                               int py_line, const char *filename); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+
+/* Module declarations from 'cpython.version' */
+
+/* Module declarations from 'cpython.ref' */
+
+/* Module declarations from 'cpython.exc' */
+
+/* Module declarations from 'cpython.module' */
+
+/* Module declarations from 'cpython.mem' */
+
+/* Module declarations from 'cpython.tuple' */
+
+/* Module declarations from 'cpython.list' */
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from 'cpython.sequence' */
+
+/* Module declarations from 'cpython.mapping' */
+
+/* Module declarations from 'cpython.iterator' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.type' */
+static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
+
+/* Module declarations from 'cpython.number' */
+
+/* Module declarations from 'cpython.int' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.bool' */
+static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0;
+
+/* Module declarations from 'cpython.long' */
+
+/* Module declarations from 'cpython.float' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.complex' */
+static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0;
+
+/* Module declarations from 'cpython.string' */
+
+/* Module declarations from 'cpython.unicode' */
+
+/* Module declarations from 'cpython.dict' */
+
+/* Module declarations from 'cpython.instance' */
+
+/* Module declarations from 'cpython.function' */
+
+/* Module declarations from 'cpython.method' */
+
+/* Module declarations from 'cpython.weakref' */
+
+/* Module declarations from 'cpython.getargs' */
+
+/* Module declarations from 'cpython.pythread' */
+
+/* Module declarations from 'cpython.pystate' */
+
+/* Module declarations from 'cpython.cobject' */
+
+/* Module declarations from 'cpython.oldbuffer' */
+
+/* Module declarations from 'cpython.set' */
+
+/* Module declarations from 'cpython.buffer' */
+
+/* Module declarations from 'cpython.bytes' */
+
+/* Module declarations from 'cpython.pycapsule' */
+
+/* Module declarations from 'cpython' */
+
+/* Module declarations from 'libc.stdlib' */
+
+/* Module declarations from 'numpy' */
+
+/* Module declarations from 'numpy' */
+static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
+static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
+static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
+static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
+static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
+static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
+
+/* Module declarations from 'skbio.alignment._ssw_wrapper' */
+static PyTypeObject *__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure = 0;
+static PyTypeObject *__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman = 0;
+static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t = { "int8_t", NULL, sizeof(__pyx_t_5numpy_int8_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int8_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int8_t), 0 };
+#define __Pyx_MODULE_NAME "skbio.alignment._ssw_wrapper"
+int __pyx_module_is_main_skbio__alignment___ssw_wrapper = 0;
+
+/* Implementation of 'skbio.alignment._ssw_wrapper' */
+static PyObject *__pyx_builtin_property;
+static PyObject *__pyx_builtin_range;
+static PyObject *__pyx_builtin_ValueError;
+static PyObject *__pyx_builtin_Exception;
+static PyObject *__pyx_builtin_enumerate;
+static PyObject *__pyx_builtin_ord;
+static PyObject *__pyx_builtin_RuntimeError;
+static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cinit__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_read_sequence, PyObject *__pyx_v_reference_sequence, PyObject *__pyx_v_index_starts_at); /* proto */
+static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__dealloc__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_4__getitem__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_key); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_6__repr__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_8__str__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_10optimal_alignment_score(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_12suboptimal_alignment_score(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_14target_begin(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_16target_end_optimal(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_18target_end_suboptimal(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_20query_begin(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_22query_end(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_24cigar(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_26query_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_28target_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_30aligned_query_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_32aligned_target_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_34set_zero_based(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_is_zero_based); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_36is_zero_based(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_38_get_aligned_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_sequence, PyObject *__pyx_v_tuple_cigar, PyObject *__pyx_v_begin, PyObject *__pyx_v_end, PyObject *__pyx_v_gap_type); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_40_tuples_from_cigar(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self); /* proto */
+static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___cinit__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_query_sequence, PyObject *__pyx_v_gap_open_penalty, PyObject *__pyx_v_gap_extend_penalty, PyObject *__pyx_v_score_size, PyObject *__pyx_v_mask_length, PyObject *__pyx_v_mask_auto, PyObject *__pyx_v_score_only, PyObject *__pyx_v_score_filter, PyObject *__pyx_v_distance_filter, PyObject *__pyx_v_overrid [...]
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_target_sequence); /* proto */
+static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__dealloc__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_6_get_bit_flag(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_override_skip_babp, PyObject *__pyx_v_score_only); /* proto */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_local_pairwise_align_ssw(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_sequence1, PyObject *__pyx_v_sequence2, PyObject *__pyx_v_kwargs); /* proto */
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
+static PyObject *__pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static char __pyx_k_[] = ",\n";
+static char __pyx_k_B[] = "B";
+static char __pyx_k_D[] = "D";
+static char __pyx_k_H[] = "H";
+static char __pyx_k_I[] = "I";
+static char __pyx_k_L[] = "L";
+static char __pyx_k_M[] = "M";
+static char __pyx_k_N[] = "N";
+static char __pyx_k_O[] = "O";
+static char __pyx_k_Q[] = "Q";
+static char __pyx_k_b[] = "b";
+static char __pyx_k_d[] = "d";
+static char __pyx_k_f[] = "f";
+static char __pyx_k_g[] = "g";
+static char __pyx_k_h[] = "h";
+static char __pyx_k_i[] = "i";
+static char __pyx_k_l[] = "l";
+static char __pyx_k_q[] = "q";
+static char __pyx_k_s[] = "{\n%s\n}";
+static char __pyx_k_Zd[] = "Zd";
+static char __pyx_k_Zf[] = "Zf";
+static char __pyx_k_Zg[] = "Zg";
+static char __pyx_k__3[] = "...";
+static char __pyx_k__5[] = "\n";
+static char __pyx_k__6[] = "";
+static char __pyx_k__8[] = "-";
+static char __pyx_k_id[] = "id";
+static char __pyx_k_np[] = "np";
+static char __pyx_k_end[] = "end";
+static char __pyx_k_get[] = "get";
+static char __pyx_k_ord[] = "ord";
+static char __pyx_k_r_r[] = "    {!r}: {!r}";
+static char __pyx_k_int8[] = "int8";
+static char __pyx_k_join[] = "join";
+static char __pyx_k_main[] = "__main__";
+static char __pyx_k_seqs[] = "seqs";
+static char __pyx_k_test[] = "__test__";
+static char __pyx_k_ACGTN[] = "ACGTN";
+static char __pyx_k_array[] = "array";
+static char __pyx_k_begin[] = "begin";
+static char __pyx_k_cigar[] = "cigar";
+static char __pyx_k_dtype[] = "dtype";
+static char __pyx_k_empty[] = "empty";
+static char __pyx_k_numpy[] = "numpy";
+static char __pyx_k_query[] = "query";
+static char __pyx_k_range[] = "range";
+static char __pyx_k_score[] = "score";
+static char __pyx_k_format[] = "format";
+static char __pyx_k_import[] = "__import__";
+static char __pyx_k_kwargs[] = "kwargs";
+static char __pyx_k_target[] = "target";
+static char __pyx_k_Score_d[] = "Score: %d";
+static char __pyx_k_isdigit[] = "isdigit";
+static char __pyx_k_protein[] = "protein";
+static char __pyx_k_Length_d[] = "Length: %d";
+static char __pyx_k_gap_type[] = "gap_type";
+static char __pyx_k_property[] = "property";
+static char __pyx_k_sequence[] = "sequence";
+static char __pyx_k_Alignment[] = "Alignment";
+static char __pyx_k_Exception[] = "Exception";
+static char __pyx_k_alignment[] = "alignment";
+static char __pyx_k_enumerate[] = "enumerate";
+static char __pyx_k_mask_auto[] = "mask_auto";
+static char __pyx_k_mid_table[] = "mid_table";
+static char __pyx_k_query_end[] = "query_end";
+static char __pyx_k_sequence1[] = "sequence1";
+static char __pyx_k_sequence2[] = "sequence2";
+static char __pyx_k_start_end[] = "start_end";
+static char __pyx_k_ValueError[] = "ValueError";
+static char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
+static char __pyx_k_score_only[] = "score_only";
+static char __pyx_k_score_size[] = "score_size";
+static char __pyx_k_zero_index[] = "zero_index";
+static char __pyx_k_mask_length[] = "mask_length";
+static char __pyx_k_match_score[] = "match_score";
+static char __pyx_k_np_aa_table[] = "np_aa_table";
+static char __pyx_k_np_nt_table[] = "np_nt_table";
+static char __pyx_k_query_begin[] = "query_begin";
+static char __pyx_k_tuple_cigar[] = "tuple_cigar";
+static char __pyx_k_RuntimeError[] = "RuntimeError";
+static char __pyx_k_get_bit_flag[] = "_get_bit_flag";
+static char __pyx_k_score_filter[] = "score_filter";
+static char __pyx_k_target_begin[] = "target_begin";
+static char __pyx_k_is_zero_based[] = "is_zero_based";
+static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
+static char __pyx_k_read_sequence[] = "read_sequence";
+static char __pyx_k_mismatch_score[] = "mismatch_score";
+static char __pyx_k_query_sequence[] = "query_sequence";
+static char __pyx_k_set_zero_based[] = "set_zero_based";
+static char __pyx_k_skbio_sequence[] = "skbio.sequence";
+static char __pyx_k_ProteinSequence[] = "ProteinSequence";
+static char __pyx_k_distance_filter[] = "distance_filter";
+static char __pyx_k_index_starts_at[] = "index_starts_at";
+static char __pyx_k_skbio_alignment[] = "skbio.alignment";
+static char __pyx_k_target_sequence[] = "target_sequence";
+static char __pyx_k_gap_open_penalty[] = "gap_open_penalty";
+static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer";
+static char __pyx_k_tuples_from_cigar[] = "_tuples_from_cigar";
+static char __pyx_k_NucleotideSequence[] = "NucleotideSequence";
+static char __pyx_k_gap_extend_penalty[] = "gap_extend_penalty";
+static char __pyx_k_override_skip_babp[] = "override_skip_babp";
+static char __pyx_k_reference_sequence[] = "reference_sequence";
+static char __pyx_k_suppress_sequences[] = "suppress_sequences";
+static char __pyx_k_target_end_optimal[] = "target_end_optimal";
+static char __pyx_k_start_end_positions[] = "start_end_positions";
+static char __pyx_k_substitution_matrix[] = "substitution_matrix";
+static char __pyx_k_get_aligned_sequence[] = "_get_aligned_sequence";
+static char __pyx_k_target_end_suboptimal[] = "target_end_suboptimal";
+static char __pyx_k_aligned_query_sequence[] = "aligned_query_sequence";
+static char __pyx_k_ARNDCQEGHILKMFPSTWYVBZX[] = "ARNDCQEGHILKMFPSTWYVBZX*";
+static char __pyx_k_aligned_target_sequence[] = "aligned_target_sequence";
+static char __pyx_k_optimal_alignment_score[] = "optimal_alignment_score";
+static char __pyx_k_local_pairwise_align_ssw[] = "local_pairwise_align_ssw";
+static char __pyx_k_gap_open_penalty_must_be_0[] = "`gap_open_penalty` must be > 0";
+static char __pyx_k_suboptimal_alignment_score[] = "suboptimal_alignment_score";
+static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
+static char __pyx_k_gap_extend_penalty_must_be_0[] = "`gap_extend_penalty` must be > 0";
+static char __pyx_k_skbio_alignment__ssw_wrapper[] = "skbio.alignment._ssw_wrapper";
+static char __pyx_k_Users_jairideout_dev_scikit_bio[] = "/Users/jairideout/dev/scikit-bio/skbio/alignment/_ssw_wrapper.pyx";
+static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
+static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
+static char __pyx_k_Must_provide_a_substitution_matr[] = "Must provide a substitution matrix for protein sequences";
+static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
+static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
+static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
+static PyObject *__pyx_kp_s_;
+static PyObject *__pyx_n_s_ACGTN;
+static PyObject *__pyx_kp_s_ARNDCQEGHILKMFPSTWYVBZX;
+static PyObject *__pyx_n_s_Alignment;
+static PyObject *__pyx_n_s_D;
+static PyObject *__pyx_n_s_Exception;
+static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
+static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
+static PyObject *__pyx_n_s_I;
+static PyObject *__pyx_kp_s_Length_d;
+static PyObject *__pyx_n_s_M;
+static PyObject *__pyx_kp_s_Must_provide_a_substitution_matr;
+static PyObject *__pyx_n_s_N;
+static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
+static PyObject *__pyx_n_s_NucleotideSequence;
+static PyObject *__pyx_n_s_ProteinSequence;
+static PyObject *__pyx_n_s_RuntimeError;
+static PyObject *__pyx_kp_s_Score_d;
+static PyObject *__pyx_kp_s_Users_jairideout_dev_scikit_bio;
+static PyObject *__pyx_n_s_ValueError;
+static PyObject *__pyx_kp_s__3;
+static PyObject *__pyx_kp_s__5;
+static PyObject *__pyx_kp_s__6;
+static PyObject *__pyx_kp_s__8;
+static PyObject *__pyx_n_s_aligned_query_sequence;
+static PyObject *__pyx_n_s_aligned_target_sequence;
+static PyObject *__pyx_n_s_alignment;
+static PyObject *__pyx_n_s_array;
+static PyObject *__pyx_n_s_begin;
+static PyObject *__pyx_n_s_cigar;
+static PyObject *__pyx_n_s_distance_filter;
+static PyObject *__pyx_n_s_dtype;
+static PyObject *__pyx_n_s_empty;
+static PyObject *__pyx_n_s_end;
+static PyObject *__pyx_n_s_enumerate;
+static PyObject *__pyx_n_s_format;
+static PyObject *__pyx_n_s_gap_extend_penalty;
+static PyObject *__pyx_kp_s_gap_extend_penalty_must_be_0;
+static PyObject *__pyx_n_s_gap_open_penalty;
+static PyObject *__pyx_kp_s_gap_open_penalty_must_be_0;
+static PyObject *__pyx_n_s_gap_type;
+static PyObject *__pyx_n_s_get;
+static PyObject *__pyx_n_s_get_aligned_sequence;
+static PyObject *__pyx_n_s_get_bit_flag;
+static PyObject *__pyx_n_s_id;
+static PyObject *__pyx_n_s_import;
+static PyObject *__pyx_n_s_index_starts_at;
+static PyObject *__pyx_n_s_int8;
+static PyObject *__pyx_n_s_is_zero_based;
+static PyObject *__pyx_n_s_isdigit;
+static PyObject *__pyx_n_s_join;
+static PyObject *__pyx_n_s_kwargs;
+static PyObject *__pyx_n_s_local_pairwise_align_ssw;
+static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_mask_auto;
+static PyObject *__pyx_n_s_mask_length;
+static PyObject *__pyx_n_s_match_score;
+static PyObject *__pyx_n_s_mid_table;
+static PyObject *__pyx_n_s_mismatch_score;
+static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
+static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
+static PyObject *__pyx_n_s_np;
+static PyObject *__pyx_n_s_np_aa_table;
+static PyObject *__pyx_n_s_np_nt_table;
+static PyObject *__pyx_n_s_numpy;
+static PyObject *__pyx_n_s_optimal_alignment_score;
+static PyObject *__pyx_n_s_ord;
+static PyObject *__pyx_n_s_override_skip_babp;
+static PyObject *__pyx_n_s_property;
+static PyObject *__pyx_n_s_protein;
+static PyObject *__pyx_n_s_pyx_getbuffer;
+static PyObject *__pyx_n_s_pyx_releasebuffer;
+static PyObject *__pyx_n_s_pyx_vtable;
+static PyObject *__pyx_n_s_query;
+static PyObject *__pyx_n_s_query_begin;
+static PyObject *__pyx_n_s_query_end;
+static PyObject *__pyx_n_s_query_sequence;
+static PyObject *__pyx_kp_s_r_r;
+static PyObject *__pyx_n_s_range;
+static PyObject *__pyx_n_s_read_sequence;
+static PyObject *__pyx_n_s_reference_sequence;
+static PyObject *__pyx_kp_s_s;
+static PyObject *__pyx_n_s_score;
+static PyObject *__pyx_n_s_score_filter;
+static PyObject *__pyx_n_s_score_only;
+static PyObject *__pyx_n_s_score_size;
+static PyObject *__pyx_n_s_seqs;
+static PyObject *__pyx_n_s_sequence;
+static PyObject *__pyx_n_s_sequence1;
+static PyObject *__pyx_n_s_sequence2;
+static PyObject *__pyx_n_s_set_zero_based;
+static PyObject *__pyx_n_s_skbio_alignment;
+static PyObject *__pyx_n_s_skbio_alignment__ssw_wrapper;
+static PyObject *__pyx_n_s_skbio_sequence;
+static PyObject *__pyx_n_s_start_end;
+static PyObject *__pyx_n_s_start_end_positions;
+static PyObject *__pyx_n_s_suboptimal_alignment_score;
+static PyObject *__pyx_n_s_substitution_matrix;
+static PyObject *__pyx_n_s_suppress_sequences;
+static PyObject *__pyx_n_s_target;
+static PyObject *__pyx_n_s_target_begin;
+static PyObject *__pyx_n_s_target_end_optimal;
+static PyObject *__pyx_n_s_target_end_suboptimal;
+static PyObject *__pyx_n_s_target_sequence;
+static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_n_s_tuple_cigar;
+static PyObject *__pyx_n_s_tuples_from_cigar;
+static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
+static PyObject *__pyx_n_s_zero_index;
+static PyObject *__pyx_int_0;
+static PyObject *__pyx_int_1;
+static PyObject *__pyx_int_2;
+static PyObject *__pyx_int_3;
+static PyObject *__pyx_int_4;
+static PyObject *__pyx_int_5;
+static PyObject *__pyx_int_6;
+static PyObject *__pyx_int_7;
+static PyObject *__pyx_int_8;
+static PyObject *__pyx_int_9;
+static PyObject *__pyx_int_10;
+static PyObject *__pyx_int_11;
+static PyObject *__pyx_int_12;
+static PyObject *__pyx_int_13;
+static PyObject *__pyx_int_14;
+static PyObject *__pyx_int_15;
+static PyObject *__pyx_int_16;
+static PyObject *__pyx_int_17;
+static PyObject *__pyx_int_18;
+static PyObject *__pyx_int_19;
+static PyObject *__pyx_int_20;
+static PyObject *__pyx_int_21;
+static PyObject *__pyx_int_22;
+static PyObject *__pyx_int_23;
+static PyObject *__pyx_int_neg_1;
+static PyObject *__pyx_int_neg_3;
+static PyObject *__pyx_slice__2;
+static PyObject *__pyx_slice__4;
+static PyObject *__pyx_tuple__7;
+static PyObject *__pyx_tuple__9;
+static PyObject *__pyx_tuple__10;
+static PyObject *__pyx_tuple__11;
+static PyObject *__pyx_tuple__12;
+static PyObject *__pyx_tuple__13;
+static PyObject *__pyx_tuple__14;
+static PyObject *__pyx_tuple__15;
+static PyObject *__pyx_tuple__16;
+static PyObject *__pyx_tuple__17;
+static PyObject *__pyx_tuple__18;
+static PyObject *__pyx_codeobj__19;
+
+/* "skbio/alignment/_ssw_wrapper.pyx":107
+ *     cdef str _cigar_string
+ * 
+ *     def __cinit__(self, read_sequence, reference_sequence, index_starts_at):             # <<<<<<<<<<<<<<
+ *         # We use `read_sequence` and `reference_sequence` here as they are
+ *         # treated sematically as a private output of ssw.c like the `s_align`
+ */
+
+/* Python wrapper */
+/* Python-level wrapper for AlignmentStructure.__cinit__ (Cython-generated).
+   Unpacks exactly three required arguments -- read_sequence,
+   reference_sequence, index_starts_at -- from the positional tuple and/or
+   keyword dict, then forwards to the __cinit__ implementation.
+   Returns -1 with a TypeError set on a bad argument count or unexpected
+   keyword. Machine-generated from _ssw_wrapper.pyx:107; do not hand-edit. */
+static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  PyObject *__pyx_v_read_sequence = 0;
+  PyObject *__pyx_v_reference_sequence = 0;
+  PyObject *__pyx_v_index_starts_at = 0;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
+  {
+    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_read_sequence,&__pyx_n_s_reference_sequence,&__pyx_n_s_index_starts_at,0};
+    PyObject* values[3] = {0,0,0};
+    if (unlikely(__pyx_kwds)) {
+      Py_ssize_t kw_args;
+      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+      /* First stage: copy however many positional args were given
+         (deliberate fall-through switch). */
+      switch (pos_args) {
+        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        case  0: break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+      kw_args = PyDict_Size(__pyx_kwds);
+      /* Second stage: fill the remaining slots from keywords; each slot
+         after the positionals is mandatory (fall-through again). */
+      switch (pos_args) {
+        case  0:
+        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_read_sequence)) != 0)) kw_args--;
+        else goto __pyx_L5_argtuple_error;
+        case  1:
+        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_reference_sequence)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+        case  2:
+        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_index_starts_at)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+      }
+      /* Any keywords left over were not consumed -> unexpected keyword. */
+      if (unlikely(kw_args > 0)) {
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+      }
+    } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+      goto __pyx_L5_argtuple_error;
+    } else {
+      /* Fast path: no keywords and exactly three positional args. */
+      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+    }
+    __pyx_v_read_sequence = values[0];
+    __pyx_v_reference_sequence = values[1];
+    __pyx_v_index_starts_at = values[2];
+  }
+  goto __pyx_L4_argument_unpacking_done;
+  __pyx_L5_argtuple_error:;
+  __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __pyx_L3_error:;
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_RefNannyFinishContext();
+  return -1;
+  __pyx_L4_argument_unpacking_done:;
+  /* Arguments validated -- delegate to the generated implementation. */
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cinit__(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self), __pyx_v_read_sequence, __pyx_v_reference_sequence, __pyx_v_index_starts_at);
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* Implementation of AlignmentStructure.__cinit__ (pyx lines 111-113):
+   stores read_sequence and reference_sequence on the instance (each must
+   be an exact `str` or None, enforced by PyString_CheckExact below) and
+   converts index_starts_at to a C int for self->index_starts_at.
+   Returns 0 on success, -1 with an exception set on failure. */
+static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cinit__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_read_sequence, PyObject *__pyx_v_reference_sequence, PyObject *__pyx_v_index_starts_at) {
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_t_2;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("__cinit__", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":111
+ *         # treated sematically as a private output of ssw.c like the `s_align`
+ *         # struct
+ *         self.read_sequence = read_sequence             # <<<<<<<<<<<<<<
+ *         self.reference_sequence = reference_sequence
+ *         self.index_starts_at = index_starts_at
+ */
+  /* Type-check (str or None), then swap in the new reference: INCREF the
+     incoming object before DECREF-ing the old attribute value. */
+  if (!(likely(PyString_CheckExact(__pyx_v_read_sequence))||((__pyx_v_read_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_read_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __pyx_v_read_sequence;
+  __Pyx_INCREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __Pyx_GOTREF(__pyx_v_self->read_sequence);
+  __Pyx_DECREF(__pyx_v_self->read_sequence);
+  __pyx_v_self->read_sequence = ((PyObject*)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":112
+ *         # struct
+ *         self.read_sequence = read_sequence
+ *         self.reference_sequence = reference_sequence             # <<<<<<<<<<<<<<
+ *         self.index_starts_at = index_starts_at
+ * 
+ */
+  /* Same pattern for reference_sequence. */
+  if (!(likely(PyString_CheckExact(__pyx_v_reference_sequence))||((__pyx_v_reference_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_reference_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __pyx_v_reference_sequence;
+  __Pyx_INCREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __Pyx_GOTREF(__pyx_v_self->reference_sequence);
+  __Pyx_DECREF(__pyx_v_self->reference_sequence);
+  __pyx_v_self->reference_sequence = ((PyObject*)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":113
+ *         self.read_sequence = read_sequence
+ *         self.reference_sequence = reference_sequence
+ *         self.index_starts_at = index_starts_at             # <<<<<<<<<<<<<<
+ * 
+ *     cdef __constructor(self, s_align* pointer):
+ */
+  /* Coerce the Python object to a C int; -1 is only an error if an
+     exception is actually pending. */
+  __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_v_index_starts_at); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_v_self->index_starts_at = __pyx_t_2;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":107
+ *     cdef str _cigar_string
+ * 
+ *     def __cinit__(self, read_sequence, reference_sequence, index_starts_at):             # <<<<<<<<<<<<<<
+ *         # We use `read_sequence` and `reference_sequence` here as they are
+ *         # treated sematically as a private output of ssw.c like the `s_align`
+ */
+
+  /* function exit code */
+  __pyx_r = 0;
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = -1;
+  __pyx_L0:;
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":115
+ *         self.index_starts_at = index_starts_at
+ * 
+ *     cdef __constructor(self, s_align* pointer):             # <<<<<<<<<<<<<<
+ *         self.p = pointer
+ * 
+ */
+
+/* cdef-level AlignmentStructure.__constructor (pyx line 115): stashes the
+   raw s_align* produced by ssw.c into self->p so __dealloc__ can free it.
+   Not reachable from Python; always returns None. */
+static PyObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___constructor(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, s_align *__pyx_v_pointer) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__constructor", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":116
+ * 
+ *     cdef __constructor(self, s_align* pointer):
+ *         self.p = pointer             # <<<<<<<<<<<<<<
+ * 
+ *     def __dealloc__(self):
+ */
+  __pyx_v_self->p = __pyx_v_pointer;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":115
+ *         self.index_starts_at = index_starts_at
+ * 
+ *     cdef __constructor(self, s_align* pointer):             # <<<<<<<<<<<<<<
+ *         self.p = pointer
+ * 
+ */
+
+  /* function exit code */
+  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":118
+ *         self.p = pointer
+ * 
+ *     def __dealloc__(self):             # <<<<<<<<<<<<<<
+ *         if self.p is not NULL:
+ *             align_destroy(self.p)
+ */
+
+/* Python wrapper */
+/* tp_dealloc wrapper: casts self to the extension-struct type and calls
+   the __dealloc__ implementation. Cython-generated; do not hand-edit. */
+static void __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_3__dealloc__(PyObject *__pyx_v_self) {
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
+  __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__dealloc__(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+/* Implementation of AlignmentStructure.__dealloc__ (pyx lines 118-120):
+   releases the underlying s_align struct via ssw.c's align_destroy(),
+   guarded by a NULL check so a never-initialized instance is safe. */
+static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__dealloc__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  __Pyx_RefNannySetupContext("__dealloc__", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":119
+ * 
+ *     def __dealloc__(self):
+ *         if self.p is not NULL:             # <<<<<<<<<<<<<<
+ *             align_destroy(self.p)
+ * 
+ */
+  __pyx_t_1 = ((__pyx_v_self->p != NULL) != 0);
+  if (__pyx_t_1) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":120
+ *     def __dealloc__(self):
+ *         if self.p is not NULL:
+ *             align_destroy(self.p)             # <<<<<<<<<<<<<<
+ * 
+ *     def __getitem__(self, key):
+ */
+    align_destroy(__pyx_v_self->p);
+    goto __pyx_L3;
+  }
+  __pyx_L3:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":118
+ *         self.p = pointer
+ * 
+ *     def __dealloc__(self):             # <<<<<<<<<<<<<<
+ *         if self.p is not NULL:
+ *             align_destroy(self.p)
+ */
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":122
+ *             align_destroy(self.p)
+ * 
+ *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
+ *         return getattr(self, key)
+ * 
+ */
+
+/* Python wrapper */
+/* mp_subscript wrapper for AlignmentStructure.__getitem__: casts self and
+   forwards the key to the implementation. Cython-generated. */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_5__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_5__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_4__getitem__(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self), ((PyObject *)__pyx_v_key));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* Implementation of AlignmentStructure.__getitem__ (pyx lines 122-123):
+   obj[key] is simply getattr(obj, key), so subscription exposes the same
+   fields as attribute access. Returns a new reference, or NULL with an
+   AttributeError (from __Pyx_GetAttr) on an unknown key. */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_4__getitem__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_key) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("__getitem__", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":123
+ * 
+ *     def __getitem__(self, key):
+ *         return getattr(self, key)             # <<<<<<<<<<<<<<
+ * 
+ *     def __repr__(self):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = __Pyx_GetAttr(((PyObject *)__pyx_v_self), __pyx_v_key); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":122
+ *             align_destroy(self.p)
+ * 
+ *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
+ *         return getattr(self, key)
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":125
+ *         return getattr(self, key)
+ * 
+ *     def __repr__(self):             # <<<<<<<<<<<<<<
+ *         data = ['optimal_alignment_score', 'suboptimal_alignment_score',
+ *                 'query_begin', 'query_end', 'target_begin',
+ */
+
+/* Python wrapper */
+/* tp_repr wrapper for AlignmentStructure.__repr__: casts self and forwards
+   to the implementation. Cython-generated. */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_7__repr__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_7__repr__(PyObject *__pyx_v_self) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_6__repr__(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* Implementation of AlignmentStructure.__repr__ (pyx lines 125-131):
+   builds a fixed list of ten attribute names, formats each as
+   "    {!r}: {!r}" with its value (looked up through self[k], i.e.
+   __getitem__ -> getattr), joins them with ",\n", and wraps the result
+   in "{\n%s\n}". Returns a new str reference, NULL on error. */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_6__repr__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_v_data = NULL;
+  PyObject *__pyx_v_k = NULL;
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
+  Py_ssize_t __pyx_t_3;
+  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_5 = NULL;
+  PyObject *__pyx_t_6 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("__repr__", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":126
+ * 
+ *     def __repr__(self):
+ *         data = ['optimal_alignment_score', 'suboptimal_alignment_score',             # <<<<<<<<<<<<<<
+ *                 'query_begin', 'query_end', 'target_begin',
+ *                 'target_end_optimal', 'target_end_suboptimal', 'cigar',
+ */
+  /* Populate the 10-element attribute-name list from interned strings. */
+  __pyx_t_1 = PyList_New(10); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_INCREF(__pyx_n_s_optimal_alignment_score);
+  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_optimal_alignment_score);
+  __Pyx_GIVEREF(__pyx_n_s_optimal_alignment_score);
+  __Pyx_INCREF(__pyx_n_s_suboptimal_alignment_score);
+  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_suboptimal_alignment_score);
+  __Pyx_GIVEREF(__pyx_n_s_suboptimal_alignment_score);
+  __Pyx_INCREF(__pyx_n_s_query_begin);
+  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_n_s_query_begin);
+  __Pyx_GIVEREF(__pyx_n_s_query_begin);
+  __Pyx_INCREF(__pyx_n_s_query_end);
+  PyList_SET_ITEM(__pyx_t_1, 3, __pyx_n_s_query_end);
+  __Pyx_GIVEREF(__pyx_n_s_query_end);
+  __Pyx_INCREF(__pyx_n_s_target_begin);
+  PyList_SET_ITEM(__pyx_t_1, 4, __pyx_n_s_target_begin);
+  __Pyx_GIVEREF(__pyx_n_s_target_begin);
+  __Pyx_INCREF(__pyx_n_s_target_end_optimal);
+  PyList_SET_ITEM(__pyx_t_1, 5, __pyx_n_s_target_end_optimal);
+  __Pyx_GIVEREF(__pyx_n_s_target_end_optimal);
+  __Pyx_INCREF(__pyx_n_s_target_end_suboptimal);
+  PyList_SET_ITEM(__pyx_t_1, 6, __pyx_n_s_target_end_suboptimal);
+  __Pyx_GIVEREF(__pyx_n_s_target_end_suboptimal);
+  __Pyx_INCREF(__pyx_n_s_cigar);
+  PyList_SET_ITEM(__pyx_t_1, 7, __pyx_n_s_cigar);
+  __Pyx_GIVEREF(__pyx_n_s_cigar);
+  __Pyx_INCREF(__pyx_n_s_query_sequence);
+  PyList_SET_ITEM(__pyx_t_1, 8, __pyx_n_s_query_sequence);
+  __Pyx_GIVEREF(__pyx_n_s_query_sequence);
+  __Pyx_INCREF(__pyx_n_s_target_sequence);
+  PyList_SET_ITEM(__pyx_t_1, 9, __pyx_n_s_target_sequence);
+  __Pyx_GIVEREF(__pyx_n_s_target_sequence);
+  __pyx_v_data = ((PyObject*)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":130
+ *                 'target_end_optimal', 'target_end_suboptimal', 'cigar',
+ *                 'query_sequence', 'target_sequence']
+ *         return "{\n%s\n}" % ',\n'.join([             # <<<<<<<<<<<<<<
+ *             "    {!r}: {!r}".format(k, self[k]) for k in data])
+ * 
+ */
+  __Pyx_XDECREF(__pyx_r);
+  /* __pyx_t_1 accumulates the list comprehension result. */
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":131
+ *                 'query_sequence', 'target_sequence']
+ *         return "{\n%s\n}" % ',\n'.join([
+ *             "    {!r}: {!r}".format(k, self[k]) for k in data])             # <<<<<<<<<<<<<<
+ * 
+ *     def __str__(self):
+ */
+  /* Iterate `data`, formatting each (key, self[key]) pair. */
+  __pyx_t_2 = __pyx_v_data; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
+  for (;;) {
+    if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
+    #if CYTHON_COMPILING_IN_CPYTHON
+    __pyx_t_4 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    #else
+    __pyx_t_4 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    #endif
+    __Pyx_XDECREF_SET(__pyx_v_k, __pyx_t_4);
+    __pyx_t_4 = 0;
+    /* "    {!r}: {!r}".format -- bound method fetched per iteration. */
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_r_r, __pyx_n_s_format); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    /* self[k] -> routed through __getitem__, i.e. getattr(self, k). */
+    __pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_k); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __Pyx_GOTREF(__pyx_t_5);
+    __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    __Pyx_INCREF(__pyx_v_k);
+    PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_k);
+    __Pyx_GIVEREF(__pyx_v_k);
+    PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5);
+    __Pyx_GIVEREF(__pyx_t_5);
+    __pyx_t_5 = 0;
+    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  }
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":130
+ *                 'target_end_optimal', 'target_end_suboptimal', 'cigar',
+ *                 'query_sequence', 'target_sequence']
+ *         return "{\n%s\n}" % ',\n'.join([             # <<<<<<<<<<<<<<
+ *             "    {!r}: {!r}".format(k, self[k]) for k in data])
+ * 
+ */
+  /* ',\n'.join(lines), then "{\n%s\n}" % joined. */
+  __pyx_t_2 = __Pyx_PyString_Join(__pyx_kp_s_, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_s, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":125
+ *         return getattr(self, key)
+ * 
+ *     def __repr__(self):             # <<<<<<<<<<<<<<
+ *         data = ['optimal_alignment_score', 'suboptimal_alignment_score',
+ *                 'query_begin', 'query_end', 'target_begin',
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_5);
+  __Pyx_XDECREF(__pyx_t_6);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XDECREF(__pyx_v_data);
+  __Pyx_XDECREF(__pyx_v_k);
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":133
+ *             "    {!r}: {!r}".format(k, self[k]) for k in data])
+ * 
+ *     def __str__(self):             # <<<<<<<<<<<<<<
+ *         score = "Score: %d" % self.optimal_alignment_score
+ *         if self.query_sequence and self.cigar:
+ */
+
+/* Python wrapper */
+/* tp_str wrapper for AlignmentStructure.__str__: casts self and forwards
+   to the implementation. Cython-generated. */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_9__str__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_9__str__(PyObject *__pyx_v_self) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_8__str__(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_8__str__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_v_score = NULL;
+  PyObject *__pyx_v_target = NULL;
+  PyObject *__pyx_v_query = NULL;
+  PyObject *__pyx_v_align_len = NULL;
+  PyObject *__pyx_v_length = NULL;
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
+  int __pyx_t_3;
+  int __pyx_t_4;
+  int __pyx_t_5;
+  Py_ssize_t __pyx_t_6;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("__str__", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":134
+ * 
+ *     def __str__(self):
+ *         score = "Score: %d" % self.optimal_alignment_score             # <<<<<<<<<<<<<<
+ *         if self.query_sequence and self.cigar:
+ *             target = self.aligned_target_sequence
+ */
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Score_d, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_v_score = ((PyObject*)__pyx_t_2);
+  __pyx_t_2 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":135
+ *     def __str__(self):
+ *         score = "Score: %d" % self.optimal_alignment_score
+ *         if self.query_sequence and self.cigar:             # <<<<<<<<<<<<<<
+ *             target = self.aligned_target_sequence
+ *             query = self.aligned_query_sequence
+ */
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  if (__pyx_t_3) {
+    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_cigar); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+    __pyx_t_5 = __pyx_t_4;
+  } else {
+    __pyx_t_5 = __pyx_t_3;
+  }
+  if (__pyx_t_5) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":136
+ *         score = "Score: %d" % self.optimal_alignment_score
+ *         if self.query_sequence and self.cigar:
+ *             target = self.aligned_target_sequence             # <<<<<<<<<<<<<<
+ *             query = self.aligned_query_sequence
+ *             align_len = len(query)
+ */
+    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_v_target = __pyx_t_2;
+    __pyx_t_2 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":137
+ *         if self.query_sequence and self.cigar:
+ *             target = self.aligned_target_sequence
+ *             query = self.aligned_query_sequence             # <<<<<<<<<<<<<<
+ *             align_len = len(query)
+ *             if align_len > 13:
+ */
+    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_v_query = __pyx_t_2;
+    __pyx_t_2 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":138
+ *             target = self.aligned_target_sequence
+ *             query = self.aligned_query_sequence
+ *             align_len = len(query)             # <<<<<<<<<<<<<<
+ *             if align_len > 13:
+ *                 target = target[:10] + "..."
+ */
+    __pyx_t_6 = PyObject_Length(__pyx_v_query); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = PyInt_FromSsize_t(__pyx_t_6); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_v_align_len = __pyx_t_2;
+    __pyx_t_2 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":139
+ *             query = self.aligned_query_sequence
+ *             align_len = len(query)
+ *             if align_len > 13:             # <<<<<<<<<<<<<<
+ *                 target = target[:10] + "..."
+ *                 query = query[:10] + "..."
+ */
+    __pyx_t_2 = PyObject_RichCompare(__pyx_v_align_len, __pyx_int_13, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+    if (__pyx_t_5) {
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":140
+ *             align_len = len(query)
+ *             if align_len > 13:
+ *                 target = target[:10] + "..."             # <<<<<<<<<<<<<<
+ *                 query = query[:10] + "..."
+ * 
+ */
+      __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_target, 0, 10, NULL, NULL, &__pyx_slice__2, 0, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_2);
+      __pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_kp_s__3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_1);
+      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+      __Pyx_DECREF_SET(__pyx_v_target, __pyx_t_1);
+      __pyx_t_1 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":141
+ *             if align_len > 13:
+ *                 target = target[:10] + "..."
+ *                 query = query[:10] + "..."             # <<<<<<<<<<<<<<
+ * 
+ *             length = "Length: %d" % align_len
+ */
+      __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_query, 0, 10, NULL, NULL, &__pyx_slice__4, 0, 1, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_1);
+      __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_kp_s__3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_2);
+      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+      __Pyx_DECREF_SET(__pyx_v_query, __pyx_t_2);
+      __pyx_t_2 = 0;
+      goto __pyx_L4;
+    }
+    __pyx_L4:;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":143
+ *                 query = query[:10] + "..."
+ * 
+ *             length = "Length: %d" % align_len             # <<<<<<<<<<<<<<
+ *             return "\n".join([query, target, score, length])
+ *         return score
+ */
+    __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Length_d, __pyx_v_align_len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_v_length = ((PyObject*)__pyx_t_2);
+    __pyx_t_2 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":144
+ * 
+ *             length = "Length: %d" % align_len
+ *             return "\n".join([query, target, score, length])             # <<<<<<<<<<<<<<
+ *         return score
+ * 
+ */
+    __Pyx_XDECREF(__pyx_r);
+    __pyx_t_2 = PyList_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __Pyx_INCREF(__pyx_v_query);
+    PyList_SET_ITEM(__pyx_t_2, 0, __pyx_v_query);
+    __Pyx_GIVEREF(__pyx_v_query);
+    __Pyx_INCREF(__pyx_v_target);
+    PyList_SET_ITEM(__pyx_t_2, 1, __pyx_v_target);
+    __Pyx_GIVEREF(__pyx_v_target);
+    __Pyx_INCREF(__pyx_v_score);
+    PyList_SET_ITEM(__pyx_t_2, 2, __pyx_v_score);
+    __Pyx_GIVEREF(__pyx_v_score);
+    __Pyx_INCREF(__pyx_v_length);
+    PyList_SET_ITEM(__pyx_t_2, 3, __pyx_v_length);
+    __Pyx_GIVEREF(__pyx_v_length);
+    __pyx_t_1 = __Pyx_PyString_Join(__pyx_kp_s__5, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+    __pyx_r = __pyx_t_1;
+    __pyx_t_1 = 0;
+    goto __pyx_L0;
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":145
+ *             length = "Length: %d" % align_len
+ *             return "\n".join([query, target, score, length])
+ *         return score             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(__pyx_v_score);
+  __pyx_r = __pyx_v_score;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":133
+ *             "    {!r}: {!r}".format(k, self[k]) for k in data])
+ * 
+ *     def __str__(self):             # <<<<<<<<<<<<<<
+ *         score = "Score: %d" % self.optimal_alignment_score
+ *         if self.query_sequence and self.cigar:
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XDECREF(__pyx_v_score);
+  __Pyx_XDECREF(__pyx_v_target);
+  __Pyx_XDECREF(__pyx_v_query);
+  __Pyx_XDECREF(__pyx_v_align_len);
+  __Pyx_XDECREF(__pyx_v_length);
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":148
+ * 
+ *     @property
+ *     def optimal_alignment_score(self):             # <<<<<<<<<<<<<<
+ *         """Optimal alignment score
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_11optimal_alignment_score(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_10optimal_alignment_score[] = "Optimal alignment score\n\n        Returns\n        -------\n        int\n            The optimal alignment score\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_11optimal_alignment_score(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("optimal_alignment_score (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_10optimal_alignment_score(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_10optimal_alignment_score(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("optimal_alignment_score", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":157
+ * 
+ *         """
+ *         return self.p.score1             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = __Pyx_PyInt_From_npy_uint16(__pyx_v_self->p->score1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":148
+ * 
+ *     @property
+ *     def optimal_alignment_score(self):             # <<<<<<<<<<<<<<
+ *         """Optimal alignment score
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.optimal_alignment_score", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":160
+ * 
+ *     @property
+ *     def suboptimal_alignment_score(self):             # <<<<<<<<<<<<<<
+ *         """Suboptimal alignment score
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_13suboptimal_alignment_score(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_12suboptimal_alignment_score[] = "Suboptimal alignment score\n\n        Returns\n        -------\n        int\n            The suboptimal alignment score\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_13suboptimal_alignment_score(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("suboptimal_alignment_score (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_12suboptimal_alignment_score(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_12suboptimal_alignment_score(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("suboptimal_alignment_score", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":169
+ * 
+ *         """
+ *         return self.p.score2             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = __Pyx_PyInt_From_npy_uint16(__pyx_v_self->p->score2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":160
+ * 
+ *     @property
+ *     def suboptimal_alignment_score(self):             # <<<<<<<<<<<<<<
+ *         """Suboptimal alignment score
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.suboptimal_alignment_score", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":172
+ * 
+ *     @property
+ *     def target_begin(self):             # <<<<<<<<<<<<<<
+ *         """Character index where the target's alignment begins
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_15target_begin(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_14target_begin[] = "Character index where the target's alignment begins\n\n        Returns\n        -------\n        int\n            The character index of the target sequence's alignment's beginning\n\n        Notes\n        -----\n        The result is a 0-based index by default\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_15target_begin(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("target_begin (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_14target_begin(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_14target_begin(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("target_begin", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":185
+ * 
+ *         """
+ *         return self.p.ref_begin1 + self.index_starts_at if (self.p.ref_begin1             # <<<<<<<<<<<<<<
+ *                                                             >= 0) else -1
+ * 
+ */
+  __Pyx_XDECREF(__pyx_r);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":186
+ *         """
+ *         return self.p.ref_begin1 + self.index_starts_at if (self.p.ref_begin1
+ *                                                             >= 0) else -1             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  if (((__pyx_v_self->p->ref_begin1 >= 0) != 0)) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":185
+ * 
+ *         """
+ *         return self.p.ref_begin1 + self.index_starts_at if (self.p.ref_begin1             # <<<<<<<<<<<<<<
+ *                                                             >= 0) else -1
+ * 
+ */
+    __pyx_t_2 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_begin1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_t_1 = __pyx_t_2;
+    __pyx_t_2 = 0;
+  } else {
+    __Pyx_INCREF(__pyx_int_neg_1);
+    __pyx_t_1 = __pyx_int_neg_1;
+  }
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":172
+ * 
+ *     @property
+ *     def target_begin(self):             # <<<<<<<<<<<<<<
+ *         """Character index where the target's alignment begins
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.target_begin", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":189
+ * 
+ *     @property
+ *     def target_end_optimal(self):             # <<<<<<<<<<<<<<
+ *         """Character index where the target's optimal alignment ends
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_17target_end_optimal(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_16target_end_optimal[] = "Character index where the target's optimal alignment ends\n\n        Returns\n        -------\n        int\n            The character index of the target sequence's optimal alignment's\n             end\n\n        Notes\n        -----\n        The result is a 0-based index by default\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_17target_end_optimal(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("target_end_optimal (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_16target_end_optimal(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_16target_end_optimal(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("target_end_optimal", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":203
+ * 
+ *         """
+ *         return self.p.ref_end1 + self.index_starts_at             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_end1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":189
+ * 
+ *     @property
+ *     def target_end_optimal(self):             # <<<<<<<<<<<<<<
+ *         """Character index where the target's optimal alignment ends
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.target_end_optimal", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":206
+ * 
+ *     @property
+ *     def target_end_suboptimal(self):             # <<<<<<<<<<<<<<
+ *         """Character index where the target's suboptimal alignment ends
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_19target_end_suboptimal(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_18target_end_suboptimal[] = "Character index where the target's suboptimal alignment ends\n\n        Returns\n        -------\n        int\n            The character index of the target sequence's suboptimal alignment's\n             end\n\n        Notes\n        -----\n        The result is a 0-based index by default\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_19target_end_suboptimal(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("target_end_suboptimal (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_18target_end_suboptimal(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_18target_end_suboptimal(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("target_end_suboptimal", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":220
+ * 
+ *         """
+ *         return self.p.ref_end2 + self.index_starts_at             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_end2 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":206
+ * 
+ *     @property
+ *     def target_end_suboptimal(self):             # <<<<<<<<<<<<<<
+ *         """Character index where the target's suboptimal alignment ends
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.target_end_suboptimal", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":223
+ * 
+ *     @property
+ *     def query_begin(self):             # <<<<<<<<<<<<<<
+ *         """Returns the character index at which the query sequence begins
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_21query_begin(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_20query_begin[] = "Returns the character index at which the query sequence begins\n\n        Returns\n        -------\n        int\n            The character index of the query sequence beginning\n\n        Notes\n        -----\n        The result is a 0-based index by default\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_21query_begin(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("query_begin (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_20query_begin(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_20query_begin(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("query_begin", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":236
+ * 
+ *         """
+ *         return self.p.read_begin1 + self.index_starts_at if (self.p.read_begin1             # <<<<<<<<<<<<<<
+ *                                                              >= 0) else -1
+ * 
+ */
+  __Pyx_XDECREF(__pyx_r);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":237
+ *         """
+ *         return self.p.read_begin1 + self.index_starts_at if (self.p.read_begin1
+ *                                                              >= 0) else -1             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  if (((__pyx_v_self->p->read_begin1 >= 0) != 0)) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":236
+ * 
+ *         """
+ *         return self.p.read_begin1 + self.index_starts_at if (self.p.read_begin1             # <<<<<<<<<<<<<<
+ *                                                              >= 0) else -1
+ * 
+ */
+    __pyx_t_2 = __Pyx_PyInt_From_int((__pyx_v_self->p->read_begin1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_t_1 = __pyx_t_2;
+    __pyx_t_2 = 0;
+  } else {
+    __Pyx_INCREF(__pyx_int_neg_1);
+    __pyx_t_1 = __pyx_int_neg_1;
+  }
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":223
+ * 
+ *     @property
+ *     def query_begin(self):             # <<<<<<<<<<<<<<
+ *         """Returns the character index at which the query sequence begins
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.query_begin", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":240
+ * 
+ *     @property
+ *     def query_end(self):             # <<<<<<<<<<<<<<
+ *         """Character index at where query sequence ends
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_23query_end(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_22query_end[] = "Character index at where query sequence ends\n\n        Returns\n        -------\n        int\n            The character index of the query sequence ending\n\n        Notes\n        -----\n        The result is a 0-based index by default\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_23query_end(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("query_end (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_22query_end(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_22query_end(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("query_end", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":253
+ * 
+ *         """
+ *         return self.p.read_end1 + self.index_starts_at             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->read_end1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":240
+ * 
+ *     @property
+ *     def query_end(self):             # <<<<<<<<<<<<<<
+ *         """Character index at where query sequence ends
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.query_end", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":256
+ * 
+ *     @property
+ *     def cigar(self):             # <<<<<<<<<<<<<<
+ *         """Cigar formatted string for the optimal alignment
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_25cigar(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_24cigar[] = "Cigar formatted string for the optimal alignment\n\n        Returns\n        -------\n        str\n            The cigar string of the optimal alignment\n\n        Notes\n        -----\n        The cigar string format is described in [1]_ and [2]_.\n\n        If there is no cigar or optimal alignment, this will return an empty\n        string\n\n        References\n        ----------\n        .. [1]  [...]
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_25cigar(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("cigar (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_24cigar(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_24cigar(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_v_cigar_list = NULL;
+  __pyx_t_5numpy_int32_t __pyx_v_i;
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  int __pyx_t_2;
+  PyObject *__pyx_t_3 = NULL;
+  __pyx_t_5numpy_int32_t __pyx_t_4;
+  __pyx_t_5numpy_int32_t __pyx_t_5;
+  PyObject *__pyx_t_6 = NULL;
+  int __pyx_t_7;
+  long __pyx_t_8;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("cigar", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":278
+ *         """
+ *         # Memoization! (1/2)
+ *         if self._cigar_string is not None:             # <<<<<<<<<<<<<<
+ *             return self._cigar_string
+ *         cigar_list = []
+ */
+  __pyx_t_1 = (__pyx_v_self->_cigar_string != ((PyObject*)Py_None));
+  __pyx_t_2 = (__pyx_t_1 != 0);
+  if (__pyx_t_2) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":279
+ *         # Memoization! (1/2)
+ *         if self._cigar_string is not None:
+ *             return self._cigar_string             # <<<<<<<<<<<<<<
+ *         cigar_list = []
+ *         for i in range(self.p.cigarLen):
+ */
+    __Pyx_XDECREF(__pyx_r);
+    __Pyx_INCREF(__pyx_v_self->_cigar_string);
+    __pyx_r = __pyx_v_self->_cigar_string;
+    goto __pyx_L0;
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":280
+ *         if self._cigar_string is not None:
+ *             return self._cigar_string
+ *         cigar_list = []             # <<<<<<<<<<<<<<
+ *         for i in range(self.p.cigarLen):
+ *             # stored the same as that in BAM format,
+ */
+  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 280; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_v_cigar_list = ((PyObject*)__pyx_t_3);
+  __pyx_t_3 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":281
+ *             return self._cigar_string
+ *         cigar_list = []
+ *         for i in range(self.p.cigarLen):             # <<<<<<<<<<<<<<
+ *             # stored the same as that in BAM format,
+ *             # high 28 bits: length, low 4 bits: M/I/D (0/1/2)
+ */
+  __pyx_t_4 = __pyx_v_self->p->cigarLen;
+  for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
+    __pyx_v_i = __pyx_t_5;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":286
+ * 
+ *             # Length, remove first 4 bits
+ *             cigar_list.append(str(self.p.cigar[i] >> 4))             # <<<<<<<<<<<<<<
+ *             # M/I/D, lookup first 4 bits in the mid_table
+ *             cigar_list.append(mid_table[self.p.cigar[i] & 0xf])
+ */
+    __pyx_t_3 = __Pyx_PyInt_From_long(((__pyx_v_self->p->cigar[__pyx_v_i]) >> 4)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+    __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3);
+    __Pyx_GIVEREF(__pyx_t_3);
+    __pyx_t_3 = 0;
+    __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+    __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_cigar_list, __pyx_t_3); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":288
+ *             cigar_list.append(str(self.p.cigar[i] >> 4))
+ *             # M/I/D, lookup first 4 bits in the mid_table
+ *             cigar_list.append(mid_table[self.p.cigar[i] & 0xf])             # <<<<<<<<<<<<<<
+ *         # Memoization! (2/2)
+ *         self._cigar_string = "".join(cigar_list)
+ */
+    __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_mid_table); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+    __pyx_t_8 = ((__pyx_v_self->p->cigar[__pyx_v_i]) & 0xf);
+    __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_3, __pyx_t_8, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __Pyx_GOTREF(__pyx_t_6);
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_cigar_list, __pyx_t_6); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":290
+ *             cigar_list.append(mid_table[self.p.cigar[i] & 0xf])
+ *         # Memoization! (2/2)
+ *         self._cigar_string = "".join(cigar_list)             # <<<<<<<<<<<<<<
+ *         return self._cigar_string
+ * 
+ */
+  __pyx_t_6 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_cigar_list); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_6);
+  if (!(likely(PyString_CheckExact(__pyx_t_6))||((__pyx_t_6) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_t_6)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GIVEREF(__pyx_t_6);
+  __Pyx_GOTREF(__pyx_v_self->_cigar_string);
+  __Pyx_DECREF(__pyx_v_self->_cigar_string);
+  __pyx_v_self->_cigar_string = ((PyObject*)__pyx_t_6);
+  __pyx_t_6 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":291
+ *         # Memoization! (2/2)
+ *         self._cigar_string = "".join(cigar_list)
+ *         return self._cigar_string             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(__pyx_v_self->_cigar_string);
+  __pyx_r = __pyx_v_self->_cigar_string;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":256
+ * 
+ *     @property
+ *     def cigar(self):             # <<<<<<<<<<<<<<
+ *         """Cigar formatted string for the optimal alignment
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_6);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.cigar", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XDECREF(__pyx_v_cigar_list);
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":294
+ * 
+ *     @property
+ *     def query_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Query sequence
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_27query_sequence(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_26query_sequence[] = "Query sequence\n\n        Returns\n        -------\n        str\n            The query sequence\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_27query_sequence(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("query_sequence (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_26query_sequence(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_26query_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("query_sequence", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":303
+ * 
+ *         """
+ *         return self.read_sequence             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(__pyx_v_self->read_sequence);
+  __pyx_r = __pyx_v_self->read_sequence;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":294
+ * 
+ *     @property
+ *     def query_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Query sequence
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":306
+ * 
+ *     @property
+ *     def target_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Target sequence
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_29target_sequence(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_28target_sequence[] = "Target sequence\n\n        Returns\n        -------\n        str\n            The target sequence\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_29target_sequence(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("target_sequence (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_28target_sequence(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_28target_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("target_sequence", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":315
+ * 
+ *         """
+ *         return self.reference_sequence             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(__pyx_v_self->reference_sequence);
+  __pyx_r = __pyx_v_self->reference_sequence;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":306
+ * 
+ *     @property
+ *     def target_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Target sequence
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":318
+ * 
+ *     @property
+ *     def aligned_query_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Returns the query sequence aligned by the cigar
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_31aligned_query_sequence(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_30aligned_query_sequence[] = "Returns the query sequence aligned by the cigar\n\n        Returns\n        -------\n        str\n            Aligned query sequence\n\n        Notes\n        -----\n        This will return `None` if `suppress_sequences` was True when this\n        object was created\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_31aligned_query_sequence(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("aligned_query_sequence (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_30aligned_query_sequence(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_30aligned_query_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_t_2;
+  PyObject *__pyx_t_3 = NULL;
+  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_5 = NULL;
+  PyObject *__pyx_t_6 = NULL;
+  PyObject *__pyx_t_7 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("aligned_query_sequence", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":332
+ * 
+ *         """
+ *         if self.query_sequence:             # <<<<<<<<<<<<<<
+ *             return self._get_aligned_sequence(self.query_sequence,
+ *                                               self._tuples_from_cigar(),
+ */
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 332; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 332; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (__pyx_t_2) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":333
+ *         """
+ *         if self.query_sequence:
+ *             return self._get_aligned_sequence(self.query_sequence,             # <<<<<<<<<<<<<<
+ *                                               self._tuples_from_cigar(),
+ *                                               self.query_begin, self.query_end,
+ */
+    __Pyx_XDECREF(__pyx_r);
+    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_aligned_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":334
+ *         if self.query_sequence:
+ *             return self._get_aligned_sequence(self.query_sequence,
+ *                                               self._tuples_from_cigar(),             # <<<<<<<<<<<<<<
+ *                                               self.query_begin, self.query_end,
+ *                                               "D")
+ */
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_tuples_from_cigar); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":335
+ *             return self._get_aligned_sequence(self.query_sequence,
+ *                                               self._tuples_from_cigar(),
+ *                                               self.query_begin, self.query_end,             # <<<<<<<<<<<<<<
+ *                                               "D")
+ *         return None
+ */
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_begin); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_end); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":333
+ *         """
+ *         if self.query_sequence:
+ *             return self._get_aligned_sequence(self.query_sequence,             # <<<<<<<<<<<<<<
+ *                                               self._tuples_from_cigar(),
+ *                                               self.query_begin, self.query_end,
+ */
+    __pyx_t_7 = PyTuple_New(5); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_7);
+    PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3);
+    __Pyx_GIVEREF(__pyx_t_3);
+    PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_5);
+    __Pyx_GIVEREF(__pyx_t_5);
+    PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_4);
+    __Pyx_GIVEREF(__pyx_t_4);
+    PyTuple_SET_ITEM(__pyx_t_7, 3, __pyx_t_6);
+    __Pyx_GIVEREF(__pyx_t_6);
+    __Pyx_INCREF(__pyx_n_s_D);
+    PyTuple_SET_ITEM(__pyx_t_7, 4, __pyx_n_s_D);
+    __Pyx_GIVEREF(__pyx_n_s_D);
+    __pyx_t_3 = 0;
+    __pyx_t_5 = 0;
+    __pyx_t_4 = 0;
+    __pyx_t_6 = 0;
+    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+    __pyx_r = __pyx_t_6;
+    __pyx_t_6 = 0;
+    goto __pyx_L0;
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":337
+ *                                               self.query_begin, self.query_end,
+ *                                               "D")
+ *         return None             # <<<<<<<<<<<<<<
+ * 
+ *     @property
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(Py_None);
+  __pyx_r = Py_None;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":318
+ * 
+ *     @property
+ *     def aligned_query_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Returns the query sequence aligned by the cigar
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_5);
+  __Pyx_XDECREF(__pyx_t_6);
+  __Pyx_XDECREF(__pyx_t_7);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.aligned_query_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":340
+ * 
+ *     @property
+ *     def aligned_target_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Returns the target sequence aligned by the cigar
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_33aligned_target_sequence(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_32aligned_target_sequence[] = "Returns the target sequence aligned by the cigar\n\n        Returns\n        -------\n        str\n            Aligned target sequence\n\n        Notes\n        -----\n        This will return `None` if `suppress_sequences` was True when this\n        object was created\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_33aligned_target_sequence(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("aligned_target_sequence (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_32aligned_target_sequence(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_32aligned_target_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_t_2;
+  PyObject *__pyx_t_3 = NULL;
+  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_5 = NULL;
+  PyObject *__pyx_t_6 = NULL;
+  PyObject *__pyx_t_7 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("aligned_target_sequence", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":354
+ * 
+ *         """
+ *         if self.target_sequence:             # <<<<<<<<<<<<<<
+ *             return self._get_aligned_sequence(self.target_sequence,
+ *                                               self._tuples_from_cigar(),
+ */
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (__pyx_t_2) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":355
+ *         """
+ *         if self.target_sequence:
+ *             return self._get_aligned_sequence(self.target_sequence,             # <<<<<<<<<<<<<<
+ *                                               self._tuples_from_cigar(),
+ *                                               self.target_begin,
+ */
+    __Pyx_XDECREF(__pyx_r);
+    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_aligned_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":356
+ *         if self.target_sequence:
+ *             return self._get_aligned_sequence(self.target_sequence,
+ *                                               self._tuples_from_cigar(),             # <<<<<<<<<<<<<<
+ *                                               self.target_begin,
+ *                                               self.target_end_optimal,
+ */
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_tuples_from_cigar); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":357
+ *             return self._get_aligned_sequence(self.target_sequence,
+ *                                               self._tuples_from_cigar(),
+ *                                               self.target_begin,             # <<<<<<<<<<<<<<
+ *                                               self.target_end_optimal,
+ *                                               "I")
+ */
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_begin); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":358
+ *                                               self._tuples_from_cigar(),
+ *                                               self.target_begin,
+ *                                               self.target_end_optimal,             # <<<<<<<<<<<<<<
+ *                                               "I")
+ *         return None
+ */
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 358; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":355
+ *         """
+ *         if self.target_sequence:
+ *             return self._get_aligned_sequence(self.target_sequence,             # <<<<<<<<<<<<<<
+ *                                               self._tuples_from_cigar(),
+ *                                               self.target_begin,
+ */
+    __pyx_t_7 = PyTuple_New(5); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_7);
+    PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3);
+    __Pyx_GIVEREF(__pyx_t_3);
+    PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_5);
+    __Pyx_GIVEREF(__pyx_t_5);
+    PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_4);
+    __Pyx_GIVEREF(__pyx_t_4);
+    PyTuple_SET_ITEM(__pyx_t_7, 3, __pyx_t_6);
+    __Pyx_GIVEREF(__pyx_t_6);
+    __Pyx_INCREF(__pyx_n_s_I);
+    PyTuple_SET_ITEM(__pyx_t_7, 4, __pyx_n_s_I);
+    __Pyx_GIVEREF(__pyx_n_s_I);
+    __pyx_t_3 = 0;
+    __pyx_t_5 = 0;
+    __pyx_t_4 = 0;
+    __pyx_t_6 = 0;
+    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+    __pyx_r = __pyx_t_6;
+    __pyx_t_6 = 0;
+    goto __pyx_L0;
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":360
+ *                                               self.target_end_optimal,
+ *                                               "I")
+ *         return None             # <<<<<<<<<<<<<<
+ * 
+ *     def set_zero_based(self, is_zero_based):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(Py_None);
+  __pyx_r = Py_None;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":340
+ * 
+ *     @property
+ *     def aligned_target_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Returns the target sequence aligned by the cigar
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_5);
+  __Pyx_XDECREF(__pyx_t_6);
+  __Pyx_XDECREF(__pyx_t_7);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.aligned_target_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":362
+ *         return None
+ * 
+ *     def set_zero_based(self, is_zero_based):             # <<<<<<<<<<<<<<
+ *         """Set the aligment indices to start at 0 if True else 1 if False
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_35set_zero_based(PyObject *__pyx_v_self, PyObject *__pyx_v_is_zero_based); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_34set_zero_based[] = "Set the aligment indices to start at 0 if True else 1 if False\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_35set_zero_based(PyObject *__pyx_v_self, PyObject *__pyx_v_is_zero_based) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("set_zero_based (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_34set_zero_based(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self), ((PyObject *)__pyx_v_is_zero_based));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_34set_zero_based(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_is_zero_based) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("set_zero_based", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":366
+ * 
+ *         """
+ *         if is_zero_based:             # <<<<<<<<<<<<<<
+ *             self.index_starts_at = 0
+ *         else:
+ */
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_is_zero_based); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_1) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":367
+ *         """
+ *         if is_zero_based:
+ *             self.index_starts_at = 0             # <<<<<<<<<<<<<<
+ *         else:
+ *             self.index_starts_at = 1
+ */
+    __pyx_v_self->index_starts_at = 0;
+    goto __pyx_L3;
+  }
+  /*else*/ {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":369
+ *             self.index_starts_at = 0
+ *         else:
+ *             self.index_starts_at = 1             # <<<<<<<<<<<<<<
+ * 
+ *     def is_zero_based(self):
+ */
+    __pyx_v_self->index_starts_at = 1;
+  }
+  __pyx_L3:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":362
+ *         return None
+ * 
+ *     def set_zero_based(self, is_zero_based):             # <<<<<<<<<<<<<<
+ *         """Set the aligment indices to start at 0 if True else 1 if False
+ * 
+ */
+
+  /* function exit code */
+  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.set_zero_based", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":371
+ *             self.index_starts_at = 1
+ * 
+ *     def is_zero_based(self):             # <<<<<<<<<<<<<<
+ *         """Returns True if alignment inidices start at 0 else False
+ * 
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_37is_zero_based(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_36is_zero_based[] = "Returns True if alignment inidices start at 0 else False\n\n        Returns\n        -------\n        bool\n            Whether the alignment inidices start at 0\n\n        ";
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_37is_zero_based(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("is_zero_based (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_36is_zero_based(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_36is_zero_based(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("is_zero_based", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":380
+ * 
+ *         """
+ *         return self.index_starts_at == 0             # <<<<<<<<<<<<<<
+ * 
+ *     def _get_aligned_sequence(self, sequence, tuple_cigar, begin, end,
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = __Pyx_PyBool_FromLong((__pyx_v_self->index_starts_at == 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":371
+ *             self.index_starts_at = 1
+ * 
+ *     def is_zero_based(self):             # <<<<<<<<<<<<<<
+ *         """Returns True if alignment inidices start at 0 else False
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.is_zero_based", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":382
+ *         return self.index_starts_at == 0
+ * 
+ *     def _get_aligned_sequence(self, sequence, tuple_cigar, begin, end,             # <<<<<<<<<<<<<<
+ *                               gap_type):
+ *         # Save the original index scheme and then set it to 0 (1/2)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_39_get_aligned_sequence(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_39_get_aligned_sequence(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  PyObject *__pyx_v_sequence = 0;
+  PyObject *__pyx_v_tuple_cigar = 0;
+  PyObject *__pyx_v_begin = 0;
+  PyObject *__pyx_v_end = 0;
+  PyObject *__pyx_v_gap_type = 0;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("_get_aligned_sequence (wrapper)", 0);
+  {
+    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sequence,&__pyx_n_s_tuple_cigar,&__pyx_n_s_begin,&__pyx_n_s_end,&__pyx_n_s_gap_type,0};
+    PyObject* values[5] = {0,0,0,0,0};
+    if (unlikely(__pyx_kwds)) {
+      Py_ssize_t kw_args;
+      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+      switch (pos_args) {
+        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        case  0: break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+      kw_args = PyDict_Size(__pyx_kwds);
+      switch (pos_args) {
+        case  0:
+        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_sequence)) != 0)) kw_args--;
+        else goto __pyx_L5_argtuple_error;
+        case  1:
+        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_tuple_cigar)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+        case  2:
+        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_begin)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+        case  3:
+        if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_end)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+        case  4:
+        if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_gap_type)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+      }
+      if (unlikely(kw_args > 0)) {
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_get_aligned_sequence") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+      }
+    } else if (PyTuple_GET_SIZE(__pyx_args) != 5) {
+      goto __pyx_L5_argtuple_error;
+    } else {
+      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+    }
+    __pyx_v_sequence = values[0];
+    __pyx_v_tuple_cigar = values[1];
+    __pyx_v_begin = values[2];
+    __pyx_v_end = values[3];
+    __pyx_v_gap_type = values[4];
+  }
+  goto __pyx_L4_argument_unpacking_done;
+  __pyx_L5_argtuple_error:;
+  __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __pyx_L3_error:;
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure._get_aligned_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_RefNannyFinishContext();
+  return NULL;
+  __pyx_L4_argument_unpacking_done:;
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_38_get_aligned_sequence(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self), __pyx_v_sequence, __pyx_v_tuple_cigar, __pyx_v_begin, __pyx_v_end, __pyx_v_gap_type);
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* Cython-GENERATED implementation of
+ * AlignmentStructure._get_aligned_sequence(sequence, tuple_cigar, begin, end, gap_type)
+ * from skbio/alignment/_ssw_wrapper.pyx (source lines 382-403, echoed in the
+ * interleaved comments below).  It rebuilds the gapped alignment string:
+ * walks the parsed CIGAR (length, mid) pairs, copying `length` characters of
+ * sequence[begin:end+1] for 'M' operations, emitting '-' * length for
+ * operations equal to `gap_type`, then appends any sequence tail beyond the
+ * CIGAR and returns "".join(...) of the pieces.  The zero-based index scheme
+ * is forced on entry and restored before returning.
+ * NOTE(review): do not hand-edit; regenerate from the .pyx.  Error paths use
+ * the standard Cython goto-unwind (__pyx_L1_error) with RefNanny bookkeeping,
+ * so statement order is load-bearing. */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_38_get_aligned_sequence(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, PyObject *__pyx_v_sequence, PyObject *__pyx_v_tuple_cigar, PyObject *__pyx_v_begin, PyObject *__pyx_v_end, PyObject *__pyx_v_gap_type) {
+  PyObject *__pyx_v_orig_z_base = NULL;
+  PyObject *__pyx_v_aligned_sequence = NULL;
+  PyObject *__pyx_v_seq = NULL;
+  PyObject *__pyx_v_index = NULL;
+  PyObject *__pyx_v_length = NULL;
+  PyObject *__pyx_v_mid = NULL;
+  PyObject *__pyx_v_i = NULL;
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
+  Py_ssize_t __pyx_t_3;
+  PyObject *(*__pyx_t_4)(PyObject *);
+  PyObject *__pyx_t_5 = NULL;
+  PyObject *__pyx_t_6 = NULL;
+  PyObject *__pyx_t_7 = NULL;
+  PyObject *(*__pyx_t_8)(PyObject *);
+  int __pyx_t_9;
+  Py_ssize_t __pyx_t_10;
+  PyObject *(*__pyx_t_11)(PyObject *);
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_get_aligned_sequence", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":385
+ *                               gap_type):
+ *         # Save the original index scheme and then set it to 0 (1/2)
+ *         orig_z_base = self.is_zero_based()             # <<<<<<<<<<<<<<
+ *         self.set_zero_based(True)
+ *         aligned_sequence = []
+ */
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_is_zero_based); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_v_orig_z_base = __pyx_t_2;
+  __pyx_t_2 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":386
+ *         # Save the original index scheme and then set it to 0 (1/2)
+ *         orig_z_base = self.is_zero_based()
+ *         self.set_zero_based(True)             # <<<<<<<<<<<<<<
+ *         aligned_sequence = []
+ *         seq = sequence[begin:end + 1]
+ */
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_zero_based); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":387
+ *         orig_z_base = self.is_zero_based()
+ *         self.set_zero_based(True)
+ *         aligned_sequence = []             # <<<<<<<<<<<<<<
+ *         seq = sequence[begin:end + 1]
+ *         index = 0
+ */
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_v_aligned_sequence = ((PyObject*)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":388
+ *         self.set_zero_based(True)
+ *         aligned_sequence = []
+ *         seq = sequence[begin:end + 1]             # <<<<<<<<<<<<<<
+ *         index = 0
+ *         for length, mid in tuple_cigar:
+ */
+  __pyx_t_1 = PyNumber_Add(__pyx_v_end, __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_sequence, 0, 0, &__pyx_v_begin, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_v_seq = __pyx_t_2;
+  __pyx_t_2 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":389
+ *         aligned_sequence = []
+ *         seq = sequence[begin:end + 1]
+ *         index = 0             # <<<<<<<<<<<<<<
+ *         for length, mid in tuple_cigar:
+ *             if mid == 'M':
+ */
+  __Pyx_INCREF(__pyx_int_0);
+  __pyx_v_index = __pyx_int_0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":390
+ *         seq = sequence[begin:end + 1]
+ *         index = 0
+ *         for length, mid in tuple_cigar:             # <<<<<<<<<<<<<<
+ *             if mid == 'M':
+ *                 aligned_sequence += [seq[i]
+ */
+  if (PyList_CheckExact(__pyx_v_tuple_cigar) || PyTuple_CheckExact(__pyx_v_tuple_cigar)) {
+    __pyx_t_2 = __pyx_v_tuple_cigar; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
+    __pyx_t_4 = NULL;
+  } else {
+    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_tuple_cigar); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext;
+  }
+  for (;;) {
+    if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_2)) {
+      if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #else
+      __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #endif
+    } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_2)) {
+      if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #else
+      __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #endif
+    } else {
+      __pyx_t_1 = __pyx_t_4(__pyx_t_2);
+      if (unlikely(!__pyx_t_1)) {
+        PyObject* exc_type = PyErr_Occurred();
+        if (exc_type) {
+          if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        }
+        break;
+      }
+      __Pyx_GOTREF(__pyx_t_1);
+    }
+    if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) {
+      PyObject* sequence = __pyx_t_1;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      Py_ssize_t size = Py_SIZE(sequence);
+      #else
+      Py_ssize_t size = PySequence_Size(sequence);
+      #endif
+      if (unlikely(size != 2)) {
+        if (size > 2) __Pyx_RaiseTooManyValuesError(2);
+        else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
+        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      }
+      #if CYTHON_COMPILING_IN_CPYTHON
+      if (likely(PyTuple_CheckExact(sequence))) {
+        __pyx_t_5 = PyTuple_GET_ITEM(sequence, 0); 
+        __pyx_t_6 = PyTuple_GET_ITEM(sequence, 1); 
+      } else {
+        __pyx_t_5 = PyList_GET_ITEM(sequence, 0); 
+        __pyx_t_6 = PyList_GET_ITEM(sequence, 1); 
+      }
+      __Pyx_INCREF(__pyx_t_5);
+      __Pyx_INCREF(__pyx_t_6);
+      #else
+      __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_5);
+      __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_6);
+      #endif
+      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    } else {
+      Py_ssize_t index = -1;
+      __pyx_t_7 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_7);
+      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+      __pyx_t_8 = Py_TYPE(__pyx_t_7)->tp_iternext;
+      index = 0; __pyx_t_5 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_5)) goto __pyx_L5_unpacking_failed;
+      __Pyx_GOTREF(__pyx_t_5);
+      index = 1; __pyx_t_6 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_6)) goto __pyx_L5_unpacking_failed;
+      __Pyx_GOTREF(__pyx_t_6);
+      if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_8 = NULL;
+      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+      goto __pyx_L6_unpacking_done;
+      __pyx_L5_unpacking_failed:;
+      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+      __pyx_t_8 = NULL;
+      if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_L6_unpacking_done:;
+    }
+    __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_5);
+    __pyx_t_5 = 0;
+    __Pyx_XDECREF_SET(__pyx_v_mid, __pyx_t_6);
+    __pyx_t_6 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":391
+ *         index = 0
+ *         for length, mid in tuple_cigar:
+ *             if mid == 'M':             # <<<<<<<<<<<<<<
+ *                 aligned_sequence += [seq[i]
+ *                                      for i in range(index, length + index)]
+ */
+    __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_mid, __pyx_n_s_M, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (__pyx_t_9) {
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":392
+ *         for length, mid in tuple_cigar:
+ *             if mid == 'M':
+ *                 aligned_sequence += [seq[i]             # <<<<<<<<<<<<<<
+ *                                      for i in range(index, length + index)]
+ *                 index += length
+ */
+      __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_1);
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":393
+ *             if mid == 'M':
+ *                 aligned_sequence += [seq[i]
+ *                                      for i in range(index, length + index)]             # <<<<<<<<<<<<<<
+ *                 index += length
+ *             elif mid == gap_type:
+ */
+      __pyx_t_6 = PyNumber_Add(__pyx_v_length, __pyx_v_index); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_6);
+      __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_5);
+      __Pyx_INCREF(__pyx_v_index);
+      PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_index);
+      __Pyx_GIVEREF(__pyx_v_index);
+      PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6);
+      __Pyx_GIVEREF(__pyx_t_6);
+      __pyx_t_6 = 0;
+      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_6);
+      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+      if (PyList_CheckExact(__pyx_t_6) || PyTuple_CheckExact(__pyx_t_6)) {
+        __pyx_t_5 = __pyx_t_6; __Pyx_INCREF(__pyx_t_5); __pyx_t_10 = 0;
+        __pyx_t_11 = NULL;
+      } else {
+        __pyx_t_10 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_5);
+        __pyx_t_11 = Py_TYPE(__pyx_t_5)->tp_iternext;
+      }
+      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+      for (;;) {
+        if (!__pyx_t_11 && PyList_CheckExact(__pyx_t_5)) {
+          if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_5)) break;
+          #if CYTHON_COMPILING_IN_CPYTHON
+          __pyx_t_6 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #else
+          __pyx_t_6 = PySequence_ITEM(__pyx_t_5, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #endif
+        } else if (!__pyx_t_11 && PyTuple_CheckExact(__pyx_t_5)) {
+          if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_5)) break;
+          #if CYTHON_COMPILING_IN_CPYTHON
+          __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #else
+          __pyx_t_6 = PySequence_ITEM(__pyx_t_5, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #endif
+        } else {
+          __pyx_t_6 = __pyx_t_11(__pyx_t_5);
+          if (unlikely(!__pyx_t_6)) {
+            PyObject* exc_type = PyErr_Occurred();
+            if (exc_type) {
+              if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+              else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            }
+            break;
+          }
+          __Pyx_GOTREF(__pyx_t_6);
+        }
+        __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6);
+        __pyx_t_6 = 0;
+
+        /* "skbio/alignment/_ssw_wrapper.pyx":392
+ *         for length, mid in tuple_cigar:
+ *             if mid == 'M':
+ *                 aligned_sequence += [seq[i]             # <<<<<<<<<<<<<<
+ *                                      for i in range(index, length + index)]
+ *                 index += length
+ */
+        __pyx_t_6 = PyObject_GetItem(__pyx_v_seq, __pyx_v_i); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+        __Pyx_GOTREF(__pyx_t_6);
+        if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+      }
+      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+      __pyx_t_5 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_5);
+      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+      __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_5));
+      __pyx_t_5 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":394
+ *                 aligned_sequence += [seq[i]
+ *                                      for i in range(index, length + index)]
+ *                 index += length             # <<<<<<<<<<<<<<
+ *             elif mid == gap_type:
+ *                 aligned_sequence += (['-'] * length)
+ */
+      __pyx_t_5 = PyNumber_InPlaceAdd(__pyx_v_index, __pyx_v_length); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_5);
+      __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_5);
+      __pyx_t_5 = 0;
+      goto __pyx_L7;
+    }
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":395
+ *                                      for i in range(index, length + index)]
+ *                 index += length
+ *             elif mid == gap_type:             # <<<<<<<<<<<<<<
+ *                 aligned_sequence += (['-'] * length)
+ *             else:
+ */
+    __pyx_t_5 = PyObject_RichCompare(__pyx_v_mid, __pyx_v_gap_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    if (__pyx_t_9) {
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":396
+ *                 index += length
+ *             elif mid == gap_type:
+ *                 aligned_sequence += (['-'] * length)             # <<<<<<<<<<<<<<
+ *             else:
+ *                 pass
+ */
+      __pyx_t_5 = PyList_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_5);
+      __Pyx_INCREF(__pyx_kp_s__8);
+      PyList_SET_ITEM(__pyx_t_5, 0, __pyx_kp_s__8);
+      __Pyx_GIVEREF(__pyx_kp_s__8);
+      { PyObject* __pyx_temp = PyNumber_InPlaceMultiply(__pyx_t_5, __pyx_v_length); if (unlikely(!__pyx_temp)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_temp);
+        __Pyx_DECREF(__pyx_t_5);
+        __pyx_t_5 = __pyx_temp;
+      }
+      __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_1);
+      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+      __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_1));
+      __pyx_t_1 = 0;
+      goto __pyx_L7;
+    }
+    /*else*/ {
+    }
+    __pyx_L7:;
+  }
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":400
+ *                 pass
+ *         # Our sequence end is sometimes beyond the cigar:
+ *         aligned_sequence += [seq[i] for i in range(index, end - begin + 1)]             # <<<<<<<<<<<<<<
+ *         # Revert our index scheme to the original (2/2)
+ *         self.set_zero_based(orig_z_base)
+ */
+  __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_1 = PyNumber_Subtract(__pyx_v_end, __pyx_v_begin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_5 = PyNumber_Add(__pyx_t_1, __pyx_int_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_INCREF(__pyx_v_index);
+  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_index);
+  __Pyx_GIVEREF(__pyx_v_index);
+  PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_5);
+  __Pyx_GIVEREF(__pyx_t_5);
+  __pyx_t_5 = 0;
+  __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_1, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (PyList_CheckExact(__pyx_t_5) || PyTuple_CheckExact(__pyx_t_5)) {
+    __pyx_t_1 = __pyx_t_5; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0;
+    __pyx_t_4 = NULL;
+  } else {
+    __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_4 = Py_TYPE(__pyx_t_1)->tp_iternext;
+  }
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  for (;;) {
+    if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_1)) {
+      if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_5 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #else
+      __pyx_t_5 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #endif
+    } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_1)) {
+      if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #else
+      __pyx_t_5 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #endif
+    } else {
+      __pyx_t_5 = __pyx_t_4(__pyx_t_1);
+      if (unlikely(!__pyx_t_5)) {
+        PyObject* exc_type = PyErr_Occurred();
+        if (exc_type) {
+          if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        }
+        break;
+      }
+      __Pyx_GOTREF(__pyx_t_5);
+    }
+    __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_5);
+    __pyx_t_5 = 0;
+    __pyx_t_5 = PyObject_GetItem(__pyx_v_seq, __pyx_v_i); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __Pyx_GOTREF(__pyx_t_5);
+    if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  }
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_1));
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":402
+ *         aligned_sequence += [seq[i] for i in range(index, end - begin + 1)]
+ *         # Revert our index scheme to the original (2/2)
+ *         self.set_zero_based(orig_z_base)             # <<<<<<<<<<<<<<
+ *         return "".join(aligned_sequence)
+ * 
+ */
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_zero_based); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_INCREF(__pyx_v_orig_z_base);
+  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_orig_z_base);
+  __Pyx_GIVEREF(__pyx_v_orig_z_base);
+  __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":403
+ *         # Revert our index scheme to the original (2/2)
+ *         self.set_zero_based(orig_z_base)
+ *         return "".join(aligned_sequence)             # <<<<<<<<<<<<<<
+ * 
+ *     def _tuples_from_cigar(self):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_5 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_aligned_sequence); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __pyx_r = __pyx_t_5;
+  __pyx_t_5 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":382
+ *         return self.index_starts_at == 0
+ * 
+ *     def _get_aligned_sequence(self, sequence, tuple_cigar, begin, end,             # <<<<<<<<<<<<<<
+ *                               gap_type):
+ *         # Save the original index scheme and then set it to 0 (1/2)
+ */
+
+  /* function exit code */
+  /* Shared error path: release temporaries, record the traceback, fall
+   * through to the common variable cleanup below with __pyx_r == NULL. */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_XDECREF(__pyx_t_5);
+  __Pyx_XDECREF(__pyx_t_6);
+  __Pyx_XDECREF(__pyx_t_7);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure._get_aligned_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XDECREF(__pyx_v_orig_z_base);
+  __Pyx_XDECREF(__pyx_v_aligned_sequence);
+  __Pyx_XDECREF(__pyx_v_seq);
+  __Pyx_XDECREF(__pyx_v_index);
+  __Pyx_XDECREF(__pyx_v_length);
+  __Pyx_XDECREF(__pyx_v_mid);
+  __Pyx_XDECREF(__pyx_v_i);
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":405
+ *         return "".join(aligned_sequence)
+ * 
+ *     def _tuples_from_cigar(self):             # <<<<<<<<<<<<<<
+ *         tuples = []
+ *         length_stack = []
+ */
+
+/* Python wrapper */
+/* Cython-GENERATED no-argument entry point for
+ * AlignmentStructure._tuples_from_cigar (METH_NOARGS): casts the raw
+ * PyObject* self to the extension-type struct and delegates directly to the
+ * implementation function __pyx_pf_..._40_tuples_from_cigar.  No argument
+ * parsing or error handling is needed here beyond RefNanny bookkeeping.
+ * NOTE(review): do not hand-edit; regenerate from the .pyx. */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_41_tuples_from_cigar(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_41_tuples_from_cigar(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("_tuples_from_cigar (wrapper)", 0);
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_40_tuples_from_cigar(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_40_tuples_from_cigar(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self) {
+  PyObject *__pyx_v_tuples = NULL;
+  PyObject *__pyx_v_length_stack = NULL;
+  PyObject *__pyx_v_character = NULL;
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
+  Py_ssize_t __pyx_t_3;
+  PyObject *(*__pyx_t_4)(PyObject *);
+  PyObject *__pyx_t_5 = NULL;
+  int __pyx_t_6;
+  int __pyx_t_7;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_tuples_from_cigar", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":406
+ * 
+ *     def _tuples_from_cigar(self):
+ *         tuples = []             # <<<<<<<<<<<<<<
+ *         length_stack = []
+ *         for character in self.cigar:
+ */
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 406; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_v_tuples = ((PyObject*)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":407
+ *     def _tuples_from_cigar(self):
+ *         tuples = []
+ *         length_stack = []             # <<<<<<<<<<<<<<
+ *         for character in self.cigar:
+ *             if character.isdigit():
+ */
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_v_length_stack = ((PyObject*)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":408
+ *         tuples = []
+ *         length_stack = []
+ *         for character in self.cigar:             # <<<<<<<<<<<<<<
+ *             if character.isdigit():
+ *                 length_stack.append(character)
+ */
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyList_CheckExact(__pyx_t_1) || PyTuple_CheckExact(__pyx_t_1)) {
+    __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
+    __pyx_t_4 = NULL;
+  } else {
+    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext;
+  }
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  for (;;) {
+    if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_2)) {
+      if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #else
+      __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #endif
+    } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_2)) {
+      if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #else
+      __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #endif
+    } else {
+      __pyx_t_1 = __pyx_t_4(__pyx_t_2);
+      if (unlikely(!__pyx_t_1)) {
+        PyObject* exc_type = PyErr_Occurred();
+        if (exc_type) {
+          if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        }
+        break;
+      }
+      __Pyx_GOTREF(__pyx_t_1);
+    }
+    __Pyx_XDECREF_SET(__pyx_v_character, __pyx_t_1);
+    __pyx_t_1 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":409
+ *         length_stack = []
+ *         for character in self.cigar:
+ *             if character.isdigit():             # <<<<<<<<<<<<<<
+ *                 length_stack.append(character)
+ *             else:
+ */
+    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_character, __pyx_n_s_isdigit); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    if (__pyx_t_6) {
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":410
+ *         for character in self.cigar:
+ *             if character.isdigit():
+ *                 length_stack.append(character)             # <<<<<<<<<<<<<<
+ *             else:
+ *                 tuples.append((int("".join(length_stack)), character))
+ */
+      __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_length_stack, __pyx_v_character); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      goto __pyx_L5;
+    }
+    /*else*/ {
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":412
+ *                 length_stack.append(character)
+ *             else:
+ *                 tuples.append((int("".join(length_stack)), character))             # <<<<<<<<<<<<<<
+ *                 length_stack = []
+ *         return tuples
+ */
+      __pyx_t_5 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_length_stack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_5);
+      __pyx_t_1 = PyNumber_Int(__pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_1);
+      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+      __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_5);
+      PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
+      __Pyx_GIVEREF(__pyx_t_1);
+      __Pyx_INCREF(__pyx_v_character);
+      PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_character);
+      __Pyx_GIVEREF(__pyx_v_character);
+      __pyx_t_1 = 0;
+      __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_tuples, __pyx_t_5); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":413
+ *             else:
+ *                 tuples.append((int("".join(length_stack)), character))
+ *                 length_stack = []             # <<<<<<<<<<<<<<
+ *         return tuples
+ * 
+ */
+      __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_5);
+      __Pyx_DECREF_SET(__pyx_v_length_stack, ((PyObject*)__pyx_t_5));
+      __pyx_t_5 = 0;
+    }
+    __pyx_L5:;
+  }
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":414
+ *                 tuples.append((int("".join(length_stack)), character))
+ *                 length_stack = []
+ *         return tuples             # <<<<<<<<<<<<<<
+ * 
+ * cdef class StripedSmithWaterman:
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(__pyx_v_tuples);
+  __pyx_r = __pyx_v_tuples;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":405
+ *         return "".join(aligned_sequence)
+ * 
+ *     def _tuples_from_cigar(self):             # <<<<<<<<<<<<<<
+ *         tuples = []
+ *         length_stack = []
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_XDECREF(__pyx_t_5);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure._tuples_from_cigar", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XDECREF(__pyx_v_tuples);
+  __Pyx_XDECREF(__pyx_v_length_stack);
+  __Pyx_XDECREF(__pyx_v_character);
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":558
+ *     cdef cnp.ndarray __KEEP_IT_IN_SCOPE_matrix
+ * 
+ *     def __cinit__(self, query_sequence,             # <<<<<<<<<<<<<<
+ *                   gap_open_penalty=5,  # BLASTN Default
+ *                   gap_extend_penalty=2,  # BLASTN Default
+ */
+
+/* Python wrapper */
+static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  PyObject *__pyx_v_query_sequence = 0;
+  PyObject *__pyx_v_gap_open_penalty = 0;
+  PyObject *__pyx_v_gap_extend_penalty = 0;
+  PyObject *__pyx_v_score_size = 0;
+  PyObject *__pyx_v_mask_length = 0;
+  PyObject *__pyx_v_mask_auto = 0;
+  PyObject *__pyx_v_score_only = 0;
+  PyObject *__pyx_v_score_filter = 0;
+  PyObject *__pyx_v_distance_filter = 0;
+  PyObject *__pyx_v_override_skip_babp = 0;
+  PyObject *__pyx_v_protein = 0;
+  PyObject *__pyx_v_match_score = 0;
+  PyObject *__pyx_v_mismatch_score = 0;
+  PyObject *__pyx_v_substitution_matrix = 0;
+  PyObject *__pyx_v_suppress_sequences = 0;
+  PyObject *__pyx_v_zero_index = 0;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
+  {
+    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_query_sequence,&__pyx_n_s_gap_open_penalty,&__pyx_n_s_gap_extend_penalty,&__pyx_n_s_score_size,&__pyx_n_s_mask_length,&__pyx_n_s_mask_auto,&__pyx_n_s_score_only,&__pyx_n_s_score_filter,&__pyx_n_s_distance_filter,&__pyx_n_s_override_skip_babp,&__pyx_n_s_protein,&__pyx_n_s_match_score,&__pyx_n_s_mismatch_score,&__pyx_n_s_substitution_matrix,&__pyx_n_s_suppress_sequences,&__pyx_n_s_zero_index,0};
+    PyObject* values[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+    values[1] = ((PyObject *)__pyx_int_5);
+    values[2] = ((PyObject *)__pyx_int_2);
+    values[3] = ((PyObject *)__pyx_int_2);
+    values[4] = ((PyObject *)__pyx_int_15);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":563
+ *                   score_size=2,  # BLASTN Default
+ *                   mask_length=15,  # Minimum length for a suboptimal alignment
+ *                   mask_auto=True,             # <<<<<<<<<<<<<<
+ *                   score_only=False,
+ *                   score_filter=None,
+ */
+    values[5] = ((PyObject *)Py_True);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":564
+ *                   mask_length=15,  # Minimum length for a suboptimal alignment
+ *                   mask_auto=True,
+ *                   score_only=False,             # <<<<<<<<<<<<<<
+ *                   score_filter=None,
+ *                   distance_filter=None,
+ */
+    values[6] = ((PyObject *)Py_False);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":565
+ *                   mask_auto=True,
+ *                   score_only=False,
+ *                   score_filter=None,             # <<<<<<<<<<<<<<
+ *                   distance_filter=None,
+ *                   override_skip_babp=False,
+ */
+    values[7] = ((PyObject *)Py_None);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":566
+ *                   score_only=False,
+ *                   score_filter=None,
+ *                   distance_filter=None,             # <<<<<<<<<<<<<<
+ *                   override_skip_babp=False,
+ *                   protein=False,
+ */
+    values[8] = ((PyObject *)Py_None);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":567
+ *                   score_filter=None,
+ *                   distance_filter=None,
+ *                   override_skip_babp=False,             # <<<<<<<<<<<<<<
+ *                   protein=False,
+ *                   match_score=2,  # BLASTN Default
+ */
+    values[9] = ((PyObject *)Py_False);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":568
+ *                   distance_filter=None,
+ *                   override_skip_babp=False,
+ *                   protein=False,             # <<<<<<<<<<<<<<
+ *                   match_score=2,  # BLASTN Default
+ *                   mismatch_score=-3,  # BLASTN Default
+ */
+    values[10] = ((PyObject *)Py_False);
+    values[11] = ((PyObject *)__pyx_int_2);
+    values[12] = ((PyObject *)__pyx_int_neg_3);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":571
+ *                   match_score=2,  # BLASTN Default
+ *                   mismatch_score=-3,  # BLASTN Default
+ *                   substitution_matrix=None,             # <<<<<<<<<<<<<<
+ *                   suppress_sequences=False,
+ *                   zero_index=True):
+ */
+    values[13] = ((PyObject *)Py_None);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":572
+ *                   mismatch_score=-3,  # BLASTN Default
+ *                   substitution_matrix=None,
+ *                   suppress_sequences=False,             # <<<<<<<<<<<<<<
+ *                   zero_index=True):
+ *         # initalize our values
+ */
+    values[14] = ((PyObject *)Py_False);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":573
+ *                   substitution_matrix=None,
+ *                   suppress_sequences=False,
+ *                   zero_index=True):             # <<<<<<<<<<<<<<
+ *         # initalize our values
+ *         self.read_sequence = query_sequence
+ */
+    values[15] = ((PyObject *)Py_True);
+    if (unlikely(__pyx_kwds)) {
+      Py_ssize_t kw_args;
+      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+      switch (pos_args) {
+        case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15);
+        case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
+        case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
+        case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
+        case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
+        case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
+        case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
+        case  9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+        case  8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        case  0: break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+      kw_args = PyDict_Size(__pyx_kwds);
+      switch (pos_args) {
+        case  0:
+        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_query_sequence)) != 0)) kw_args--;
+        else goto __pyx_L5_argtuple_error;
+        case  1:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_gap_open_penalty);
+          if (value) { values[1] = value; kw_args--; }
+        }
+        case  2:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_gap_extend_penalty);
+          if (value) { values[2] = value; kw_args--; }
+        }
+        case  3:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_score_size);
+          if (value) { values[3] = value; kw_args--; }
+        }
+        case  4:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mask_length);
+          if (value) { values[4] = value; kw_args--; }
+        }
+        case  5:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mask_auto);
+          if (value) { values[5] = value; kw_args--; }
+        }
+        case  6:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_score_only);
+          if (value) { values[6] = value; kw_args--; }
+        }
+        case  7:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_score_filter);
+          if (value) { values[7] = value; kw_args--; }
+        }
+        case  8:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_distance_filter);
+          if (value) { values[8] = value; kw_args--; }
+        }
+        case  9:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_override_skip_babp);
+          if (value) { values[9] = value; kw_args--; }
+        }
+        case 10:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_protein);
+          if (value) { values[10] = value; kw_args--; }
+        }
+        case 11:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_match_score);
+          if (value) { values[11] = value; kw_args--; }
+        }
+        case 12:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mismatch_score);
+          if (value) { values[12] = value; kw_args--; }
+        }
+        case 13:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_substitution_matrix);
+          if (value) { values[13] = value; kw_args--; }
+        }
+        case 14:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_suppress_sequences);
+          if (value) { values[14] = value; kw_args--; }
+        }
+        case 15:
+        if (kw_args > 0) {
+          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zero_index);
+          if (value) { values[15] = value; kw_args--; }
+        }
+      }
+      if (unlikely(kw_args > 0)) {
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+      }
+    } else {
+      switch (PyTuple_GET_SIZE(__pyx_args)) {
+        case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15);
+        case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
+        case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
+        case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
+        case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
+        case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
+        case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
+        case  9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+        case  8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+    }
+    __pyx_v_query_sequence = values[0];
+    __pyx_v_gap_open_penalty = values[1];
+    __pyx_v_gap_extend_penalty = values[2];
+    __pyx_v_score_size = values[3];
+    __pyx_v_mask_length = values[4];
+    __pyx_v_mask_auto = values[5];
+    __pyx_v_score_only = values[6];
+    __pyx_v_score_filter = values[7];
+    __pyx_v_distance_filter = values[8];
+    __pyx_v_override_skip_babp = values[9];
+    __pyx_v_protein = values[10];
+    __pyx_v_match_score = values[11];
+    __pyx_v_mismatch_score = values[12];
+    __pyx_v_substitution_matrix = values[13];
+    __pyx_v_suppress_sequences = values[14];
+    __pyx_v_zero_index = values[15];
+  }
+  goto __pyx_L4_argument_unpacking_done;
+  __pyx_L5_argtuple_error:;
+  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 1, 16, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __pyx_L3_error:;
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_RefNannyFinishContext();
+  return -1;
+  __pyx_L4_argument_unpacking_done:;
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___cinit__(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self), __pyx_v_query_sequence, __pyx_v_gap_open_penalty, __pyx_v_gap_extend_penalty, __pyx_v_score_size, __pyx_v_mask_length, __pyx_v_mask_auto, __pyx_v_score_only, __pyx_v_score_filter, __pyx_v_distance_filter, __pyx_v_override_skip_babp, __pyx_v_protein, __pyx_v_match_score, __pyx_v_mismatch_score, __pyx_v_substitution [...]
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":558
+ *     cdef cnp.ndarray __KEEP_IT_IN_SCOPE_matrix
+ * 
+ *     def __cinit__(self, query_sequence,             # <<<<<<<<<<<<<<
+ *                   gap_open_penalty=5,  # BLASTN Default
+ *                   gap_extend_penalty=2,  # BLASTN Default
+ */
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___cinit__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_query_sequence, PyObject *__pyx_v_gap_open_penalty, PyObject *__pyx_v_gap_extend_penalty, PyObject *__pyx_v_score_size, PyObject *__pyx_v_mask_length, PyObject *__pyx_v_mask_auto, PyObject *__pyx_v_score_only, PyObject *__pyx_v_score_filter, PyObject *__pyx_v_distance_filter, PyObject *__pyx_v_overrid [...]
+  PyArrayObject *__pyx_v_matrix = 0;
+  PyArrayObject *__pyx_v_read_seq = 0;
+  __pyx_t_5numpy_int32_t __pyx_v_read_length;
+  __pyx_t_5numpy_int8_t __pyx_v_s_size;
+  __pyx_t_5numpy_int32_t __pyx_v_m_width;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_matrix;
+  __Pyx_Buffer __pyx_pybuffer_matrix;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_read_seq;
+  __Pyx_Buffer __pyx_pybuffer_read_seq;
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_t_2;
+  __pyx_t_5numpy_uint8_t __pyx_t_3;
+  __pyx_t_5numpy_int32_t __pyx_t_4;
+  __pyx_t_5numpy_int32_t __pyx_t_5;
+  __pyx_t_5numpy_uint16_t __pyx_t_6;
+  __pyx_t_5numpy_uint16_t __pyx_t_7;
+  PyObject *__pyx_t_8 = NULL;
+  PyObject *__pyx_t_9 = NULL;
+  int __pyx_t_10;
+  int __pyx_t_11;
+  PyObject *__pyx_t_12 = NULL;
+  PyObject *__pyx_t_13 = NULL;
+  PyObject *__pyx_t_14 = NULL;
+  Py_ssize_t __pyx_t_15;
+  __pyx_t_5numpy_int8_t __pyx_t_16;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("__cinit__", 0);
+  __pyx_pybuffer_matrix.pybuffer.buf = NULL;
+  __pyx_pybuffer_matrix.refcount = 0;
+  __pyx_pybuffernd_matrix.data = NULL;
+  __pyx_pybuffernd_matrix.rcbuffer = &__pyx_pybuffer_matrix;
+  __pyx_pybuffer_read_seq.pybuffer.buf = NULL;
+  __pyx_pybuffer_read_seq.refcount = 0;
+  __pyx_pybuffernd_read_seq.data = NULL;
+  __pyx_pybuffernd_read_seq.rcbuffer = &__pyx_pybuffer_read_seq;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":575
+ *                   zero_index=True):
+ *         # initalize our values
+ *         self.read_sequence = query_sequence             # <<<<<<<<<<<<<<
+ *         if gap_open_penalty <= 0:
+ *             raise ValueError("`gap_open_penalty` must be > 0")
+ */
+  if (!(likely(PyString_CheckExact(__pyx_v_query_sequence))||((__pyx_v_query_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_query_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __pyx_v_query_sequence;
+  __Pyx_INCREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __Pyx_GOTREF(__pyx_v_self->read_sequence);
+  __Pyx_DECREF(__pyx_v_self->read_sequence);
+  __pyx_v_self->read_sequence = ((PyObject*)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":576
+ *         # initalize our values
+ *         self.read_sequence = query_sequence
+ *         if gap_open_penalty <= 0:             # <<<<<<<<<<<<<<
+ *             raise ValueError("`gap_open_penalty` must be > 0")
+ *         self.gap_open_penalty = gap_open_penalty
+ */
+  __pyx_t_1 = PyObject_RichCompare(__pyx_v_gap_open_penalty, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (__pyx_t_2) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":577
+ *         self.read_sequence = query_sequence
+ *         if gap_open_penalty <= 0:
+ *             raise ValueError("`gap_open_penalty` must be > 0")             # <<<<<<<<<<<<<<
+ *         self.gap_open_penalty = gap_open_penalty
+ *         if gap_extend_penalty <= 0:
+ */
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":578
+ *         if gap_open_penalty <= 0:
+ *             raise ValueError("`gap_open_penalty` must be > 0")
+ *         self.gap_open_penalty = gap_open_penalty             # <<<<<<<<<<<<<<
+ *         if gap_extend_penalty <= 0:
+ *             raise ValueError("`gap_extend_penalty` must be > 0")
+ */
+  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_v_gap_open_penalty); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 578; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_v_self->gap_open_penalty = __pyx_t_3;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":579
+ *             raise ValueError("`gap_open_penalty` must be > 0")
+ *         self.gap_open_penalty = gap_open_penalty
+ *         if gap_extend_penalty <= 0:             # <<<<<<<<<<<<<<
+ *             raise ValueError("`gap_extend_penalty` must be > 0")
+ *         self.gap_extend_penalty = gap_extend_penalty
+ */
+  __pyx_t_1 = PyObject_RichCompare(__pyx_v_gap_extend_penalty, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (__pyx_t_2) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":580
+ *         self.gap_open_penalty = gap_open_penalty
+ *         if gap_extend_penalty <= 0:
+ *             raise ValueError("`gap_extend_penalty` must be > 0")             # <<<<<<<<<<<<<<
+ *         self.gap_extend_penalty = gap_extend_penalty
+ *         self.distance_filter = 0 if distance_filter is None else \
+ */
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":581
+ *         if gap_extend_penalty <= 0:
+ *             raise ValueError("`gap_extend_penalty` must be > 0")
+ *         self.gap_extend_penalty = gap_extend_penalty             # <<<<<<<<<<<<<<
+ *         self.distance_filter = 0 if distance_filter is None else \
+ *             distance_filter
+ */
+  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_v_gap_extend_penalty); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_v_self->gap_extend_penalty = __pyx_t_3;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":582
+ *             raise ValueError("`gap_extend_penalty` must be > 0")
+ *         self.gap_extend_penalty = gap_extend_penalty
+ *         self.distance_filter = 0 if distance_filter is None else \             # <<<<<<<<<<<<<<
+ *             distance_filter
+ *         self.score_filter = 0 if score_filter is None else score_filter
+ */
+  __pyx_t_2 = (__pyx_v_distance_filter == Py_None);
+  if ((__pyx_t_2 != 0)) {
+    __pyx_t_4 = 0;
+  } else {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":583
+ *         self.gap_extend_penalty = gap_extend_penalty
+ *         self.distance_filter = 0 if distance_filter is None else \
+ *             distance_filter             # <<<<<<<<<<<<<<
+ *         self.score_filter = 0 if score_filter is None else score_filter
+ *         self.suppress_sequences = suppress_sequences
+ */
+    __pyx_t_5 = __Pyx_PyInt_As_npy_int32(__pyx_v_distance_filter); if (unlikely((__pyx_t_5 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __pyx_t_5;
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":582
+ *             raise ValueError("`gap_extend_penalty` must be > 0")
+ *         self.gap_extend_penalty = gap_extend_penalty
+ *         self.distance_filter = 0 if distance_filter is None else \             # <<<<<<<<<<<<<<
+ *             distance_filter
+ *         self.score_filter = 0 if score_filter is None else score_filter
+ */
+  __pyx_v_self->distance_filter = __pyx_t_4;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":584
+ *         self.distance_filter = 0 if distance_filter is None else \
+ *             distance_filter
+ *         self.score_filter = 0 if score_filter is None else score_filter             # <<<<<<<<<<<<<<
+ *         self.suppress_sequences = suppress_sequences
+ *         self.is_protein = protein
+ */
+  __pyx_t_2 = (__pyx_v_score_filter == Py_None);
+  if ((__pyx_t_2 != 0)) {
+    __pyx_t_6 = 0;
+  } else {
+    __pyx_t_7 = __Pyx_PyInt_As_npy_uint16(__pyx_v_score_filter); if (unlikely((__pyx_t_7 == (npy_uint16)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = __pyx_t_7;
+  }
+  __pyx_v_self->score_filter = __pyx_t_6;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":585
+ *             distance_filter
+ *         self.score_filter = 0 if score_filter is None else score_filter
+ *         self.suppress_sequences = suppress_sequences             # <<<<<<<<<<<<<<
+ *         self.is_protein = protein
+ *         self.bit_flag = self._get_bit_flag(override_skip_babp, score_only)
+ */
+  if (!(likely(((__pyx_v_suppress_sequences) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_suppress_sequences, __pyx_ptype_7cpython_4bool_bool))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 585; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __pyx_v_suppress_sequences;
+  __Pyx_INCREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __Pyx_GOTREF(__pyx_v_self->suppress_sequences);
+  __Pyx_DECREF(((PyObject *)__pyx_v_self->suppress_sequences));
+  __pyx_v_self->suppress_sequences = ((PyBoolObject *)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":586
+ *         self.score_filter = 0 if score_filter is None else score_filter
+ *         self.suppress_sequences = suppress_sequences
+ *         self.is_protein = protein             # <<<<<<<<<<<<<<
+ *         self.bit_flag = self._get_bit_flag(override_skip_babp, score_only)
+ *         # http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
+ */
+  if (!(likely(((__pyx_v_protein) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_protein, __pyx_ptype_7cpython_4bool_bool))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __pyx_v_protein;
+  __Pyx_INCREF(__pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __Pyx_GOTREF(__pyx_v_self->is_protein);
+  __Pyx_DECREF(((PyObject *)__pyx_v_self->is_protein));
+  __pyx_v_self->is_protein = ((PyBoolObject *)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":587
+ *         self.suppress_sequences = suppress_sequences
+ *         self.is_protein = protein
+ *         self.bit_flag = self._get_bit_flag(override_skip_babp, score_only)             # <<<<<<<<<<<<<<
+ *         # http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
+ *         # Dijkstra knows what's up:
+ */
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_bit_flag); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_8);
+  __Pyx_INCREF(__pyx_v_override_skip_babp);
+  PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_override_skip_babp);
+  __Pyx_GIVEREF(__pyx_v_override_skip_babp);
+  __Pyx_INCREF(__pyx_v_score_only);
+  PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_v_score_only);
+  __Pyx_GIVEREF(__pyx_v_score_only);
+  __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_8, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_9);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_t_9); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+  __pyx_v_self->bit_flag = __pyx_t_3;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":590
+ *         # http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
+ *         # Dijkstra knows what's up:
+ *         self.index_starts_at = 0 if zero_index else 1             # <<<<<<<<<<<<<<
+ *         # set up our matrix
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
+ */
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_zero_index); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_2) {
+    __pyx_t_10 = 0;
+  } else {
+    __pyx_t_10 = 1;
+  }
+  __pyx_v_self->index_starts_at = __pyx_t_10;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":593
+ *         # set up our matrix
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
+ *         if substitution_matrix is None:             # <<<<<<<<<<<<<<
+ *             if protein:
+ *                 raise Exception("Must provide a substitution matrix for"
+ */
+  __pyx_t_2 = (__pyx_v_substitution_matrix == Py_None);
+  __pyx_t_11 = (__pyx_t_2 != 0);
+  if (__pyx_t_11) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":594
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
+ *         if substitution_matrix is None:
+ *             if protein:             # <<<<<<<<<<<<<<
+ *                 raise Exception("Must provide a substitution matrix for"
+ *                                 " protein sequences")
+ */
+    __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_v_protein); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (__pyx_t_11) {
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":595
+ *         if substitution_matrix is None:
+ *             if protein:
+ *                 raise Exception("Must provide a substitution matrix for"             # <<<<<<<<<<<<<<
+ *                                 " protein sequences")
+ *             matrix = self._build_match_matrix(match_score, mismatch_score)
+ */
+      __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_Exception, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_9);
+      __Pyx_Raise(__pyx_t_9, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":597
+ *                 raise Exception("Must provide a substitution matrix for"
+ *                                 " protein sequences")
+ *             matrix = self._build_match_matrix(match_score, mismatch_score)             # <<<<<<<<<<<<<<
+ *         else:
+ *             matrix = self._convert_dict2d_to_matrix(substitution_matrix)
+ */
+    __pyx_t_9 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_build_match_matrix(__pyx_v_self, __pyx_v_match_score, __pyx_v_mismatch_score)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_9);
+    {
+      __Pyx_BufFmt_StackElem __pyx_stack[1];
+      __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer);
+      __pyx_t_10 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_9), &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
+      if (unlikely(__pyx_t_10 < 0)) {
+        PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
+        if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer, (PyObject*)__pyx_v_matrix, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack) == -1)) {
+          Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14);
+          __Pyx_RaiseBufferFallbackError();
+        } else {
+          PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14);
+        }
+      }
+      __pyx_pybuffernd_matrix.diminfo[0].strides = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_matrix.diminfo[0].shape = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.shape[0];
+      if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+    __pyx_v_matrix = ((PyArrayObject *)__pyx_t_9);
+    __pyx_t_9 = 0;
+    goto __pyx_L5;
+  }
+  /*else*/ {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":599
+ *             matrix = self._build_match_matrix(match_score, mismatch_score)
+ *         else:
+ *             matrix = self._convert_dict2d_to_matrix(substitution_matrix)             # <<<<<<<<<<<<<<
+ *         # Set up our mask_length
+ *         # Mask is recommended to be max(query_sequence/2, 15)
+ */
+    __pyx_t_9 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_convert_dict2d_to_matrix(__pyx_v_self, __pyx_v_substitution_matrix)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_9);
+    {
+      __Pyx_BufFmt_StackElem __pyx_stack[1];
+      __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer);
+      __pyx_t_10 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_9), &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
+      if (unlikely(__pyx_t_10 < 0)) {
+        PyErr_Fetch(&__pyx_t_14, &__pyx_t_13, &__pyx_t_12);
+        if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer, (PyObject*)__pyx_v_matrix, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack) == -1)) {
+          Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_12);
+          __Pyx_RaiseBufferFallbackError();
+        } else {
+          PyErr_Restore(__pyx_t_14, __pyx_t_13, __pyx_t_12);
+        }
+      }
+      __pyx_pybuffernd_matrix.diminfo[0].strides = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_matrix.diminfo[0].shape = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.shape[0];
+      if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+    __pyx_v_matrix = ((PyArrayObject *)__pyx_t_9);
+    __pyx_t_9 = 0;
+  }
+  __pyx_L5:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":602
+ *         # Set up our mask_length
+ *         # Mask is recommended to be max(query_sequence/2, 15)
+ *         if mask_auto:             # <<<<<<<<<<<<<<
+ *             self.mask_length = len(query_sequence) / 2
+ *             if self.mask_length < mask_length:
+ */
+  __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_v_mask_auto); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_11) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":603
+ *         # Mask is recommended to be max(query_sequence/2, 15)
+ *         if mask_auto:
+ *             self.mask_length = len(query_sequence) / 2             # <<<<<<<<<<<<<<
+ *             if self.mask_length < mask_length:
+ *                 self.mask_length = mask_length
+ */
+    __pyx_t_15 = PyObject_Length(__pyx_v_query_sequence); if (unlikely(__pyx_t_15 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_v_self->mask_length = __Pyx_div_Py_ssize_t(__pyx_t_15, 2);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":604
+ *         if mask_auto:
+ *             self.mask_length = len(query_sequence) / 2
+ *             if self.mask_length < mask_length:             # <<<<<<<<<<<<<<
+ *                 self.mask_length = mask_length
+ *         else:
+ */
+    __pyx_t_9 = __Pyx_PyInt_From_npy_int32(__pyx_v_self->mask_length); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_9);
+    __pyx_t_8 = PyObject_RichCompare(__pyx_t_9, __pyx_v_mask_length, Py_LT); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+    __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+    if (__pyx_t_11) {
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":605
+ *             self.mask_length = len(query_sequence) / 2
+ *             if self.mask_length < mask_length:
+ *                 self.mask_length = mask_length             # <<<<<<<<<<<<<<
+ *         else:
+ *             self.mask_length = mask_length
+ */
+      __pyx_t_4 = __Pyx_PyInt_As_npy_int32(__pyx_v_mask_length); if (unlikely((__pyx_t_4 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 605; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_v_self->mask_length = __pyx_t_4;
+      goto __pyx_L8;
+    }
+    __pyx_L8:;
+    goto __pyx_L7;
+  }
+  /*else*/ {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":607
+ *                 self.mask_length = mask_length
+ *         else:
+ *             self.mask_length = mask_length             # <<<<<<<<<<<<<<
+ * 
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] read_seq
+ */
+    __pyx_t_4 = __Pyx_PyInt_As_npy_int32(__pyx_v_mask_length); if (unlikely((__pyx_t_4 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_v_self->mask_length = __pyx_t_4;
+  }
+  __pyx_L7:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":610
+ * 
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] read_seq
+ *         read_seq = self._seq_converter(query_sequence)             # <<<<<<<<<<<<<<
+ * 
+ *         cdef cnp.int32_t read_length
+ */
+  __pyx_t_8 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_seq_converter(__pyx_v_self, __pyx_v_query_sequence)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 610; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_8);
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_read_seq.rcbuffer->pybuffer);
+    __pyx_t_10 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_read_seq.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_8), &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
+    if (unlikely(__pyx_t_10 < 0)) {
+      PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_read_seq.rcbuffer->pybuffer, (PyObject*)__pyx_v_read_seq, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14);
+        __Pyx_RaiseBufferFallbackError();
+      } else {
+        PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14);
+      }
+    }
+    __pyx_pybuffernd_read_seq.diminfo[0].strides = __pyx_pybuffernd_read_seq.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_read_seq.diminfo[0].shape = __pyx_pybuffernd_read_seq.rcbuffer->pybuffer.shape[0];
+    if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 610; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_v_read_seq = ((PyArrayObject *)__pyx_t_8);
+  __pyx_t_8 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":613
+ * 
+ *         cdef cnp.int32_t read_length
+ *         read_length = len(query_sequence)             # <<<<<<<<<<<<<<
+ * 
+ *         cdef cnp.int8_t s_size
+ */
+  __pyx_t_15 = PyObject_Length(__pyx_v_query_sequence); if (unlikely(__pyx_t_15 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 613; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_v_read_length = __pyx_t_15;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":616
+ * 
+ *         cdef cnp.int8_t s_size
+ *         s_size = score_size             # <<<<<<<<<<<<<<
+ * 
+ *         cdef cnp.int32_t m_width
+ */
+  __pyx_t_16 = __Pyx_PyInt_As_npy_int8(__pyx_v_score_size); if (unlikely((__pyx_t_16 == (npy_int8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_v_s_size = __pyx_t_16;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":619
+ * 
+ *         cdef cnp.int32_t m_width
+ *         m_width = 24 if self.is_protein else 5             # <<<<<<<<<<<<<<
+ * 
+ *         cdef s_profile* p
+ */
+  __pyx_t_11 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_11) {
+    __pyx_t_4 = 24;
+  } else {
+    __pyx_t_4 = 5;
+  }
+  __pyx_v_m_width = __pyx_t_4;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":622
+ * 
+ *         cdef s_profile* p
+ *         self.profile = ssw_init(<cnp.int8_t*> read_seq.data,             # <<<<<<<<<<<<<<
+ *                                 read_length,
+ *                                 <cnp.int8_t*> matrix.data,
+ */
+  __pyx_v_self->profile = ssw_init(((__pyx_t_5numpy_int8_t *)__pyx_v_read_seq->data), __pyx_v_read_length, ((__pyx_t_5numpy_int8_t *)__pyx_v_matrix->data), __pyx_v_m_width, __pyx_v_s_size);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":629
+ * 
+ *         # A hack to keep the python GC from eating our data
+ *         self.__KEEP_IT_IN_SCOPE_read = read_seq             # <<<<<<<<<<<<<<
+ *         self.__KEEP_IT_IN_SCOPE_matrix = matrix
+ * 
+ */
+  __Pyx_INCREF(((PyObject *)__pyx_v_read_seq));
+  __Pyx_GIVEREF(((PyObject *)__pyx_v_read_seq));
+  __Pyx_GOTREF(__pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_read);
+  __Pyx_DECREF(((PyObject *)__pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_read));
+  __pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_read = ((PyArrayObject *)__pyx_v_read_seq);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":630
+ *         # A hack to keep the python GC from eating our data
+ *         self.__KEEP_IT_IN_SCOPE_read = read_seq
+ *         self.__KEEP_IT_IN_SCOPE_matrix = matrix             # <<<<<<<<<<<<<<
+ * 
+ *     def __call__(self, target_sequence):
+ */
+  __Pyx_INCREF(((PyObject *)__pyx_v_matrix));
+  __Pyx_GIVEREF(((PyObject *)__pyx_v_matrix));
+  __Pyx_GOTREF(__pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_matrix);
+  __Pyx_DECREF(((PyObject *)__pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_matrix));
+  __pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_matrix = ((PyArrayObject *)__pyx_v_matrix);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":558
+ *     cdef cnp.ndarray __KEEP_IT_IN_SCOPE_matrix
+ * 
+ *     def __cinit__(self, query_sequence,             # <<<<<<<<<<<<<<
+ *                   gap_open_penalty=5,  # BLASTN Default
+ *                   gap_extend_penalty=2,  # BLASTN Default
+ */
+
+  /* function exit code */
+  __pyx_r = 0;
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_8);
+  __Pyx_XDECREF(__pyx_t_9);
+  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_read_seq.rcbuffer->pybuffer);
+  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = -1;
+  goto __pyx_L2;
+  __pyx_L0:;
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_read_seq.rcbuffer->pybuffer);
+  __pyx_L2:;
+  __Pyx_XDECREF((PyObject *)__pyx_v_matrix);
+  __Pyx_XDECREF((PyObject *)__pyx_v_read_seq);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":632
+ *         self.__KEEP_IT_IN_SCOPE_matrix = matrix
+ * 
+ *     def __call__(self, target_sequence):             # <<<<<<<<<<<<<<
+ *         """Align `target_sequence` to `query_sequence`
+ * 
+ */
+
+/* Python wrapper */
+/* NOTE(review): Cython-generated C — do not edit by hand; regenerate from
+ * _ssw_wrapper.pyx. This wrapper parses the single `target_sequence`
+ * argument (positional or keyword) for StripedSmithWaterman.__call__ and
+ * delegates to the __pyx_pf_..._2__call__ implementation. */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_3__call__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__[] = "Align `target_sequence` to `query_sequence`\n\n        Parameters\n        ----------\n        target_sequence : str\n\n        Returns\n        -------\n        skbio.alignment.AlignmentStructure\n            The resulting alignment.\n\n        ";
+#if CYTHON_COMPILING_IN_CPYTHON
+struct wrapperbase __pyx_wrapperbase_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__;
+#endif
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_3__call__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  PyObject *__pyx_v_target_sequence = 0;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__call__ (wrapper)", 0);
+  {
+    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_target_sequence,0};
+    PyObject* values[1] = {0};
+    /* Slow path: keyword arguments present — fall-through switch collects
+     * positionals, then keywords fill the remaining slots. */
+    if (unlikely(__pyx_kwds)) {
+      Py_ssize_t kw_args;
+      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+      switch (pos_args) {
+        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        case  0: break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+      kw_args = PyDict_Size(__pyx_kwds);
+      switch (pos_args) {
+        case  0:
+        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_target_sequence)) != 0)) kw_args--;
+        else goto __pyx_L5_argtuple_error;
+      }
+      /* Any leftover keywords are unexpected — raise TypeError. */
+      if (unlikely(kw_args > 0)) {
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+      }
+    } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
+      goto __pyx_L5_argtuple_error;
+    } else {
+      /* Fast path: exactly one positional argument, no keywords. */
+      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+    }
+    __pyx_v_target_sequence = values[0];
+  }
+  goto __pyx_L4_argument_unpacking_done;
+  __pyx_L5_argtuple_error:;
+  __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __pyx_L3_error:;
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_RefNannyFinishContext();
+  return NULL;
+  __pyx_L4_argument_unpacking_done:;
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self), __pyx_v_target_sequence);
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* NOTE(review): Cython-generated C — regenerate from _ssw_wrapper.pyx rather
+ * than editing here. Implementation of StripedSmithWaterman.__call__: encodes
+ * `target_sequence` to an int8 array via _seq_converter, runs ssw_align()
+ * against the pre-built query profile stored on `self`, wraps the resulting
+ * s_align* in an AlignmentStructure (empty sequences when
+ * self.suppress_sequences is set), and returns it. */
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_target_sequence) {
+  PyObject *__pyx_v_reference_sequence = NULL;
+  PyArrayObject *__pyx_v_reference = 0;
+  __pyx_t_5numpy_int32_t __pyx_v_ref_length;
+  s_align *__pyx_v_align;
+  struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_alignment = NULL;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_reference;
+  __Pyx_Buffer __pyx_pybuffer_reference;
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_t_2;
+  PyObject *__pyx_t_3 = NULL;
+  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_5 = NULL;
+  Py_ssize_t __pyx_t_6;
+  int __pyx_t_7;
+  PyObject *__pyx_t_8 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("__call__", 0);
+  __pyx_pybuffer_reference.pybuffer.buf = NULL;
+  __pyx_pybuffer_reference.refcount = 0;
+  __pyx_pybuffernd_reference.data = NULL;
+  __pyx_pybuffernd_reference.rcbuffer = &__pyx_pybuffer_reference;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":645
+ * 
+ *         """
+ *         reference_sequence = target_sequence             # <<<<<<<<<<<<<<
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] reference
+ *         reference = self._seq_converter(reference_sequence)
+ */
+  __Pyx_INCREF(__pyx_v_target_sequence);
+  __pyx_v_reference_sequence = __pyx_v_target_sequence;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":647
+ *         reference_sequence = target_sequence
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] reference
+ *         reference = self._seq_converter(reference_sequence)             # <<<<<<<<<<<<<<
+ * 
+ *         cdef cnp.int32_t ref_length
+ */
+  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_seq_converter(__pyx_v_self, __pyx_v_reference_sequence)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  /* Acquire a C-contiguous int8 buffer on the converted array; on failure,
+   * try to re-acquire the previous buffer before propagating the error
+   * (standard Cython buffer-fallback pattern). */
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_reference.rcbuffer->pybuffer);
+    __pyx_t_2 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_reference.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_1), &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
+    if (unlikely(__pyx_t_2 < 0)) {
+      PyErr_Fetch(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_reference.rcbuffer->pybuffer, (PyObject*)__pyx_v_reference, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_3); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5);
+        __Pyx_RaiseBufferFallbackError();
+      } else {
+        PyErr_Restore(__pyx_t_3, __pyx_t_4, __pyx_t_5);
+      }
+    }
+    __pyx_pybuffernd_reference.diminfo[0].strides = __pyx_pybuffernd_reference.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_reference.diminfo[0].shape = __pyx_pybuffernd_reference.rcbuffer->pybuffer.shape[0];
+    if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_v_reference = ((PyArrayObject *)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":650
+ * 
+ *         cdef cnp.int32_t ref_length
+ *         ref_length = len(reference_sequence)             # <<<<<<<<<<<<<<
+ * 
+ *         cdef s_align *align
+ */
+  __pyx_t_6 = PyObject_Length(__pyx_v_reference_sequence); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_v_ref_length = __pyx_t_6;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":653
+ * 
+ *         cdef s_align *align
+ *         align = ssw_align(self.profile, <cnp.int8_t*> reference.data,             # <<<<<<<<<<<<<<
+ *                           ref_length, self.gap_open_penalty,
+ *                           self.gap_extend_penalty, self.bit_flag,
+ */
+  /* Core alignment: query profile (self.profile, built in __cinit__) vs. the
+   * encoded reference, with the penalties/filters stored on self. */
+  __pyx_v_align = ssw_align(__pyx_v_self->profile, ((__pyx_t_5numpy_int8_t *)__pyx_v_reference->data), __pyx_v_ref_length, __pyx_v_self->gap_open_penalty, __pyx_v_self->gap_extend_penalty, __pyx_v_self->bit_flag, __pyx_v_self->score_filter, __pyx_v_self->distance_filter, __pyx_v_self->mask_length);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":660
+ * 
+ *         # Cython won't let me do this correctly, so duplicate code ahoy:
+ *         if self.suppress_sequences:             # <<<<<<<<<<<<<<
+ *             alignment = AlignmentStructure("", "", self.index_starts_at)
+ *         else:
+ */
+  __pyx_t_7 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->suppress_sequences)); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_7) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":661
+ *         # Cython won't let me do this correctly, so duplicate code ahoy:
+ *         if self.suppress_sequences:
+ *             alignment = AlignmentStructure("", "", self.index_starts_at)             # <<<<<<<<<<<<<<
+ *         else:
+ *             alignment = AlignmentStructure(self.read_sequence,
+ */
+    __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->index_starts_at); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_8);
+    __Pyx_INCREF(__pyx_kp_s__6);
+    PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_kp_s__6);
+    __Pyx_GIVEREF(__pyx_kp_s__6);
+    __Pyx_INCREF(__pyx_kp_s__6);
+    PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_kp_s__6);
+    __Pyx_GIVEREF(__pyx_kp_s__6);
+    PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_1);
+    __Pyx_GIVEREF(__pyx_t_1);
+    __pyx_t_1 = 0;
+    __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+    __pyx_v_alignment = ((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_t_1);
+    __pyx_t_1 = 0;
+    goto __pyx_L3;
+  }
+  /*else*/ {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":665
+ *             alignment = AlignmentStructure(self.read_sequence,
+ *                                            reference_sequence,
+ *                                            self.index_starts_at)             # <<<<<<<<<<<<<<
+ *         alignment.__constructor(align)  # Hack to get a pointer through
+ *         return alignment
+ */
+    __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->index_starts_at); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":663
+ *             alignment = AlignmentStructure("", "", self.index_starts_at)
+ *         else:
+ *             alignment = AlignmentStructure(self.read_sequence,             # <<<<<<<<<<<<<<
+ *                                            reference_sequence,
+ *                                            self.index_starts_at)
+ */
+    __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 663; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_8);
+    __Pyx_INCREF(__pyx_v_self->read_sequence);
+    PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_self->read_sequence);
+    __Pyx_GIVEREF(__pyx_v_self->read_sequence);
+    __Pyx_INCREF(__pyx_v_reference_sequence);
+    PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_v_reference_sequence);
+    __Pyx_GIVEREF(__pyx_v_reference_sequence);
+    PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_1);
+    __Pyx_GIVEREF(__pyx_t_1);
+    __pyx_t_1 = 0;
+    __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 663; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+    __pyx_v_alignment = ((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_t_1);
+    __pyx_t_1 = 0;
+  }
+  __pyx_L3:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":666
+ *                                            reference_sequence,
+ *                                            self.index_starts_at)
+ *         alignment.__constructor(align)  # Hack to get a pointer through             # <<<<<<<<<<<<<<
+ *         return alignment
+ * 
+ */
+  /* Hands the raw s_align* to the AlignmentStructure via a cdef method,
+   * since a C pointer cannot pass through a Python-level constructor. */
+  __pyx_t_1 = ((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_alignment->__pyx_vtab)->__pyx___constructor(__pyx_v_alignment, __pyx_v_align); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":667
+ *                                            self.index_starts_at)
+ *         alignment.__constructor(align)  # Hack to get a pointer through
+ *         return alignment             # <<<<<<<<<<<<<<
+ * 
+ *     def __dealloc__(self):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(((PyObject *)__pyx_v_alignment));
+  __pyx_r = ((PyObject *)__pyx_v_alignment);
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":632
+ *         self.__KEEP_IT_IN_SCOPE_matrix = matrix
+ * 
+ *     def __call__(self, target_sequence):             # <<<<<<<<<<<<<<
+ *         """Align `target_sequence` to `query_sequence`
+ * 
+ */
+
+  /* function exit code */
+  /* Error path: release the reference buffer while the in-flight exception
+   * is temporarily stashed, so buffer cleanup cannot clobber it. */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_8);
+  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_reference.rcbuffer->pybuffer);
+  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  goto __pyx_L2;
+  __pyx_L0:;
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_reference.rcbuffer->pybuffer);
+  __pyx_L2:;
+  __Pyx_XDECREF(__pyx_v_reference_sequence);
+  __Pyx_XDECREF((PyObject *)__pyx_v_reference);
+  __Pyx_XDECREF((PyObject *)__pyx_v_alignment);
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":669
+ *         return alignment
+ * 
+ *     def __dealloc__(self):             # <<<<<<<<<<<<<<
+ *         if self.profile is not NULL:
+ *             init_destroy(self.profile)
+ */
+
+/* Python wrapper */
+/* NOTE(review): Cython-generated tp_dealloc-level wrapper for
+ * StripedSmithWaterman.__dealloc__; simply casts self and delegates to the
+ * __pyx_pf_..._4__dealloc__ implementation. Regenerate from the .pyx. */
+static void __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_5__dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_5__dealloc__(PyObject *__pyx_v_self) {
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
+  __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__dealloc__(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+/* Implementation of StripedSmithWaterman.__dealloc__: frees the C-level SSW
+ * query profile via init_destroy(). The NULL guard makes deallocation safe
+ * for partially constructed instances where the profile was never built. */
+static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__dealloc__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self) {
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  __Pyx_RefNannySetupContext("__dealloc__", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":670
+ * 
+ *     def __dealloc__(self):
+ *         if self.profile is not NULL:             # <<<<<<<<<<<<<<
+ *             init_destroy(self.profile)
+ * 
+ */
+  __pyx_t_1 = ((__pyx_v_self->profile != NULL) != 0);
+  if (__pyx_t_1) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":671
+ *     def __dealloc__(self):
+ *         if self.profile is not NULL:
+ *             init_destroy(self.profile)             # <<<<<<<<<<<<<<
+ * 
+ *     def _get_bit_flag(self, override_skip_babp, score_only):
+ */
+    init_destroy(__pyx_v_self->profile);
+    goto __pyx_L3;
+  }
+  __pyx_L3:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":669
+ *         return alignment
+ * 
+ *     def __dealloc__(self):             # <<<<<<<<<<<<<<
+ *         if self.profile is not NULL:
+ *             init_destroy(self.profile)
+ */
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":673
+ *             init_destroy(self.profile)
+ * 
+ *     def _get_bit_flag(self, override_skip_babp, score_only):             # <<<<<<<<<<<<<<
+ *         bit_flag = 0
+ *         if score_only:
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_7_get_bit_flag(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_7_get_bit_flag(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  PyObject *__pyx_v_override_skip_babp = 0;
+  PyObject *__pyx_v_score_only = 0;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("_get_bit_flag (wrapper)", 0);
+  {
+    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_override_skip_babp,&__pyx_n_s_score_only,0};
+    PyObject* values[2] = {0,0};
+    if (unlikely(__pyx_kwds)) {
+      Py_ssize_t kw_args;
+      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+      switch (pos_args) {
+        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        case  0: break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+      kw_args = PyDict_Size(__pyx_kwds);
+      switch (pos_args) {
+        case  0:
+        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_override_skip_babp)) != 0)) kw_args--;
+        else goto __pyx_L5_argtuple_error;
+        case  1:
+        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_score_only)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("_get_bit_flag", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+      }
+      if (unlikely(kw_args > 0)) {
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_get_bit_flag") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+      }
+    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+      goto __pyx_L5_argtuple_error;
+    } else {
+      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+    }
+    __pyx_v_override_skip_babp = values[0];
+    __pyx_v_score_only = values[1];
+  }
+  goto __pyx_L4_argument_unpacking_done;
+  __pyx_L5_argtuple_error:;
+  __Pyx_RaiseArgtupleInvalid("_get_bit_flag", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __pyx_L3_error:;
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman._get_bit_flag", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_RefNannyFinishContext();
+  return NULL;
+  __pyx_L4_argument_unpacking_done:;
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_6_get_bit_flag(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self), __pyx_v_override_skip_babp, __pyx_v_score_only);
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_6_get_bit_flag(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_override_skip_babp, PyObject *__pyx_v_score_only) {
+  long __pyx_v_bit_flag;
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  PyObject *__pyx_t_2 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_get_bit_flag", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":674
+ * 
+ *     def _get_bit_flag(self, override_skip_babp, score_only):
+ *         bit_flag = 0             # <<<<<<<<<<<<<<
+ *         if score_only:
+ *             return bit_flag
+ */
+  __pyx_v_bit_flag = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":675
+ *     def _get_bit_flag(self, override_skip_babp, score_only):
+ *         bit_flag = 0
+ *         if score_only:             # <<<<<<<<<<<<<<
+ *             return bit_flag
+ *         if override_skip_babp:
+ */
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_score_only); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_1) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":676
+ *         bit_flag = 0
+ *         if score_only:
+ *             return bit_flag             # <<<<<<<<<<<<<<
+ *         if override_skip_babp:
+ *             bit_flag = bit_flag | 0x8
+ */
+    __Pyx_XDECREF(__pyx_r);
+    __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_bit_flag); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_r = __pyx_t_2;
+    __pyx_t_2 = 0;
+    goto __pyx_L0;
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":677
+ *         if score_only:
+ *             return bit_flag
+ *         if override_skip_babp:             # <<<<<<<<<<<<<<
+ *             bit_flag = bit_flag | 0x8
+ *         if self.distance_filter != 0:
+ */
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_override_skip_babp); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_1) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":678
+ *             return bit_flag
+ *         if override_skip_babp:
+ *             bit_flag = bit_flag | 0x8             # <<<<<<<<<<<<<<
+ *         if self.distance_filter != 0:
+ *             bit_flag = bit_flag | 0x4
+ */
+    __pyx_v_bit_flag = (__pyx_v_bit_flag | 0x8);
+    goto __pyx_L4;
+  }
+  __pyx_L4:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":679
+ *         if override_skip_babp:
+ *             bit_flag = bit_flag | 0x8
+ *         if self.distance_filter != 0:             # <<<<<<<<<<<<<<
+ *             bit_flag = bit_flag | 0x4
+ *         if self.score_filter != 0:
+ */
+  __pyx_t_1 = ((__pyx_v_self->distance_filter != 0) != 0);
+  if (__pyx_t_1) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":680
+ *             bit_flag = bit_flag | 0x8
+ *         if self.distance_filter != 0:
+ *             bit_flag = bit_flag | 0x4             # <<<<<<<<<<<<<<
+ *         if self.score_filter != 0:
+ *             bit_flag = bit_flag | 0x2
+ */
+    __pyx_v_bit_flag = (__pyx_v_bit_flag | 0x4);
+    goto __pyx_L5;
+  }
+  __pyx_L5:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":681
+ *         if self.distance_filter != 0:
+ *             bit_flag = bit_flag | 0x4
+ *         if self.score_filter != 0:             # <<<<<<<<<<<<<<
+ *             bit_flag = bit_flag | 0x2
+ *         if bit_flag == 0 or bit_flag == 8:
+ */
+  __pyx_t_1 = ((__pyx_v_self->score_filter != 0) != 0);
+  if (__pyx_t_1) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":682
+ *             bit_flag = bit_flag | 0x4
+ *         if self.score_filter != 0:
+ *             bit_flag = bit_flag | 0x2             # <<<<<<<<<<<<<<
+ *         if bit_flag == 0 or bit_flag == 8:
+ *             bit_flag = bit_flag | 0x1
+ */
+    __pyx_v_bit_flag = (__pyx_v_bit_flag | 0x2);
+    goto __pyx_L6;
+  }
+  __pyx_L6:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":683
+ *         if self.score_filter != 0:
+ *             bit_flag = bit_flag | 0x2
+ *         if bit_flag == 0 or bit_flag == 8:             # <<<<<<<<<<<<<<
+ *             bit_flag = bit_flag | 0x1
+ *         return bit_flag
+ */
+  switch (__pyx_v_bit_flag) {
+    case 0:
+    case 8:
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":684
+ *             bit_flag = bit_flag | 0x2
+ *         if bit_flag == 0 or bit_flag == 8:
+ *             bit_flag = bit_flag | 0x1             # <<<<<<<<<<<<<<
+ *         return bit_flag
+ * 
+ */
+    __pyx_v_bit_flag = (__pyx_v_bit_flag | 0x1);
+    break;
+    default: break;
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":685
+ *         if bit_flag == 0 or bit_flag == 8:
+ *             bit_flag = bit_flag | 0x1
+ *         return bit_flag             # <<<<<<<<<<<<<<
+ * 
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] _seq_converter(
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_bit_flag); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 685; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_r = __pyx_t_2;
+  __pyx_t_2 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":673
+ *             init_destroy(self.profile)
+ * 
+ *     def _get_bit_flag(self, override_skip_babp, score_only):             # <<<<<<<<<<<<<<
+ *         bit_flag = 0
+ *         if score_only:
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman._get_bit_flag", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":687
+ *         return bit_flag
+ * 
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] _seq_converter(             # <<<<<<<<<<<<<<
+ *             self,
+ *             sequence):
+ */
+
+static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__seq_converter(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_sequence) {
+  PyArrayObject *__pyx_v_seq = 0;
+  PyObject *__pyx_v_i = NULL;
+  PyObject *__pyx_v_char = NULL;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_seq;
+  __Pyx_Buffer __pyx_pybuffer_seq;
+  PyArrayObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
+  Py_ssize_t __pyx_t_3;
+  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_5 = NULL;
+  PyObject *__pyx_t_6 = NULL;
+  PyArrayObject *__pyx_t_7 = NULL;
+  int __pyx_t_8;
+  PyObject *__pyx_t_9 = NULL;
+  PyObject *__pyx_t_10 = NULL;
+  PyObject *__pyx_t_11 = NULL;
+  int __pyx_t_12;
+  PyObject *(*__pyx_t_13)(PyObject *);
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_seq_converter", 0);
+  __pyx_pybuffer_seq.pybuffer.buf = NULL;
+  __pyx_pybuffer_seq.refcount = 0;
+  __pyx_pybuffernd_seq.data = NULL;
+  __pyx_pybuffernd_seq.rcbuffer = &__pyx_pybuffer_seq;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":691
+ *             sequence):
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] seq
+ *         seq = np.empty(len(sequence), dtype=np.int8)             # <<<<<<<<<<<<<<
+ *         if self.is_protein:
+ *             for i, char in enumerate(sequence):
+ */
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_3 = PyObject_Length(__pyx_v_sequence); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_int8); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_6);
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+  __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_6);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_7 = ((PyArrayObject *)__pyx_t_6);
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_seq.rcbuffer->pybuffer);
+    __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_seq.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
+    if (unlikely(__pyx_t_8 < 0)) {
+      PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_seq.rcbuffer->pybuffer, (PyObject*)__pyx_v_seq, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11);
+        __Pyx_RaiseBufferFallbackError();
+      } else {
+        PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11);
+      }
+    }
+    __pyx_pybuffernd_seq.diminfo[0].strides = __pyx_pybuffernd_seq.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_seq.diminfo[0].shape = __pyx_pybuffernd_seq.rcbuffer->pybuffer.shape[0];
+    if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_7 = 0;
+  __pyx_v_seq = ((PyArrayObject *)__pyx_t_6);
+  __pyx_t_6 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":692
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] seq
+ *         seq = np.empty(len(sequence), dtype=np.int8)
+ *         if self.is_protein:             # <<<<<<<<<<<<<<
+ *             for i, char in enumerate(sequence):
+ *                 seq[i] = np_aa_table[ord(char)]
+ */
+  __pyx_t_12 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_12) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":693
+ *         seq = np.empty(len(sequence), dtype=np.int8)
+ *         if self.is_protein:
+ *             for i, char in enumerate(sequence):             # <<<<<<<<<<<<<<
+ *                 seq[i] = np_aa_table[ord(char)]
+ *         else:
+ */
+    __Pyx_INCREF(__pyx_int_0);
+    __pyx_t_6 = __pyx_int_0;
+    if (PyList_CheckExact(__pyx_v_sequence) || PyTuple_CheckExact(__pyx_v_sequence)) {
+      __pyx_t_1 = __pyx_v_sequence; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0;
+      __pyx_t_13 = NULL;
+    } else {
+      __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_1);
+      __pyx_t_13 = Py_TYPE(__pyx_t_1)->tp_iternext;
+    }
+    for (;;) {
+      if (!__pyx_t_13 && PyList_CheckExact(__pyx_t_1)) {
+        if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #endif
+      } else if (!__pyx_t_13 && PyTuple_CheckExact(__pyx_t_1)) {
+        if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #endif
+      } else {
+        __pyx_t_4 = __pyx_t_13(__pyx_t_1);
+        if (unlikely(!__pyx_t_4)) {
+          PyObject* exc_type = PyErr_Occurred();
+          if (exc_type) {
+            if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          }
+          break;
+        }
+        __Pyx_GOTREF(__pyx_t_4);
+      }
+      __Pyx_XDECREF_SET(__pyx_v_char, __pyx_t_4);
+      __pyx_t_4 = 0;
+      __Pyx_INCREF(__pyx_t_6);
+      __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6);
+      __pyx_t_4 = PyNumber_Add(__pyx_t_6, __pyx_int_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __Pyx_DECREF(__pyx_t_6);
+      __pyx_t_6 = __pyx_t_4;
+      __pyx_t_4 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":694
+ *         if self.is_protein:
+ *             for i, char in enumerate(sequence):
+ *                 seq[i] = np_aa_table[ord(char)]             # <<<<<<<<<<<<<<
+ *         else:
+ *             for i, char in enumerate(sequence):
+ */
+      __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np_aa_table); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_2);
+      __Pyx_INCREF(__pyx_v_char);
+      PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_char);
+      __Pyx_GIVEREF(__pyx_v_char);
+      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ord, __pyx_t_2, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_5);
+      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+      __pyx_t_2 = PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __Pyx_GOTREF(__pyx_t_2);
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+      if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_seq), __pyx_v_i, __pyx_t_2) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+    }
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+    goto __pyx_L3;
+  }
+  /*else*/ {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":696
+ *                 seq[i] = np_aa_table[ord(char)]
+ *         else:
+ *             for i, char in enumerate(sequence):             # <<<<<<<<<<<<<<
+ *                 seq[i] = np_nt_table[ord(char)]
+ *         return seq
+ */
+    __Pyx_INCREF(__pyx_int_0);
+    __pyx_t_6 = __pyx_int_0;
+    if (PyList_CheckExact(__pyx_v_sequence) || PyTuple_CheckExact(__pyx_v_sequence)) {
+      __pyx_t_1 = __pyx_v_sequence; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0;
+      __pyx_t_13 = NULL;
+    } else {
+      __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_1);
+      __pyx_t_13 = Py_TYPE(__pyx_t_1)->tp_iternext;
+    }
+    for (;;) {
+      if (!__pyx_t_13 && PyList_CheckExact(__pyx_t_1)) {
+        if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #endif
+      } else if (!__pyx_t_13 && PyTuple_CheckExact(__pyx_t_1)) {
+        if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #endif
+      } else {
+        __pyx_t_2 = __pyx_t_13(__pyx_t_1);
+        if (unlikely(!__pyx_t_2)) {
+          PyObject* exc_type = PyErr_Occurred();
+          if (exc_type) {
+            if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          }
+          break;
+        }
+        __Pyx_GOTREF(__pyx_t_2);
+      }
+      __Pyx_XDECREF_SET(__pyx_v_char, __pyx_t_2);
+      __pyx_t_2 = 0;
+      __Pyx_INCREF(__pyx_t_6);
+      __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6);
+      __pyx_t_2 = PyNumber_Add(__pyx_t_6, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_2);
+      __Pyx_DECREF(__pyx_t_6);
+      __pyx_t_6 = __pyx_t_2;
+      __pyx_t_2 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":697
+ *         else:
+ *             for i, char in enumerate(sequence):
+ *                 seq[i] = np_nt_table[ord(char)]             # <<<<<<<<<<<<<<
+ *         return seq
+ * 
+ */
+      __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np_nt_table); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_2);
+      __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_5);
+      __Pyx_INCREF(__pyx_v_char);
+      PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_char);
+      __Pyx_GIVEREF(__pyx_v_char);
+      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ord, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+      __pyx_t_5 = PyObject_GetItem(__pyx_t_2, __pyx_t_4); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __Pyx_GOTREF(__pyx_t_5);
+      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_seq), __pyx_v_i, __pyx_t_5) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    }
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+  }
+  __pyx_L3:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":698
+ *             for i, char in enumerate(sequence):
+ *                 seq[i] = np_nt_table[ord(char)]
+ *         return seq             # <<<<<<<<<<<<<<
+ * 
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
+ */
+  __Pyx_XDECREF(((PyObject *)__pyx_r));
+  __Pyx_INCREF(((PyObject *)__pyx_v_seq));
+  __pyx_r = ((PyArrayObject *)__pyx_v_seq);
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":687
+ *         return bit_flag
+ * 
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] _seq_converter(             # <<<<<<<<<<<<<<
+ *             self,
+ *             sequence):
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_5);
+  __Pyx_XDECREF(__pyx_t_6);
+  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_seq.rcbuffer->pybuffer);
+  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman._seq_converter", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  goto __pyx_L2;
+  __pyx_L0:;
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_seq.rcbuffer->pybuffer);
+  __pyx_L2:;
+  __Pyx_XDECREF((PyObject *)__pyx_v_seq);
+  __Pyx_XDECREF(__pyx_v_i);
+  __Pyx_XDECREF(__pyx_v_char);
+  __Pyx_XGIVEREF((PyObject *)__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":700
+ *         return seq
+ * 
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
+ *             _build_match_matrix(self, match_score, mismatch_score):
+ *         sequence_order = "ACGTN"
+ */
+
+static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__build_match_matrix(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_match_score, PyObject *__pyx_v_mismatch_score) {
+  PyObject *__pyx_v_sequence_order = NULL;
+  PyObject *__pyx_v_dict2d = NULL;
+  PyObject *__pyx_v_row = NULL;
+  PyObject *__pyx_v_column = NULL;
+  PyArrayObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  Py_ssize_t __pyx_t_2;
+  PyObject *(*__pyx_t_3)(PyObject *);
+  PyObject *__pyx_t_4 = NULL;
+  Py_ssize_t __pyx_t_5;
+  PyObject *(*__pyx_t_6)(PyObject *);
+  PyObject *__pyx_t_7 = NULL;
+  int __pyx_t_8;
+  int __pyx_t_9;
+  int __pyx_t_10;
+  PyObject *__pyx_t_11 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_build_match_matrix", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":702
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
+ *             _build_match_matrix(self, match_score, mismatch_score):
+ *         sequence_order = "ACGTN"             # <<<<<<<<<<<<<<
+ *         dict2d = {}
+ *         for row in sequence_order:
+ */
+  __Pyx_INCREF(__pyx_n_s_ACGTN);
+  __pyx_v_sequence_order = __pyx_n_s_ACGTN;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":703
+ *             _build_match_matrix(self, match_score, mismatch_score):
+ *         sequence_order = "ACGTN"
+ *         dict2d = {}             # <<<<<<<<<<<<<<
+ *         for row in sequence_order:
+ *             dict2d[row] = {}
+ */
+  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_v_dict2d = ((PyObject*)__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":704
+ *         sequence_order = "ACGTN"
+ *         dict2d = {}
+ *         for row in sequence_order:             # <<<<<<<<<<<<<<
+ *             dict2d[row] = {}
+ *             for column in sequence_order:
+ */
+  if (PyList_CheckExact(__pyx_v_sequence_order) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
+    __pyx_t_1 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
+    __pyx_t_3 = NULL;
+  } else {
+    __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext;
+  }
+  for (;;) {
+    if (!__pyx_t_3 && PyList_CheckExact(__pyx_t_1)) {
+      if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #else
+      __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #endif
+    } else if (!__pyx_t_3 && PyTuple_CheckExact(__pyx_t_1)) {
+      if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #else
+      __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #endif
+    } else {
+      __pyx_t_4 = __pyx_t_3(__pyx_t_1);
+      if (unlikely(!__pyx_t_4)) {
+        PyObject* exc_type = PyErr_Occurred();
+        if (exc_type) {
+          if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        }
+        break;
+      }
+      __Pyx_GOTREF(__pyx_t_4);
+    }
+    __Pyx_XDECREF_SET(__pyx_v_row, __pyx_t_4);
+    __pyx_t_4 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":705
+ *         dict2d = {}
+ *         for row in sequence_order:
+ *             dict2d[row] = {}             # <<<<<<<<<<<<<<
+ *             for column in sequence_order:
+ *                 if column == 'N' or row == 'N':
+ */
+    __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    if (unlikely(PyDict_SetItem(__pyx_v_dict2d, __pyx_v_row, __pyx_t_4) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":706
+ *         for row in sequence_order:
+ *             dict2d[row] = {}
+ *             for column in sequence_order:             # <<<<<<<<<<<<<<
+ *                 if column == 'N' or row == 'N':
+ *                     dict2d[row][column] = 0
+ */
+    if (PyList_CheckExact(__pyx_v_sequence_order) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
+      __pyx_t_4 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
+      __pyx_t_6 = NULL;
+    } else {
+      __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext;
+    }
+    for (;;) {
+      if (!__pyx_t_6 && PyList_CheckExact(__pyx_t_4)) {
+        if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #endif
+      } else if (!__pyx_t_6 && PyTuple_CheckExact(__pyx_t_4)) {
+        if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #endif
+      } else {
+        __pyx_t_7 = __pyx_t_6(__pyx_t_4);
+        if (unlikely(!__pyx_t_7)) {
+          PyObject* exc_type = PyErr_Occurred();
+          if (exc_type) {
+            if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          }
+          break;
+        }
+        __Pyx_GOTREF(__pyx_t_7);
+      }
+      __Pyx_XDECREF_SET(__pyx_v_column, __pyx_t_7);
+      __pyx_t_7 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":707
+ *             dict2d[row] = {}
+ *             for column in sequence_order:
+ *                 if column == 'N' or row == 'N':             # <<<<<<<<<<<<<<
+ *                     dict2d[row][column] = 0
+ *                 else:
+ */
+      __pyx_t_8 = (__Pyx_PyString_Equals(__pyx_v_column, __pyx_n_s_N, Py_EQ)); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (!__pyx_t_8) {
+        __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_row, __pyx_n_s_N, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_10 = __pyx_t_9;
+      } else {
+        __pyx_t_10 = __pyx_t_8;
+      }
+      if (__pyx_t_10) {
+
+        /* "skbio/alignment/_ssw_wrapper.pyx":708
+ *             for column in sequence_order:
+ *                 if column == 'N' or row == 'N':
+ *                     dict2d[row][column] = 0             # <<<<<<<<<<<<<<
+ *                 else:
+ *                     dict2d[row][column] = match_score if row == column \
+ */
+        __pyx_t_7 = __Pyx_PyDict_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+        __Pyx_GOTREF(__pyx_t_7);
+        if (unlikely(PyObject_SetItem(__pyx_t_7, __pyx_v_column, __pyx_int_0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+        goto __pyx_L7;
+      }
+      /*else*/ {
+
+        /* "skbio/alignment/_ssw_wrapper.pyx":711
+ *                 else:
+ *                     dict2d[row][column] = match_score if row == column \
+ *                         else mismatch_score             # <<<<<<<<<<<<<<
+ *         return self._convert_dict2d_to_matrix(dict2d)
+ * 
+ */
+        __pyx_t_11 = PyObject_RichCompare(__pyx_v_row, __pyx_v_column, Py_EQ); __Pyx_XGOTREF(__pyx_t_11); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+        /* "skbio/alignment/_ssw_wrapper.pyx":710
+ *                     dict2d[row][column] = 0
+ *                 else:
+ *                     dict2d[row][column] = match_score if row == column \             # <<<<<<<<<<<<<<
+ *                         else mismatch_score
+ *         return self._convert_dict2d_to_matrix(dict2d)
+ */
+        __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_11); if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+        if (__pyx_t_10) {
+          __Pyx_INCREF(__pyx_v_match_score);
+          __pyx_t_7 = __pyx_v_match_score;
+        } else {
+
+          /* "skbio/alignment/_ssw_wrapper.pyx":711
+ *                 else:
+ *                     dict2d[row][column] = match_score if row == column \
+ *                         else mismatch_score             # <<<<<<<<<<<<<<
+ *         return self._convert_dict2d_to_matrix(dict2d)
+ * 
+ */
+          __Pyx_INCREF(__pyx_v_mismatch_score);
+          __pyx_t_7 = __pyx_v_mismatch_score;
+        }
+
+        /* "skbio/alignment/_ssw_wrapper.pyx":710
+ *                     dict2d[row][column] = 0
+ *                 else:
+ *                     dict2d[row][column] = match_score if row == column \             # <<<<<<<<<<<<<<
+ *                         else mismatch_score
+ *         return self._convert_dict2d_to_matrix(dict2d)
+ */
+        __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+        __Pyx_GOTREF(__pyx_t_11);
+        if (unlikely(PyObject_SetItem(__pyx_t_11, __pyx_v_column, __pyx_t_7) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+        __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+      }
+      __pyx_L7:;
+    }
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  }
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":712
+ *                     dict2d[row][column] = match_score if row == column \
+ *                         else mismatch_score
+ *         return self._convert_dict2d_to_matrix(dict2d)             # <<<<<<<<<<<<<<
+ * 
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
+ */
+  __Pyx_XDECREF(((PyObject *)__pyx_r));
+  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_convert_dict2d_to_matrix(__pyx_v_self, __pyx_v_dict2d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 712; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = ((PyArrayObject *)__pyx_t_1);
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":700
+ *         return seq
+ * 
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
+ *             _build_match_matrix(self, match_score, mismatch_score):
+ *         sequence_order = "ACGTN"
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_7);
+  __Pyx_XDECREF(__pyx_t_11);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman._build_match_matrix", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  __pyx_L0:;
+  __Pyx_XDECREF(__pyx_v_sequence_order);
+  __Pyx_XDECREF(__pyx_v_dict2d);
+  __Pyx_XDECREF(__pyx_v_row);
+  __Pyx_XDECREF(__pyx_v_column);
+  __Pyx_XGIVEREF((PyObject *)__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":714
+ *         return self._convert_dict2d_to_matrix(dict2d)
+ * 
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
+ *             _convert_dict2d_to_matrix(self, dict2d):
+ *         if self.is_protein:
+ */
+
+/* NOTE(review): machine-generated by Cython from _ssw_wrapper.pyx lines
+ * 714-728 -- do not hand-edit this C; regenerate from the .pyx instead.
+ * Flattens dict2d[row][column] into a 1-D C-contiguous int8 numpy array in
+ * row-major order, where rows/columns iterate the alphabet selected by
+ * self.is_protein ("ARNDCQEGHILKMFPSTWYVBZX*" for protein, "ACGTN" otherwise). */
+static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__convert_dict2d_to_matrix(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_dict2d) {
+  PyObject *__pyx_v_sequence_order = NULL;
+  int __pyx_v_i;
+  PyObject *__pyx_v_length = NULL;
+  PyArrayObject *__pyx_v_py_list_matrix = 0;
+  PyObject *__pyx_v_row = NULL;
+  PyObject *__pyx_v_column = NULL;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_py_list_matrix;
+  __Pyx_Buffer __pyx_pybuffer_py_list_matrix;
+  PyArrayObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  Py_ssize_t __pyx_t_2;
+  PyObject *__pyx_t_3 = NULL;
+  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_5 = NULL;
+  PyObject *__pyx_t_6 = NULL;
+  PyObject *__pyx_t_7 = NULL;
+  PyArrayObject *__pyx_t_8 = NULL;
+  PyObject *(*__pyx_t_9)(PyObject *);
+  Py_ssize_t __pyx_t_10;
+  PyObject *(*__pyx_t_11)(PyObject *);
+  __pyx_t_5numpy_int8_t __pyx_t_12;
+  int __pyx_t_13;
+  int __pyx_t_14;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_convert_dict2d_to_matrix", 0);
+  /* Buffer bookkeeping starts cleared so the error path can safely release it. */
+  __pyx_pybuffer_py_list_matrix.pybuffer.buf = NULL;
+  __pyx_pybuffer_py_list_matrix.refcount = 0;
+  __pyx_pybuffernd_py_list_matrix.data = NULL;
+  __pyx_pybuffernd_py_list_matrix.rcbuffer = &__pyx_pybuffer_py_list_matrix;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":716
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
+ *             _convert_dict2d_to_matrix(self, dict2d):
+ *         if self.is_protein:             # <<<<<<<<<<<<<<
+ *             sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"
+ *         else:
+ */
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_1) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":717
+ *             _convert_dict2d_to_matrix(self, dict2d):
+ *         if self.is_protein:
+ *             sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"             # <<<<<<<<<<<<<<
+ *         else:
+ *             sequence_order = "ACGTN"
+ */
+    __Pyx_INCREF(__pyx_kp_s_ARNDCQEGHILKMFPSTWYVBZX);
+    __pyx_v_sequence_order = __pyx_kp_s_ARNDCQEGHILKMFPSTWYVBZX;
+    goto __pyx_L3;
+  }
+  /*else*/ {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":719
+ *             sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"
+ *         else:
+ *             sequence_order = "ACGTN"             # <<<<<<<<<<<<<<
+ *         cdef int i = 0
+ *         length = len(sequence_order)
+ */
+    __Pyx_INCREF(__pyx_n_s_ACGTN);
+    __pyx_v_sequence_order = __pyx_n_s_ACGTN;
+  }
+  __pyx_L3:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":720
+ *         else:
+ *             sequence_order = "ACGTN"
+ *         cdef int i = 0             # <<<<<<<<<<<<<<
+ *         length = len(sequence_order)
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
+ */
+  __pyx_v_i = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":721
+ *             sequence_order = "ACGTN"
+ *         cdef int i = 0
+ *         length = len(sequence_order)             # <<<<<<<<<<<<<<
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
+ *             np.empty(length*length, dtype=np.int8)
+ */
+  __pyx_t_2 = PyObject_Length(__pyx_v_sequence_order); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyInt_FromSsize_t(__pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_v_length = __pyx_t_3;
+  __pyx_t_3 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":723
+ *         length = len(sequence_order)
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
+ *             np.empty(length*length, dtype=np.int8)             # <<<<<<<<<<<<<<
+ *         for row in sequence_order:
+ *             for column in sequence_order:
+ */
+  /* Builds the call np.empty(length*length, dtype=np.int8) by hand:
+   * args tuple (t_5), kwargs dict (t_3), then the call itself. */
+  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __pyx_t_3 = PyNumber_Multiply(__pyx_v_length, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+  __Pyx_GIVEREF(__pyx_t_3);
+  __pyx_t_3 = 0;
+  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_6);
+  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_int8); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_7);
+  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+  __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_7);
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (!(likely(((__pyx_t_7) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_7, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_8 = ((PyArrayObject *)__pyx_t_7);
+  {
+    /* Acquire a validated, writable, C-contiguous int8 buffer view on the
+     * new array; on failure the local is set to None so cleanup is safe. */
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
+      __pyx_v_py_list_matrix = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.buf = NULL;
+      /* NOTE(review): reports .pyx line 722 here while the sibling checks in
+       * this statement use 723 -- a Cython line-mapping artifact, harmless. */
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 722; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    } else {__pyx_pybuffernd_py_list_matrix.diminfo[0].strides = __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_py_list_matrix.diminfo[0].shape = __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.shape[0];
+    }
+  }
+  __pyx_t_8 = 0;
+  __pyx_v_py_list_matrix = ((PyArrayObject *)__pyx_t_7);
+  __pyx_t_7 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":724
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
+ *             np.empty(length*length, dtype=np.int8)
+ *         for row in sequence_order:             # <<<<<<<<<<<<<<
+ *             for column in sequence_order:
+ *                 py_list_matrix[i] = dict2d[row][column]
+ */
+  /* Outer row loop. Fast path indexes list/tuple directly; otherwise a
+   * generic iterator is used via tp_iternext (t_9 non-NULL). */
+  if (PyList_CheckExact(__pyx_v_sequence_order) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
+    __pyx_t_7 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_7); __pyx_t_2 = 0;
+    __pyx_t_9 = NULL;
+  } else {
+    __pyx_t_2 = -1; __pyx_t_7 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_7);
+    __pyx_t_9 = Py_TYPE(__pyx_t_7)->tp_iternext;
+  }
+  for (;;) {
+    if (!__pyx_t_9 && PyList_CheckExact(__pyx_t_7)) {
+      if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_7)) break;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_3 = PyList_GET_ITEM(__pyx_t_7, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #else
+      __pyx_t_3 = PySequence_ITEM(__pyx_t_7, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #endif
+    } else if (!__pyx_t_9 && PyTuple_CheckExact(__pyx_t_7)) {
+      if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_7)) break;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_7, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #else
+      __pyx_t_3 = PySequence_ITEM(__pyx_t_7, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      #endif
+    } else {
+      __pyx_t_3 = __pyx_t_9(__pyx_t_7);
+      if (unlikely(!__pyx_t_3)) {
+        /* StopIteration ends the loop; any other exception propagates. */
+        PyObject* exc_type = PyErr_Occurred();
+        if (exc_type) {
+          if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        }
+        break;
+      }
+      __Pyx_GOTREF(__pyx_t_3);
+    }
+    __Pyx_XDECREF_SET(__pyx_v_row, __pyx_t_3);
+    __pyx_t_3 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":725
+ *             np.empty(length*length, dtype=np.int8)
+ *         for row in sequence_order:
+ *             for column in sequence_order:             # <<<<<<<<<<<<<<
+ *                 py_list_matrix[i] = dict2d[row][column]
+ *                 i += 1
+ */
+    /* Inner column loop: same list/tuple fast path vs. iterator fallback. */
+    if (PyList_CheckExact(__pyx_v_sequence_order) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
+      __pyx_t_3 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_3); __pyx_t_10 = 0;
+      __pyx_t_11 = NULL;
+    } else {
+      __pyx_t_10 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_11 = Py_TYPE(__pyx_t_3)->tp_iternext;
+    }
+    for (;;) {
+      if (!__pyx_t_11 && PyList_CheckExact(__pyx_t_3)) {
+        if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_3)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_5 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_5); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #endif
+      } else if (!__pyx_t_11 && PyTuple_CheckExact(__pyx_t_3)) {
+        if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_5); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #endif
+      } else {
+        __pyx_t_5 = __pyx_t_11(__pyx_t_3);
+        if (unlikely(!__pyx_t_5)) {
+          PyObject* exc_type = PyErr_Occurred();
+          if (exc_type) {
+            if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          }
+          break;
+        }
+        __Pyx_GOTREF(__pyx_t_5);
+      }
+      __Pyx_XDECREF_SET(__pyx_v_column, __pyx_t_5);
+      __pyx_t_5 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":726
+ *         for row in sequence_order:
+ *             for column in sequence_order:
+ *                 py_list_matrix[i] = dict2d[row][column]             # <<<<<<<<<<<<<<
+ *                 i += 1
+ *         return py_list_matrix
+ */
+      __pyx_t_5 = PyObject_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __Pyx_GOTREF(__pyx_t_5);
+      __pyx_t_4 = PyObject_GetItem(__pyx_t_5, __pyx_v_column); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __Pyx_GOTREF(__pyx_t_4);
+      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+      __pyx_t_12 = __Pyx_PyInt_As_npy_int8(__pyx_t_4); if (unlikely((__pyx_t_12 == (npy_int8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      /* Wrap a negative index and bounds-check before the raw buffer store
+       * (Python-style indexing semantics for py_list_matrix[i]). */
+      __pyx_t_13 = __pyx_v_i;
+      __pyx_t_14 = -1;
+      if (__pyx_t_13 < 0) {
+        __pyx_t_13 += __pyx_pybuffernd_py_list_matrix.diminfo[0].shape;
+        if (unlikely(__pyx_t_13 < 0)) __pyx_t_14 = 0;
+      } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_py_list_matrix.diminfo[0].shape)) __pyx_t_14 = 0;
+      if (unlikely(__pyx_t_14 != -1)) {
+        __Pyx_RaiseBufferIndexError(__pyx_t_14);
+        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      }
+      *__Pyx_BufPtrCContig1d(__pyx_t_5numpy_int8_t *, __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_py_list_matrix.diminfo[0].strides) = __pyx_t_12;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":727
+ *             for column in sequence_order:
+ *                 py_list_matrix[i] = dict2d[row][column]
+ *                 i += 1             # <<<<<<<<<<<<<<
+ *         return py_list_matrix
+ * 
+ */
+      __pyx_v_i = (__pyx_v_i + 1);
+    }
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  }
+  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":728
+ *                 py_list_matrix[i] = dict2d[row][column]
+ *                 i += 1
+ *         return py_list_matrix             # <<<<<<<<<<<<<<
+ * 
+ * 
+ */
+  __Pyx_XDECREF(((PyObject *)__pyx_r));
+  __Pyx_INCREF(((PyObject *)__pyx_v_py_list_matrix));
+  __pyx_r = ((PyArrayObject *)__pyx_v_py_list_matrix);
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":714
+ *         return self._convert_dict2d_to_matrix(dict2d)
+ * 
+ *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
+ *             _convert_dict2d_to_matrix(self, dict2d):
+ *         if self.is_protein:
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_5);
+  __Pyx_XDECREF(__pyx_t_6);
+  __Pyx_XDECREF(__pyx_t_7);
+  /* Error path: release the numpy buffer view without clobbering the
+   * pending exception (fetch/restore brackets the release). */
+  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer);
+  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman._convert_dict2d_to_matrix", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  goto __pyx_L2;
+  __pyx_L0:;
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer);
+  __pyx_L2:;
+  __Pyx_XDECREF(__pyx_v_sequence_order);
+  __Pyx_XDECREF(__pyx_v_length);
+  __Pyx_XDECREF((PyObject *)__pyx_v_py_list_matrix);
+  __Pyx_XDECREF(__pyx_v_row);
+  __Pyx_XDECREF(__pyx_v_column);
+  __Pyx_XGIVEREF((PyObject *)__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "skbio/alignment/_ssw_wrapper.pyx":731
+ * 
+ * 
+ * def local_pairwise_align_ssw(sequence1, sequence2,             # <<<<<<<<<<<<<<
+ *                              **kwargs):
+ *     """Align query and target sequences with Striped Smith-Waterman.
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_1local_pairwise_align_ssw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_local_pairwise_align_ssw[] = "Align query and target sequences with Striped Smith-Waterman.\n\n    Parameters\n    ----------\n    sequence1 : str or BiologicalSequence\n        The first unaligned sequence\n    sequence2 : str or BiologicalSequence\n        The second unaligned sequence\n\n    Returns\n    -------\n    ``skbio.alignment.Alignment``\n        The resulting alignment as an Alignment object\n\n    Notes\n    -----\n     [...]
+static PyMethodDef __pyx_mdef_5skbio_9alignment_12_ssw_wrapper_1local_pairwise_align_ssw = {__Pyx_NAMESTR("local_pairwise_align_ssw"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_1local_pairwise_align_ssw, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_local_pairwise_align_ssw)};
+/* NOTE(review): Cython-generated argument-unpacking wrapper for
+ * local_pairwise_align_ssw(sequence1, sequence2, **kwargs) -- do not
+ * hand-edit; regenerate from the .pyx. Unrecognized keywords are collected
+ * into __pyx_v_kwargs and forwarded to the impl function (__pyx_pf_...). */
+static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_1local_pairwise_align_ssw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  PyObject *__pyx_v_sequence1 = 0;
+  PyObject *__pyx_v_sequence2 = 0;
+  PyObject *__pyx_v_kwargs = 0;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("local_pairwise_align_ssw (wrapper)", 0);
+  /* The **kwargs dict is created up front; the L3 error label below must
+   * therefore drop this reference before returning NULL. */
+  __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL;
+  __Pyx_GOTREF(__pyx_v_kwargs);
+  {
+    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sequence1,&__pyx_n_s_sequence2,0};
+    PyObject* values[2] = {0,0};
+    if (unlikely(__pyx_kwds)) {
+      Py_ssize_t kw_args;
+      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+      /* Intentional switch fall-through: fills values[] from however many
+       * positional arguments were supplied. */
+      switch (pos_args) {
+        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        case  0: break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+      kw_args = PyDict_Size(__pyx_kwds);
+      /* Fall-through again: fetch by keyword any required argument not
+       * already supplied positionally. */
+      switch (pos_args) {
+        case  0:
+        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_sequence1)) != 0)) kw_args--;
+        else goto __pyx_L5_argtuple_error;
+        case  1:
+        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_sequence2)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("local_pairwise_align_ssw", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+      }
+      if (unlikely(kw_args > 0)) {
+        /* Leftover keywords are routed into __pyx_v_kwargs (the **kwargs). */
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "local_pairwise_align_ssw") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+      }
+    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+      goto __pyx_L5_argtuple_error;
+    } else {
+      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+    }
+    __pyx_v_sequence1 = values[0];
+    __pyx_v_sequence2 = values[1];
+  }
+  goto __pyx_L4_argument_unpacking_done;
+  __pyx_L5_argtuple_error:;
+  __Pyx_RaiseArgtupleInvalid("local_pairwise_align_ssw", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __pyx_L3_error:;
+  __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0;
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.local_pairwise_align_ssw", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_RefNannyFinishContext();
+  return NULL;
+  __pyx_L4_argument_unpacking_done:;
+  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_local_pairwise_align_ssw(__pyx_self, __pyx_v_sequence1, __pyx_v_sequence2, __pyx_v_kwargs);
+
+  /* function exit code */
+  __Pyx_XDECREF(__pyx_v_kwargs);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_local_pairwise_align_ssw(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_sequence1, PyObject *__pyx_v_sequence2, PyObject *__pyx_v_kwargs) {
+  struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_query = NULL;
+  PyObject *__pyx_v_alignment = NULL;
+  PyObject *__pyx_v_start_end = NULL;
+  PyObject *__pyx_v_seqs = NULL;
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_t_2;
+  int __pyx_t_3;
+  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_5 = NULL;
+  PyObject *__pyx_t_6 = NULL;
+  PyObject *__pyx_t_7 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("local_pairwise_align_ssw", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":773
+ *     # We need the sequences for `Alignment` to make sense, so don't let the
+ *     # user suppress them.
+ *     kwargs['suppress_sequences'] = False             # <<<<<<<<<<<<<<
+ *     kwargs['zero_index'] = True
+ * 
+ */
+  if (unlikely(PyDict_SetItem(__pyx_v_kwargs, __pyx_n_s_suppress_sequences, Py_False) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 773; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":774
+ *     # user suppress them.
+ *     kwargs['suppress_sequences'] = False
+ *     kwargs['zero_index'] = True             # <<<<<<<<<<<<<<
+ * 
+ *     if isinstance(sequence1, ProteinSequence):
+ */
+  if (unlikely(PyDict_SetItem(__pyx_v_kwargs, __pyx_n_s_zero_index, Py_True) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":776
+ *     kwargs['zero_index'] = True
+ * 
+ *     if isinstance(sequence1, ProteinSequence):             # <<<<<<<<<<<<<<
+ *         kwargs['protein'] = True
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_ProteinSequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = PyObject_IsInstance(__pyx_v_sequence1, __pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_3 = (__pyx_t_2 != 0);
+  if (__pyx_t_3) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":777
+ * 
+ *     if isinstance(sequence1, ProteinSequence):
+ *         kwargs['protein'] = True             # <<<<<<<<<<<<<<
+ * 
+ *     query = StripedSmithWaterman(str(sequence1), **kwargs)
+ */
+    if (unlikely(PyDict_SetItem(__pyx_v_kwargs, __pyx_n_s_protein, Py_True) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    goto __pyx_L3;
+  }
+  __pyx_L3:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":779
+ *         kwargs['protein'] = True
+ * 
+ *     query = StripedSmithWaterman(str(sequence1), **kwargs)             # <<<<<<<<<<<<<<
+ *     alignment = query(str(sequence2))
+ * 
+ */
+  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 779; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_INCREF(__pyx_v_sequence1);
+  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_sequence1);
+  __Pyx_GIVEREF(__pyx_v_sequence1);
+  __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), __pyx_t_1, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 779; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 779; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
+  __Pyx_GIVEREF(__pyx_t_4);
+  __pyx_t_4 = 0;
+  __pyx_t_4 = __pyx_v_kwargs;
+  __Pyx_INCREF(__pyx_t_4);
+  __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman)), __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 779; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __pyx_v_query = ((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_t_5);
+  __pyx_t_5 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":780
+ * 
+ *     query = StripedSmithWaterman(str(sequence1), **kwargs)
+ *     alignment = query(str(sequence2))             # <<<<<<<<<<<<<<
+ * 
+ *     # If there is no cigar, then it has failed a filter. Return None.
+ */
+  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __Pyx_INCREF(__pyx_v_sequence2);
+  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_sequence2);
+  __Pyx_GIVEREF(__pyx_v_sequence2);
+  __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
+  __Pyx_GIVEREF(__pyx_t_4);
+  __pyx_t_4 = 0;
+  __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_v_query), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  __pyx_v_alignment = __pyx_t_4;
+  __pyx_t_4 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":783
+ * 
+ *     # If there is no cigar, then it has failed a filter. Return None.
+ *     if not alignment.cigar:             # <<<<<<<<<<<<<<
+ *         return None
+ * 
+ */
+  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_cigar); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 783; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 783; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __pyx_t_2 = ((!__pyx_t_3) != 0);
+  if (__pyx_t_2) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":784
+ *     # If there is no cigar, then it has failed a filter. Return None.
+ *     if not alignment.cigar:
+ *         return None             # <<<<<<<<<<<<<<
+ * 
+ *     start_end = None
+ */
+    __Pyx_XDECREF(__pyx_r);
+    __Pyx_INCREF(Py_None);
+    __pyx_r = Py_None;
+    goto __pyx_L0;
+  }
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":786
+ *         return None
+ * 
+ *     start_end = None             # <<<<<<<<<<<<<<
+ *     if alignment.query_begin != -1:
+ *         start_end = [
+ */
+  __Pyx_INCREF(Py_None);
+  __pyx_v_start_end = Py_None;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":787
+ * 
+ *     start_end = None
+ *     if alignment.query_begin != -1:             # <<<<<<<<<<<<<<
+ *         start_end = [
+ *             (alignment.query_begin, alignment.query_end),
+ */
+  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_query_begin); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 787; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __pyx_t_5 = PyObject_RichCompare(__pyx_t_4, __pyx_int_neg_1, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 787; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 787; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  if (__pyx_t_2) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":789
+ *     if alignment.query_begin != -1:
+ *         start_end = [
+ *             (alignment.query_begin, alignment.query_end),             # <<<<<<<<<<<<<<
+ *             (alignment.target_begin, alignment.target_end_optimal)
+ *         ]
+ */
+    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_query_begin); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_query_end); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
+    __Pyx_GIVEREF(__pyx_t_5);
+    PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_4);
+    __Pyx_GIVEREF(__pyx_t_4);
+    __pyx_t_5 = 0;
+    __pyx_t_4 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":790
+ *         start_end = [
+ *             (alignment.query_begin, alignment.query_end),
+ *             (alignment.target_begin, alignment.target_end_optimal)             # <<<<<<<<<<<<<<
+ *         ]
+ *     if kwargs.get('protein', False):
+ */
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_target_begin); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4);
+    __Pyx_GIVEREF(__pyx_t_4);
+    PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5);
+    __Pyx_GIVEREF(__pyx_t_5);
+    __pyx_t_4 = 0;
+    __pyx_t_5 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":788
+ *     start_end = None
+ *     if alignment.query_begin != -1:
+ *         start_end = [             # <<<<<<<<<<<<<<
+ *             (alignment.query_begin, alignment.query_end),
+ *             (alignment.target_begin, alignment.target_end_optimal)
+ */
+    __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
+    __Pyx_GIVEREF(__pyx_t_1);
+    PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_6);
+    __Pyx_GIVEREF(__pyx_t_6);
+    __pyx_t_1 = 0;
+    __pyx_t_6 = 0;
+    __Pyx_DECREF_SET(__pyx_v_start_end, __pyx_t_5);
+    __pyx_t_5 = 0;
+    goto __pyx_L5;
+  }
+  __pyx_L5:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":792
+ *             (alignment.target_begin, alignment.target_end_optimal)
+ *         ]
+ *     if kwargs.get('protein', False):             # <<<<<<<<<<<<<<
+ *         seqs = [
+ *             ProteinSequence(alignment.aligned_query_sequence, id='query'),
+ */
+  __pyx_t_5 = __Pyx_PyDict_GetItemDefault(__pyx_v_kwargs, __pyx_n_s_protein, Py_False); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  if (__pyx_t_2) {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":794
+ *     if kwargs.get('protein', False):
+ *         seqs = [
+ *             ProteinSequence(alignment.aligned_query_sequence, id='query'),             # <<<<<<<<<<<<<<
+ *             ProteinSequence(alignment.aligned_target_sequence, id='target')
+ *         ]
+ */
+    __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_ProteinSequence); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
+    __Pyx_GIVEREF(__pyx_t_6);
+    __pyx_t_6 = 0;
+    __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_id, __pyx_n_s_query) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_1, __pyx_t_6); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":795
+ *         seqs = [
+ *             ProteinSequence(alignment.aligned_query_sequence, id='query'),
+ *             ProteinSequence(alignment.aligned_target_sequence, id='target')             # <<<<<<<<<<<<<<
+ *         ]
+ *     else:
+ */
+    __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_ProteinSequence); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
+    __Pyx_GIVEREF(__pyx_t_1);
+    __pyx_t_1 = 0;
+    __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_id, __pyx_n_s_target) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_7);
+    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":793
+ *         ]
+ *     if kwargs.get('protein', False):
+ *         seqs = [             # <<<<<<<<<<<<<<
+ *             ProteinSequence(alignment.aligned_query_sequence, id='query'),
+ *             ProteinSequence(alignment.aligned_target_sequence, id='target')
+ */
+    __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 793; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    PyList_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
+    __Pyx_GIVEREF(__pyx_t_4);
+    PyList_SET_ITEM(__pyx_t_1, 1, __pyx_t_7);
+    __Pyx_GIVEREF(__pyx_t_7);
+    __pyx_t_4 = 0;
+    __pyx_t_7 = 0;
+    __pyx_v_seqs = ((PyObject*)__pyx_t_1);
+    __pyx_t_1 = 0;
+    goto __pyx_L6;
+  }
+  /*else*/ {
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":799
+ *     else:
+ *         seqs = [
+ *             NucleotideSequence(alignment.aligned_query_sequence, id='query'),             # <<<<<<<<<<<<<<
+ *             NucleotideSequence(alignment.aligned_target_sequence, id='target')
+ *         ]
+ */
+    __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_NucleotideSequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_7);
+    __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7);
+    __Pyx_GIVEREF(__pyx_t_7);
+    __pyx_t_7 = 0;
+    __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_7);
+    if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_id, __pyx_n_s_query) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_4, __pyx_t_7); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":800
+ *         seqs = [
+ *             NucleotideSequence(alignment.aligned_query_sequence, id='query'),
+ *             NucleotideSequence(alignment.aligned_target_sequence, id='target')             # <<<<<<<<<<<<<<
+ *         ]
+ * 
+ */
+    __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_NucleotideSequence); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_7);
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
+    __Pyx_GIVEREF(__pyx_t_4);
+    __pyx_t_4 = 0;
+    __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_id, __pyx_n_s_target) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":798
+ *         ]
+ *     else:
+ *         seqs = [             # <<<<<<<<<<<<<<
+ *             NucleotideSequence(alignment.aligned_query_sequence, id='query'),
+ *             NucleotideSequence(alignment.aligned_target_sequence, id='target')
+ */
+    __pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
+    __Pyx_GIVEREF(__pyx_t_5);
+    PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_6);
+    __Pyx_GIVEREF(__pyx_t_6);
+    __pyx_t_5 = 0;
+    __pyx_t_6 = 0;
+    __pyx_v_seqs = ((PyObject*)__pyx_t_4);
+    __pyx_t_4 = 0;
+  }
+  __pyx_L6:;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":803
+ *         ]
+ * 
+ *     return Alignment(seqs, score=alignment.optimal_alignment_score,             # <<<<<<<<<<<<<<
+ *                      start_end_positions=start_end)
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_Alignment); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_6);
+  __Pyx_INCREF(__pyx_v_seqs);
+  PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_seqs);
+  __Pyx_GIVEREF(__pyx_v_seqs);
+  __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_score, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":804
+ * 
+ *     return Alignment(seqs, score=alignment.optimal_alignment_score,
+ *                      start_end_positions=start_end)             # <<<<<<<<<<<<<<
+ */
+  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_start_end_positions, __pyx_v_start_end) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":803
+ *         ]
+ * 
+ *     return Alignment(seqs, score=alignment.optimal_alignment_score,             # <<<<<<<<<<<<<<
+ *                      start_end_positions=start_end)
+ */
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":731
+ * 
+ * 
+ * def local_pairwise_align_ssw(sequence1, sequence2,             # <<<<<<<<<<<<<<
+ *                              **kwargs):
+ *     """Align query and target sequences with Striped Smith-Waterman.
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_5);
+  __Pyx_XDECREF(__pyx_t_6);
+  __Pyx_XDECREF(__pyx_t_7);
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.local_pairwise_align_ssw", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XDECREF((PyObject *)__pyx_v_query);
+  __Pyx_XDECREF(__pyx_v_alignment);
+  __Pyx_XDECREF(__pyx_v_start_end);
+  __Pyx_XDECREF(__pyx_v_seqs);
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
+ *         # experimental exception made for __getbuffer__ and __releasebuffer__
+ *         # -- the details of this may change.
+ *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
+ *             # This implementation of getbuffer is geared towards Cython
+ *             # requirements, and does not yet fullfill the PEP.
+ */
+
+/* Python wrapper */
+/* Thin CPython-level entry point implementing the buffer protocol's
+ * bf_getbuffer slot for numpy.ndarray, auto-generated by Cython from
+ * numpy/__init__.pxd. It only casts the raw PyObject*/Py_buffer*/int
+ * arguments to the typed forms the implementation expects and forwards
+ * them to __pyx_pf_5numpy_7ndarray___getbuffer__ (defined below).
+ * NOTE(review): generated artifact -- fix the .pxd/.pyx source and
+ * regenerate rather than editing this file by hand. */
+static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
+static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
+  /* Delegate to the typed implementation; its int result is the buffer
+   * protocol's success/failure code and is returned unchanged. */
+  __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
+  int __pyx_v_copy_shape;
+  int __pyx_v_i;
+  int __pyx_v_ndim;
+  int __pyx_v_endian_detector;
+  int __pyx_v_little_endian;
+  int __pyx_v_t;
+  char *__pyx_v_f;
+  PyArray_Descr *__pyx_v_descr = 0;
+  int __pyx_v_offset;
+  int __pyx_v_hasfields;
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  int __pyx_t_2;
+  int __pyx_t_3;
+  PyObject *__pyx_t_4 = NULL;
+  int __pyx_t_5;
+  int __pyx_t_6;
+  int __pyx_t_7;
+  PyObject *__pyx_t_8 = NULL;
+  char *__pyx_t_9;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("__getbuffer__", 0);
+  if (__pyx_v_info != NULL) {
+    __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
+    __Pyx_GIVEREF(__pyx_v_info->obj);
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":200
+ *             # of flags
+ * 
+ *             if info == NULL: return             # <<<<<<<<<<<<<<
+ * 
+ *             cdef int copy_shape, i, ndim
+ */
+  __pyx_t_1 = ((__pyx_v_info == NULL) != 0);
+  if (__pyx_t_1) {
+    __pyx_r = 0;
+    goto __pyx_L0;
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
+ * 
+ *             cdef int copy_shape, i, ndim
+ *             cdef int endian_detector = 1             # <<<<<<<<<<<<<<
+ *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
+ * 
+ */
+  __pyx_v_endian_detector = 1;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":204
+ *             cdef int copy_shape, i, ndim
+ *             cdef int endian_detector = 1
+ *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
+ * 
+ *             ndim = PyArray_NDIM(self)
+ */
+  __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
+ *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
+ * 
+ *             ndim = PyArray_NDIM(self)             # <<<<<<<<<<<<<<
+ * 
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
+  __pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":208
+ *             ndim = PyArray_NDIM(self)
+ * 
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
+ *                 copy_shape = 1
+ *             else:
+ */
+  __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
+ * 
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ *                 copy_shape = 1             # <<<<<<<<<<<<<<
+ *             else:
+ *                 copy_shape = 0
+ */
+    __pyx_v_copy_shape = 1;
+    goto __pyx_L4;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
+ *                 copy_shape = 1
+ *             else:
+ *                 copy_shape = 0             # <<<<<<<<<<<<<<
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ */
+    __pyx_v_copy_shape = 0;
+  }
+  __pyx_L4:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":213
+ *                 copy_shape = 0
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ */
+  __pyx_t_1 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):             # <<<<<<<<<<<<<<
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ * 
+ */
+    __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
+    __pyx_t_3 = __pyx_t_2;
+  } else {
+    __pyx_t_3 = __pyx_t_1;
+  }
+  if (__pyx_t_3) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ */
+    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ */
+  __pyx_t_3 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
+  if (__pyx_t_3) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):             # <<<<<<<<<<<<<<
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ * 
+ */
+    __pyx_t_1 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
+    __pyx_t_2 = __pyx_t_1;
+  } else {
+    __pyx_t_2 = __pyx_t_3;
+  }
+  if (__pyx_t_2) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
+ * 
+ *             info.buf = PyArray_DATA(self)
+ */
+    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ * 
+ *             info.buf = PyArray_DATA(self)             # <<<<<<<<<<<<<<
+ *             info.ndim = ndim
+ *             if copy_shape:
+ */
+  __pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+ * 
+ *             info.buf = PyArray_DATA(self)
+ *             info.ndim = ndim             # <<<<<<<<<<<<<<
+ *             if copy_shape:
+ *                 # Allocate new buffer for strides and shape info.
+ */
+  __pyx_v_info->ndim = __pyx_v_ndim;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":223
+ *             info.buf = PyArray_DATA(self)
+ *             info.ndim = ndim
+ *             if copy_shape:             # <<<<<<<<<<<<<<
+ *                 # Allocate new buffer for strides and shape info.
+ *                 # This is allocated as one block, strides first.
+ */
+  __pyx_t_2 = (__pyx_v_copy_shape != 0);
+  if (__pyx_t_2) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
+ *                 # Allocate new buffer for strides and shape info.
+ *                 # This is allocated as one block, strides first.
+ *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)             # <<<<<<<<<<<<<<
+ *                 info.shape = info.strides + ndim
+ *                 for i in range(ndim):
+ */
+    __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":227
+ *                 # This is allocated as one block, strides first.
+ *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
+ *                 info.shape = info.strides + ndim             # <<<<<<<<<<<<<<
+ *                 for i in range(ndim):
+ *                     info.strides[i] = PyArray_STRIDES(self)[i]
+ */
+    __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":228
+ *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
+ *                 info.shape = info.strides + ndim
+ *                 for i in range(ndim):             # <<<<<<<<<<<<<<
+ *                     info.strides[i] = PyArray_STRIDES(self)[i]
+ *                     info.shape[i] = PyArray_DIMS(self)[i]
+ */
+    __pyx_t_5 = __pyx_v_ndim;
+    for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
+      __pyx_v_i = __pyx_t_6;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
+ *                 info.shape = info.strides + ndim
+ *                 for i in range(ndim):
+ *                     info.strides[i] = PyArray_STRIDES(self)[i]             # <<<<<<<<<<<<<<
+ *                     info.shape[i] = PyArray_DIMS(self)[i]
+ *             else:
+ */
+      (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
+ *                 for i in range(ndim):
+ *                     info.strides[i] = PyArray_STRIDES(self)[i]
+ *                     info.shape[i] = PyArray_DIMS(self)[i]             # <<<<<<<<<<<<<<
+ *             else:
+ *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
+ */
+      (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
+    }
+    goto __pyx_L7;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
+ *                     info.shape[i] = PyArray_DIMS(self)[i]
+ *             else:
+ *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)             # <<<<<<<<<<<<<<
+ *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
+ *             info.suboffsets = NULL
+ */
+    __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
+ *             else:
+ *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
+ *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)             # <<<<<<<<<<<<<<
+ *             info.suboffsets = NULL
+ *             info.itemsize = PyArray_ITEMSIZE(self)
+ */
+    __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
+  }
+  __pyx_L7:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":234
+ *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
+ *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
+ *             info.suboffsets = NULL             # <<<<<<<<<<<<<<
+ *             info.itemsize = PyArray_ITEMSIZE(self)
+ *             info.readonly = not PyArray_ISWRITEABLE(self)
+ */
+  __pyx_v_info->suboffsets = NULL;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
+ *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
+ *             info.suboffsets = NULL
+ *             info.itemsize = PyArray_ITEMSIZE(self)             # <<<<<<<<<<<<<<
+ *             info.readonly = not PyArray_ISWRITEABLE(self)
+ * 
+ */
+  __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
+ *             info.suboffsets = NULL
+ *             info.itemsize = PyArray_ITEMSIZE(self)
+ *             info.readonly = not PyArray_ISWRITEABLE(self)             # <<<<<<<<<<<<<<
+ * 
+ *             cdef int t
+ */
+  __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
+ * 
+ *             cdef int t
+ *             cdef char* f = NULL             # <<<<<<<<<<<<<<
+ *             cdef dtype descr = self.descr
+ *             cdef list stack
+ */
+  __pyx_v_f = NULL;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":240
+ *             cdef int t
+ *             cdef char* f = NULL
+ *             cdef dtype descr = self.descr             # <<<<<<<<<<<<<<
+ *             cdef list stack
+ *             cdef int offset
+ */
+  __pyx_t_4 = ((PyObject *)__pyx_v_self->descr);
+  __Pyx_INCREF(__pyx_t_4);
+  __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4);
+  __pyx_t_4 = 0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":244
+ *             cdef int offset
+ * 
+ *             cdef bint hasfields = PyDataType_HASFIELDS(descr)             # <<<<<<<<<<<<<<
+ * 
+ *             if not hasfields and not copy_shape:
+ */
+  __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246
+ *             cdef bint hasfields = PyDataType_HASFIELDS(descr)
+ * 
+ *             if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
+ *                 # do not call releasebuffer
+ *                 info.obj = None
+ */
+  __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
+  if (__pyx_t_2) {
+    __pyx_t_3 = ((!(__pyx_v_copy_shape != 0)) != 0);
+    __pyx_t_1 = __pyx_t_3;
+  } else {
+    __pyx_t_1 = __pyx_t_2;
+  }
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248
+ *             if not hasfields and not copy_shape:
+ *                 # do not call releasebuffer
+ *                 info.obj = None             # <<<<<<<<<<<<<<
+ *             else:
+ *                 # need to call releasebuffer
+ */
+    __Pyx_INCREF(Py_None);
+    __Pyx_GIVEREF(Py_None);
+    __Pyx_GOTREF(__pyx_v_info->obj);
+    __Pyx_DECREF(__pyx_v_info->obj);
+    __pyx_v_info->obj = Py_None;
+    goto __pyx_L10;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
+ *             else:
+ *                 # need to call releasebuffer
+ *                 info.obj = self             # <<<<<<<<<<<<<<
+ * 
+ *             if not hasfields:
+ */
+    __Pyx_INCREF(((PyObject *)__pyx_v_self));
+    __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
+    __Pyx_GOTREF(__pyx_v_info->obj);
+    __Pyx_DECREF(__pyx_v_info->obj);
+    __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
+  }
+  __pyx_L10:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253
+ *                 info.obj = self
+ * 
+ *             if not hasfields:             # <<<<<<<<<<<<<<
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ */
+  __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
+ * 
+ *             if not hasfields:
+ *                 t = descr.type_num             # <<<<<<<<<<<<<<
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ */
+    __pyx_t_5 = __pyx_v_descr->type_num;
+    __pyx_v_t = __pyx_t_5;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255
+ *             if not hasfields:
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")
+ */
+    __pyx_t_1 = ((__pyx_v_descr->byteorder == '>') != 0);
+    if (__pyx_t_1) {
+      __pyx_t_2 = (__pyx_v_little_endian != 0);
+    } else {
+      __pyx_t_2 = __pyx_t_1;
+    }
+    if (!__pyx_t_2) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ *                     (descr.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
+ *                     raise ValueError(u"Non-native byte order not supported")
+ *                 if   t == NPY_BYTE:        f = "b"
+ */
+      __pyx_t_1 = ((__pyx_v_descr->byteorder == '<') != 0);
+      if (__pyx_t_1) {
+        __pyx_t_3 = ((!(__pyx_v_little_endian != 0)) != 0);
+        __pyx_t_7 = __pyx_t_3;
+      } else {
+        __pyx_t_7 = __pyx_t_1;
+      }
+      __pyx_t_1 = __pyx_t_7;
+    } else {
+      __pyx_t_1 = __pyx_t_2;
+    }
+    if (__pyx_t_1) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
+ *                 if   t == NPY_BYTE:        f = "b"
+ *                 elif t == NPY_UBYTE:       f = "B"
+ */
+      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"
+ *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
+ *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
+ *                 else:
+ *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+    switch (__pyx_v_t) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")
+ *                 if   t == NPY_BYTE:        f = "b"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_UBYTE:       f = "B"
+ *                 elif t == NPY_SHORT:       f = "h"
+ */
+      case NPY_BYTE:
+      __pyx_v_f = __pyx_k_b;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
+ *                     raise ValueError(u"Non-native byte order not supported")
+ *                 if   t == NPY_BYTE:        f = "b"
+ *                 elif t == NPY_UBYTE:       f = "B"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_SHORT:       f = "h"
+ *                 elif t == NPY_USHORT:      f = "H"
+ */
+      case NPY_UBYTE:
+      __pyx_v_f = __pyx_k_B;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+ *                 if   t == NPY_BYTE:        f = "b"
+ *                 elif t == NPY_UBYTE:       f = "B"
+ *                 elif t == NPY_SHORT:       f = "h"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_USHORT:      f = "H"
+ *                 elif t == NPY_INT:         f = "i"
+ */
+      case NPY_SHORT:
+      __pyx_v_f = __pyx_k_h;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
+ *                 elif t == NPY_UBYTE:       f = "B"
+ *                 elif t == NPY_SHORT:       f = "h"
+ *                 elif t == NPY_USHORT:      f = "H"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_INT:         f = "i"
+ *                 elif t == NPY_UINT:        f = "I"
+ */
+      case NPY_USHORT:
+      __pyx_v_f = __pyx_k_H;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
+ *                 elif t == NPY_SHORT:       f = "h"
+ *                 elif t == NPY_USHORT:      f = "H"
+ *                 elif t == NPY_INT:         f = "i"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_UINT:        f = "I"
+ *                 elif t == NPY_LONG:        f = "l"
+ */
+      case NPY_INT:
+      __pyx_v_f = __pyx_k_i;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
+ *                 elif t == NPY_USHORT:      f = "H"
+ *                 elif t == NPY_INT:         f = "i"
+ *                 elif t == NPY_UINT:        f = "I"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_LONG:        f = "l"
+ *                 elif t == NPY_ULONG:       f = "L"
+ */
+      case NPY_UINT:
+      __pyx_v_f = __pyx_k_I;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
+ *                 elif t == NPY_INT:         f = "i"
+ *                 elif t == NPY_UINT:        f = "I"
+ *                 elif t == NPY_LONG:        f = "l"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_ULONG:       f = "L"
+ *                 elif t == NPY_LONGLONG:    f = "q"
+ */
+      case NPY_LONG:
+      __pyx_v_f = __pyx_k_l;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
+ *                 elif t == NPY_UINT:        f = "I"
+ *                 elif t == NPY_LONG:        f = "l"
+ *                 elif t == NPY_ULONG:       f = "L"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_LONGLONG:    f = "q"
+ *                 elif t == NPY_ULONGLONG:   f = "Q"
+ */
+      case NPY_ULONG:
+      __pyx_v_f = __pyx_k_L;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
+ *                 elif t == NPY_LONG:        f = "l"
+ *                 elif t == NPY_ULONG:       f = "L"
+ *                 elif t == NPY_LONGLONG:    f = "q"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_ULONGLONG:   f = "Q"
+ *                 elif t == NPY_FLOAT:       f = "f"
+ */
+      case NPY_LONGLONG:
+      __pyx_v_f = __pyx_k_q;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
+ *                 elif t == NPY_ULONG:       f = "L"
+ *                 elif t == NPY_LONGLONG:    f = "q"
+ *                 elif t == NPY_ULONGLONG:   f = "Q"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_FLOAT:       f = "f"
+ *                 elif t == NPY_DOUBLE:      f = "d"
+ */
+      case NPY_ULONGLONG:
+      __pyx_v_f = __pyx_k_Q;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
+ *                 elif t == NPY_LONGLONG:    f = "q"
+ *                 elif t == NPY_ULONGLONG:   f = "Q"
+ *                 elif t == NPY_FLOAT:       f = "f"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_DOUBLE:      f = "d"
+ *                 elif t == NPY_LONGDOUBLE:  f = "g"
+ */
+      case NPY_FLOAT:
+      __pyx_v_f = __pyx_k_f;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
+ *                 elif t == NPY_ULONGLONG:   f = "Q"
+ *                 elif t == NPY_FLOAT:       f = "f"
+ *                 elif t == NPY_DOUBLE:      f = "d"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_LONGDOUBLE:  f = "g"
+ *                 elif t == NPY_CFLOAT:      f = "Zf"
+ */
+      case NPY_DOUBLE:
+      __pyx_v_f = __pyx_k_d;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+ *                 elif t == NPY_FLOAT:       f = "f"
+ *                 elif t == NPY_DOUBLE:      f = "d"
+ *                 elif t == NPY_LONGDOUBLE:  f = "g"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_CFLOAT:      f = "Zf"
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"
+ */
+      case NPY_LONGDOUBLE:
+      __pyx_v_f = __pyx_k_g;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
+ *                 elif t == NPY_DOUBLE:      f = "d"
+ *                 elif t == NPY_LONGDOUBLE:  f = "g"
+ *                 elif t == NPY_CFLOAT:      f = "Zf"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"
+ *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
+ */
+      case NPY_CFLOAT:
+      __pyx_v_f = __pyx_k_Zf;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
+ *                 elif t == NPY_LONGDOUBLE:  f = "g"
+ *                 elif t == NPY_CFLOAT:      f = "Zf"
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
+ *                 elif t == NPY_OBJECT:      f = "O"
+ */
+      case NPY_CDOUBLE:
+      __pyx_v_f = __pyx_k_Zd;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
+ *                 elif t == NPY_CFLOAT:      f = "Zf"
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"
+ *                 elif t == NPY_CLONGDOUBLE: f = "Zg"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_OBJECT:      f = "O"
+ *                 else:
+ */
+      case NPY_CLONGDOUBLE:
+      __pyx_v_f = __pyx_k_Zg;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"
+ *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
+ *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
+ *                 else:
+ *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+      case NPY_OBJECT:
+      __pyx_v_f = __pyx_k_O;
+      break;
+      default:
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
+ *                 elif t == NPY_OBJECT:      f = "O"
+ *                 else:
+ *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
+ *                 info.format = f
+ *                 return
+ */
+      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_8);
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8);
+      __Pyx_GIVEREF(__pyx_t_8);
+      __pyx_t_8 = 0;
+      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_8);
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      break;
+    }
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
+ *                 else:
+ *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ *                 info.format = f             # <<<<<<<<<<<<<<
+ *                 return
+ *             else:
+ */
+    __pyx_v_info->format = __pyx_v_f;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278
+ *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ *                 info.format = f
+ *                 return             # <<<<<<<<<<<<<<
+ *             else:
+ *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
+ */
+    __pyx_r = 0;
+    goto __pyx_L0;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
+ *                 return
+ *             else:
+ *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)             # <<<<<<<<<<<<<<
+ *                 info.format[0] = c'^' # Native data types, manual alignment
+ *                 offset = 0
+ */
+    __pyx_v_info->format = ((char *)malloc(255));
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
+ *             else:
+ *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
+ *                 info.format[0] = c'^' # Native data types, manual alignment             # <<<<<<<<<<<<<<
+ *                 offset = 0
+ *                 f = _util_dtypestring(descr, info.format + 1,
+ */
+    (__pyx_v_info->format[0]) = '^';
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282
+ *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
+ *                 info.format[0] = c'^' # Native data types, manual alignment
+ *                 offset = 0             # <<<<<<<<<<<<<<
+ *                 f = _util_dtypestring(descr, info.format + 1,
+ *                                       info.format + _buffer_format_string_len,
+ */
+    __pyx_v_offset = 0;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
+ *                 info.format[0] = c'^' # Native data types, manual alignment
+ *                 offset = 0
+ *                 f = _util_dtypestring(descr, info.format + 1,             # <<<<<<<<<<<<<<
+ *                                       info.format + _buffer_format_string_len,
+ *                                       &offset)
+ */
+    __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_v_f = __pyx_t_9;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
+ *                                       info.format + _buffer_format_string_len,
+ *                                       &offset)
+ *                 f[0] = c'\0' # Terminate format string             # <<<<<<<<<<<<<<
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):
+ */
+    (__pyx_v_f[0]) = '\x00';
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
+ *         # experimental exception made for __getbuffer__ and __releasebuffer__
+ *         # -- the details of this may change.
+ *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
+ *             # This implementation of getbuffer is geared towards Cython
+ *             # requirements, and does not yet fullfill the PEP.
+ */
+
+  /* function exit code */
+  __pyx_r = 0;
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_8);
+  __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = -1;
+  if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
+    __Pyx_GOTREF(__pyx_v_info->obj);
+    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
+  }
+  goto __pyx_L2;
+  __pyx_L0:;
+  if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
+    __Pyx_GOTREF(Py_None);
+    __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
+  }
+  __pyx_L2:;
+  __Pyx_XDECREF((PyObject *)__pyx_v_descr);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
+ *                 f[0] = c'\0' # Terminate format string
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)
+ */
+
+/* Python wrapper */
+/* bf_releasebuffer slot entry point: a thin Cython-generated shim that
+   casts the generic PyObject* / Py_buffer* slot arguments to the typed
+   forms and forwards to the __releasebuffer__ implementation.  It is
+   void-returning and cannot fail, matching the buffer-protocol slot. */
+static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
+static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
+  /* Delegate to the typed implementation; there is no result to propagate. */
+  __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+/* __releasebuffer__ implementation: undoes the heap allocations that the
+   matching __getbuffer__ made for this Py_buffer.  Two conditional frees:
+   the format string (only allocated for structured/hasfields dtypes) and
+   the strides block (only allocated when npy_intp and Py_ssize_t differ in
+   size, forcing __getbuffer__ to copy strides/shape into one malloc'd run). */
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  __Pyx_RefNannySetupContext("__releasebuffer__", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):
+ *             if PyArray_HASFIELDS(self):             # <<<<<<<<<<<<<<
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
+  __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)             # <<<<<<<<<<<<<<
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ *                 stdlib.free(info.strides)
+ */
+    /* info->format was malloc'd by __getbuffer__'s hasfields branch;
+       for simple dtypes format points at a static string and is not freed. */
+    free(__pyx_v_info->format);
+    goto __pyx_L3;
+  }
+  __pyx_L3:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
+ *                 stdlib.free(info.strides)
+ *                 # info.shape was stored after info.strides in the same block
+ */
+  /* Compile-time-constant condition: true only on builds where the copy in
+     __getbuffer__ was required, i.e. exactly when the block was allocated. */
+  __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ *                 stdlib.free(info.strides)             # <<<<<<<<<<<<<<
+ *                 # info.shape was stored after info.strides in the same block
+ * 
+ */
+    /* One free suffices: shape was stored after strides in the same block. */
+    free(__pyx_v_info->strides);
+    goto __pyx_L4;
+  }
+  __pyx_L4:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
+ *                 f[0] = c'\0' # Terminate format string
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)
+ */
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
+ * ctypedef npy_cdouble     complex_t
+ * 
+ * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(1, <void*>a)
+ * 
+ */
+
+/* Generated inline helper for numpy.pxd's PyArray_MultiIterNew1(a):
+   calls the variadic PyArray_MultiIterNew with a single operand.  A NULL
+   result is treated as a raised Python exception: a traceback frame is
+   appended and 0 (NULL) is returned to the caller. */
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
+  PyObject *__pyx_r = NULL;
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
+ * 
+ * cdef inline object PyArray_MultiIterNew1(a):
+ *     return PyArray_MultiIterNew(1, <void*>a)             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object PyArray_MultiIterNew2(a, b):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  /* The operand is passed as void* per the C-API's variadic signature. */
+  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
+ * ctypedef npy_cdouble     complex_t
+ * 
+ * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(1, <void*>a)
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  /* Error path: return NULL with the exception already set. */
+  __pyx_r = 0;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+ *     return PyArray_MultiIterNew(1, <void*>a)
+ * 
+ * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+ * 
+ */
+
+/* Generated inline helper for numpy.pxd's PyArray_MultiIterNew2(a, b):
+   two-operand form of the same pattern as PyArray_MultiIterNew1 — on a
+   NULL result it records a traceback frame and returns 0 (NULL). */
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
+  PyObject *__pyx_r = NULL;
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
+ * 
+ * cdef inline object PyArray_MultiIterNew2(a, b):
+ *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  /* Operands are passed as void* per the C-API's variadic signature. */
+  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+ *     return PyArray_MultiIterNew(1, <void*>a)
+ * 
+ * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  /* Error path: return NULL with the exception already set. */
+  __pyx_r = 0;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+ *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+ * 
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+ * 
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
+  /* Cython-generated wrapper: PyArray_MultiIterNew with three operands.
+   * New reference on success; NULL with an exception set on failure. */
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
+ * 
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):
+ *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  /* a NULL return signals a set Python exception -> error label. */
+  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+ *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+ * 
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+ * 
+ */
+
+  /* function exit code */
+  /* error path: release the temporary, append this frame to the
+   * traceback, and return NULL. */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  /* shared exit: transfer the (possibly NULL) result to the caller. */
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+ *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+ * 
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+ * 
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
+  /* Cython-generated wrapper: PyArray_MultiIterNew with four operands.
+   * New reference on success; NULL with an exception set on failure. */
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
+ * 
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  /* a NULL return signals a set Python exception -> error label. */
+  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+ *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+ * 
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+ * 
+ */
+
+  /* function exit code */
+  /* error path: release the temporary, append this frame to the
+   * traceback, and return NULL. */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  /* shared exit: transfer the (possibly NULL) result to the caller. */
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+ *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+ * 
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+ * 
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
+  /* Cython-generated wrapper: PyArray_MultiIterNew with five operands.
+   * New reference on success; NULL with an exception set on failure. */
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
+ * 
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
+ */
+  __Pyx_XDECREF(__pyx_r);
+  /* a NULL return signals a set Python exception -> error label. */
+  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+ *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+ * 
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+ * 
+ */
+
+  /* function exit code */
+  /* error path: release the temporary, append this frame to the
+   * traceback, and return NULL. */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  /* shared exit: transfer the (possibly NULL) result to the caller. */
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+ *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+ * 
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
+ *     # Recursive utility function used in __getbuffer__ to get format
+ *     # string. The new location in the format string is returned.
+ */
+
+static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
+  PyArray_Descr *__pyx_v_child = 0;
+  int __pyx_v_endian_detector;
+  int __pyx_v_little_endian;
+  PyObject *__pyx_v_fields = 0;
+  PyObject *__pyx_v_childname = NULL;
+  PyObject *__pyx_v_new_offset = NULL;
+  PyObject *__pyx_v_t = NULL;
+  char *__pyx_r;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  Py_ssize_t __pyx_t_2;
+  PyObject *__pyx_t_3 = NULL;
+  PyObject *__pyx_t_4 = NULL;
+  int __pyx_t_5;
+  int __pyx_t_6;
+  int __pyx_t_7;
+  int __pyx_t_8;
+  int __pyx_t_9;
+  long __pyx_t_10;
+  char *__pyx_t_11;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_util_dtypestring", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790
+ *     cdef int delta_offset
+ *     cdef tuple i
+ *     cdef int endian_detector = 1             # <<<<<<<<<<<<<<
+ *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
+ *     cdef tuple fields
+ */
+  __pyx_v_endian_detector = 1;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791
+ *     cdef tuple i
+ *     cdef int endian_detector = 1
+ *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
+ *     cdef tuple fields
+ * 
+ */
+  __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
+ *     cdef tuple fields
+ * 
+ *     for childname in descr.names:             # <<<<<<<<<<<<<<
+ *         fields = descr.fields[childname]
+ *         child, new_offset = fields
+ */
+  if (unlikely(__pyx_v_descr->names == Py_None)) {
+    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
+  for (;;) {
+    if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+    #if CYTHON_COMPILING_IN_CPYTHON
+    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    #else
+    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    #endif
+    __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
+    __pyx_t_3 = 0;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795
+ * 
+ *     for childname in descr.names:
+ *         fields = descr.fields[childname]             # <<<<<<<<<<<<<<
+ *         child, new_offset = fields
+ * 
+ */
+    __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __Pyx_GOTREF(__pyx_t_3);
+    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
+    __pyx_t_3 = 0;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796
+ *     for childname in descr.names:
+ *         fields = descr.fields[childname]
+ *         child, new_offset = fields             # <<<<<<<<<<<<<<
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:
+ */
+    if (likely(__pyx_v_fields != Py_None)) {
+      PyObject* sequence = __pyx_v_fields;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      Py_ssize_t size = Py_SIZE(sequence);
+      #else
+      Py_ssize_t size = PySequence_Size(sequence);
+      #endif
+      if (unlikely(size != 2)) {
+        if (size > 2) __Pyx_RaiseTooManyValuesError(2);
+        else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      }
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); 
+      __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); 
+      __Pyx_INCREF(__pyx_t_3);
+      __Pyx_INCREF(__pyx_t_4);
+      #else
+      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      #endif
+    } else {
+      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
+    __pyx_t_3 = 0;
+    __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
+    __pyx_t_4 = 0;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
+ *         child, new_offset = fields
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ */
+    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
+    if (__pyx_t_6) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or
+ */
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")
+ */
+    __pyx_t_6 = ((__pyx_v_child->byteorder == '>') != 0);
+    if (__pyx_t_6) {
+      __pyx_t_7 = (__pyx_v_little_endian != 0);
+    } else {
+      __pyx_t_7 = __pyx_t_6;
+    }
+    if (!__pyx_t_7) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or
+ *             (child.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
+ *             raise ValueError(u"Non-native byte order not supported")
+ *             # One could encode it in the format string and have Cython
+ */
+      __pyx_t_6 = ((__pyx_v_child->byteorder == '<') != 0);
+      if (__pyx_t_6) {
+        __pyx_t_8 = ((!(__pyx_v_little_endian != 0)) != 0);
+        __pyx_t_9 = __pyx_t_8;
+      } else {
+        __pyx_t_9 = __pyx_t_6;
+      }
+      __pyx_t_6 = __pyx_t_9;
+    } else {
+      __pyx_t_6 = __pyx_t_7;
+    }
+    if (__pyx_t_6) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
+ *         if ((child.byteorder == c'>' and little_endian) or
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
+ *             # One could encode it in the format string and have Cython
+ *             # complain instead, BUT: < and > in format strings also imply
+ */
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813
+ * 
+ *         # Output padding bytes
+ *         while offset[0] < new_offset:             # <<<<<<<<<<<<<<
+ *             f[0] = 120 # "x"; pad byte
+ *             f += 1
+ */
+    while (1) {
+      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (!__pyx_t_6) break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814
+ *         # Output padding bytes
+ *         while offset[0] < new_offset:
+ *             f[0] = 120 # "x"; pad byte             # <<<<<<<<<<<<<<
+ *             f += 1
+ *             offset[0] += 1
+ */
+      (__pyx_v_f[0]) = 120;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815
+ *         while offset[0] < new_offset:
+ *             f[0] = 120 # "x"; pad byte
+ *             f += 1             # <<<<<<<<<<<<<<
+ *             offset[0] += 1
+ * 
+ */
+      __pyx_v_f = (__pyx_v_f + 1);
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
+ *             f[0] = 120 # "x"; pad byte
+ *             f += 1
+ *             offset[0] += 1             # <<<<<<<<<<<<<<
+ * 
+ *         offset[0] += child.itemsize
+ */
+      __pyx_t_10 = 0;
+      (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1);
+    }
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
+ *             offset[0] += 1
+ * 
+ *         offset[0] += child.itemsize             # <<<<<<<<<<<<<<
+ * 
+ *         if not PyDataType_HASFIELDS(child):
+ */
+    __pyx_t_10 = 0;
+    (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize);
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820
+ *         offset[0] += child.itemsize
+ * 
+ *         if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
+ *             t = child.type_num
+ *             if end - f < 5:
+ */
+    __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
+    if (__pyx_t_6) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
+ * 
+ *         if not PyDataType_HASFIELDS(child):
+ *             t = child.type_num             # <<<<<<<<<<<<<<
+ *             if end - f < 5:
+ *                 raise RuntimeError(u"Format string allocated too short.")
+ */
+      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
+      __pyx_t_4 = 0;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822
+ *         if not PyDataType_HASFIELDS(child):
+ *             t = child.type_num
+ *             if end - f < 5:             # <<<<<<<<<<<<<<
+ *                 raise RuntimeError(u"Format string allocated too short.")
+ * 
+ */
+      __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
+      if (__pyx_t_6) {
+
+        /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+ *             t = child.type_num
+ *             if end - f < 5:
+ *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
+ * 
+ *             # Until ticket #99 is fixed, use integers to avoid warnings
+ */
+        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_4);
+        __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+ * 
+ *             # Until ticket #99 is fixed, use integers to avoid warnings
+ *             if   t == NPY_BYTE:        f[0] =  98 #"b"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
+ *             elif t == NPY_SHORT:       f[0] = 104 #"h"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 98;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827
+ *             # Until ticket #99 is fixed, use integers to avoid warnings
+ *             if   t == NPY_BYTE:        f[0] =  98 #"b"
+ *             elif t == NPY_UBYTE:       f[0] =  66 #"B"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_SHORT:       f[0] = 104 #"h"
+ *             elif t == NPY_USHORT:      f[0] =  72 #"H"
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 66;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828
+ *             if   t == NPY_BYTE:        f[0] =  98 #"b"
+ *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
+ *             elif t == NPY_SHORT:       f[0] = 104 #"h"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_USHORT:      f[0] =  72 #"H"
+ *             elif t == NPY_INT:         f[0] = 105 #"i"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 104;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
+ *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
+ *             elif t == NPY_SHORT:       f[0] = 104 #"h"
+ *             elif t == NPY_USHORT:      f[0] =  72 #"H"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_INT:         f[0] = 105 #"i"
+ *             elif t == NPY_UINT:        f[0] =  73 #"I"
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 72;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
+ *             elif t == NPY_SHORT:       f[0] = 104 #"h"
+ *             elif t == NPY_USHORT:      f[0] =  72 #"H"
+ *             elif t == NPY_INT:         f[0] = 105 #"i"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_UINT:        f[0] =  73 #"I"
+ *             elif t == NPY_LONG:        f[0] = 108 #"l"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 105;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
+ *             elif t == NPY_USHORT:      f[0] =  72 #"H"
+ *             elif t == NPY_INT:         f[0] = 105 #"i"
+ *             elif t == NPY_UINT:        f[0] =  73 #"I"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_LONG:        f[0] = 108 #"l"
+ *             elif t == NPY_ULONG:       f[0] = 76  #"L"
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 73;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
+ *             elif t == NPY_INT:         f[0] = 105 #"i"
+ *             elif t == NPY_UINT:        f[0] =  73 #"I"
+ *             elif t == NPY_LONG:        f[0] = 108 #"l"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_ULONG:       f[0] = 76  #"L"
+ *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 108;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
+ *             elif t == NPY_UINT:        f[0] =  73 #"I"
+ *             elif t == NPY_LONG:        f[0] = 108 #"l"
+ *             elif t == NPY_ULONG:       f[0] = 76  #"L"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
+ *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 76;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
+ *             elif t == NPY_LONG:        f[0] = 108 #"l"
+ *             elif t == NPY_ULONG:       f[0] = 76  #"L"
+ *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
+ *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 113;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
+ *             elif t == NPY_ULONG:       f[0] = 76  #"L"
+ *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
+ *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
+ *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 81;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
+ *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
+ *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
+ *             elif t == NPY_FLOAT:       f[0] = 102 #"f"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
+ *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 102;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
+ *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
+ *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
+ *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
+ *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 100;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
+ *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
+ *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
+ *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
+ *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 103;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
+ *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
+ *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
+ *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf             # <<<<<<<<<<<<<<
+ *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
+ *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 90;
+        (__pyx_v_f[1]) = 102;
+        __pyx_v_f = (__pyx_v_f + 1);
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
+ *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
+ *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
+ *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd             # <<<<<<<<<<<<<<
+ *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 90;
+        (__pyx_v_f[1]) = 100;
+        __pyx_v_f = (__pyx_v_f + 1);
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
+ *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
+ *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
+ *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg             # <<<<<<<<<<<<<<
+ *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
+ *             else:
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 90;
+        (__pyx_v_f[1]) = 103;
+        __pyx_v_f = (__pyx_v_f + 1);
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
+ *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
+ *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ *             elif t == NPY_OBJECT:      f[0] = 79 #"O"             # <<<<<<<<<<<<<<
+ *             else:
+ *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 79;
+        goto __pyx_L11;
+      }
+      /*else*/ {
+
+        /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
+ *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
+ *             else:
+ *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
+ *             f += 1
+ *         else:
+ */
+        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_4);
+        PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
+        __Pyx_GIVEREF(__pyx_t_3);
+        __pyx_t_3 = 0;
+        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+        __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+        __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      }
+      __pyx_L11:;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
+ *             else:
+ *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ *             f += 1             # <<<<<<<<<<<<<<
+ *         else:
+ *             # Cython ignores struct boundary information ("T{...}"),
+ */
+      __pyx_v_f = (__pyx_v_f + 1);
+      goto __pyx_L9;
+    }
+    /*else*/ {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849
+ *             # Cython ignores struct boundary information ("T{...}"),
+ *             # so don't output it
+ *             f = _util_dtypestring(child, f, end, offset)             # <<<<<<<<<<<<<<
+ *     return f
+ * 
+ */
+      __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_v_f = __pyx_t_11;
+    }
+    __pyx_L9:;
+  }
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850
+ *             # so don't output it
+ *             f = _util_dtypestring(child, f, end, offset)
+ *     return f             # <<<<<<<<<<<<<<
+ * 
+ * 
+ */
+  __pyx_r = __pyx_v_f;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+ *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+ * 
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
+ *     # Recursive utility function used in __getbuffer__ to get format
+ *     # string. The new location in the format string is returned.
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XDECREF((PyObject *)__pyx_v_child);
+  __Pyx_XDECREF(__pyx_v_fields);
+  __Pyx_XDECREF(__pyx_v_childname);
+  __Pyx_XDECREF(__pyx_v_new_offset);
+  __Pyx_XDECREF(__pyx_v_t);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
+ * 
+ * 
+ * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
+ *      cdef PyObject* baseptr
+ *      if base is None:
+ */
+
+/* Cython-generated helper for numpy's `set_array_base(arr, base)`:
+ * replaces `arr->base` with `base`, clearing it when `base` is None.
+ * A new reference to `base` is taken BEFORE the old `arr->base`
+ * reference is dropped, so the swap is safe even if both point to the
+ * same object (see the "important to do this before decref" note in
+ * the echoed .pxd source below). */
+static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
+  PyObject *__pyx_v_baseptr;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  int __pyx_t_2;
+  __Pyx_RefNannySetupContext("set_array_base", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968
+ * cdef inline void set_array_base(ndarray arr, object base):
+ *      cdef PyObject* baseptr
+ *      if base is None:             # <<<<<<<<<<<<<<
+ *          baseptr = NULL
+ *      else:
+ */
+  __pyx_t_1 = (__pyx_v_base == Py_None);
+  __pyx_t_2 = (__pyx_t_1 != 0);
+  if (__pyx_t_2) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+ *      cdef PyObject* baseptr
+ *      if base is None:
+ *          baseptr = NULL             # <<<<<<<<<<<<<<
+ *      else:
+ *          Py_INCREF(base) # important to do this before decref below!
+ */
+    /* base is None: store a NULL base pointer (no reference held). */
+    __pyx_v_baseptr = NULL;
+    goto __pyx_L3;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
+ *          baseptr = NULL
+ *      else:
+ *          Py_INCREF(base) # important to do this before decref below!             # <<<<<<<<<<<<<<
+ *          baseptr = <PyObject*>base
+ *      Py_XDECREF(arr.base)
+ */
+    /* Acquire the new reference first so `base` cannot be destroyed by
+     * the Py_XDECREF(arr->base) below when base == arr->base. */
+    Py_INCREF(__pyx_v_base);
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
+ *      else:
+ *          Py_INCREF(base) # important to do this before decref below!
+ *          baseptr = <PyObject*>base             # <<<<<<<<<<<<<<
+ *      Py_XDECREF(arr.base)
+ *      arr.base = baseptr
+ */
+    __pyx_v_baseptr = ((PyObject *)__pyx_v_base);
+  }
+  __pyx_L3:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973
+ *          Py_INCREF(base) # important to do this before decref below!
+ *          baseptr = <PyObject*>base
+ *      Py_XDECREF(arr.base)             # <<<<<<<<<<<<<<
+ *      arr.base = baseptr
+ * 
+ */
+  /* Release the previous base (XDECREF tolerates a NULL pointer). */
+  Py_XDECREF(__pyx_v_arr->base);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
+ *          baseptr = <PyObject*>base
+ *      Py_XDECREF(arr.base)
+ *      arr.base = baseptr             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object get_array_base(ndarray arr):
+ */
+  __pyx_v_arr->base = __pyx_v_baseptr;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
+ * 
+ * 
+ * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
+ *      cdef PyObject* baseptr
+ *      if base is None:
+ */
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+ *      arr.base = baseptr
+ * 
+ * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
+ *     if arr.base is NULL:
+ *         return None
+ */
+
+/* Cython-generated helper for numpy's `get_array_base(arr)`:
+ * returns `arr->base` as a Python object, or None when the array has
+ * no base (base == NULL, i.e. it owns its own data).  The result is
+ * returned as a NEW reference (INCREF'd before return in both
+ * branches). */
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  __Pyx_RefNannySetupContext("get_array_base", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
+ * 
+ * cdef inline object get_array_base(ndarray arr):
+ *     if arr.base is NULL:             # <<<<<<<<<<<<<<
+ *         return None
+ *     else:
+ */
+  __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978
+ * cdef inline object get_array_base(ndarray arr):
+ *     if arr.base is NULL:
+ *         return None             # <<<<<<<<<<<<<<
+ *     else:
+ *         return <object>arr.base
+ */
+    /* No base: hand back None (new reference). */
+    __Pyx_XDECREF(__pyx_r);
+    __Pyx_INCREF(Py_None);
+    __pyx_r = Py_None;
+    goto __pyx_L0;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
+ *         return None
+ *     else:
+ *         return <object>arr.base             # <<<<<<<<<<<<<<
+ */
+    /* Return the stored base object as a new reference. */
+    __Pyx_XDECREF(__pyx_r);
+    __Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
+    __pyx_r = ((PyObject *)__pyx_v_arr->base);
+    goto __pyx_L0;
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+ *      arr.base = baseptr
+ * 
+ * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
+ *     if arr.base is NULL:
+ *         return None
+ */
+
+  /* function exit code */
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
+
+/* tp_new slot for AlignmentStructure.  Allocates the instance (via
+ * tp_alloc for concrete subtypes, or object.__new__ for abstract
+ * types), installs the vtable pointer, initialises the three object
+ * fields to None, then runs the generated __cinit__.  On __cinit__
+ * failure the partially built object is released and NULL (0) is
+ * returned with the exception set by __cinit__. */
+static PyObject *__pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure(PyTypeObject *t, PyObject *a, PyObject *k) {
+  struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *p;
+  PyObject *o;
+  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
+    o = (*t->tp_alloc)(t, 0);
+  } else {
+    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
+  }
+  if (unlikely(!o)) return 0;
+  p = ((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)o);
+  p->__pyx_vtab = __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
+  /* Seed every PyObject* field with an owned reference to None so the
+   * dealloc path can unconditionally Py_CLEAR them. */
+  p->read_sequence = ((PyObject*)Py_None); Py_INCREF(Py_None);
+  p->reference_sequence = ((PyObject*)Py_None); Py_INCREF(Py_None);
+  p->_cigar_string = ((PyObject*)Py_None); Py_INCREF(Py_None);
+  if (unlikely(__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_1__cinit__(o, a, k) < 0)) {
+    Py_DECREF(o); o = 0;
+  }
+  return o;
+}
+
+/* tp_dealloc slot for AlignmentStructure.  On Python >= 3.4.0a1 it
+ * first gives tp_finalize a chance to resurrect the object; then it
+ * calls the user-level __dealloc__ with the refcount temporarily
+ * bumped (so the object is not re-entered by its own destruction) and
+ * with any pending exception saved/restored around the call, releases
+ * the three owned object fields, and frees the memory. */
+static void __pyx_tp_dealloc_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure(PyObject *o) {
+  struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *p = (struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)o;
+  #if PY_VERSION_HEX >= 0x030400a1
+  if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
+    /* If the finalizer resurrected the object, abandon deallocation. */
+    if (PyObject_CallFinalizerFromDealloc(o)) return;
+  }
+  #endif
+  {
+    /* Preserve any in-flight exception across the __dealloc__ call. */
+    PyObject *etype, *eval, *etb;
+    PyErr_Fetch(&etype, &eval, &etb);
+    ++Py_REFCNT(o);
+    __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_3__dealloc__(o);
+    --Py_REFCNT(o);
+    PyErr_Restore(etype, eval, etb);
+  }
+  Py_CLEAR(p->read_sequence);
+  Py_CLEAR(p->reference_sequence);
+  Py_CLEAR(p->_cigar_string);
+  (*Py_TYPE(o)->tp_free)(o);
+}
+/* sq_item adapter: routes sequence-style integer indexing (o[i] with a
+ * Py_ssize_t) through the type's mapping subscript (__getitem__) by
+ * boxing the index into a Python int first.  Returns NULL (0) if the
+ * index object cannot be created. */
+static PyObject *__pyx_sq_item_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure(PyObject *o, Py_ssize_t i) {
+  PyObject *r;
+  PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
+  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
+  Py_DECREF(x);
+  return r;
+}
+
+/* Method table for AlignmentStructure.  Entries pair each exposed
+ * Python method name with its generated wrapper and calling
+ * convention; all accessors are METH_NOARGS except set_zero_based
+ * (METH_O, takes one argument) and _get_aligned_sequence
+ * (METH_VARARGS|METH_KEYWORDS).  Terminated by the required
+ * all-zero sentinel entry. */
+static PyMethodDef __pyx_methods_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure[] = {
+  {__Pyx_NAMESTR("optimal_alignment_score"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_11optimal_alignment_score, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_10optimal_alignment_score)},
+  {__Pyx_NAMESTR("suboptimal_alignment_score"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_13suboptimal_alignment_score, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_12suboptimal_alignment_score)},
+  {__Pyx_NAMESTR("target_begin"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_15target_begin, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_14target_begin)},
+  {__Pyx_NAMESTR("target_end_optimal"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_17target_end_optimal, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_16target_end_optimal)},
+  {__Pyx_NAMESTR("target_end_suboptimal"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_19target_end_suboptimal, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_18target_end_suboptimal)},
+  {__Pyx_NAMESTR("query_begin"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_21query_begin, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_20query_begin)},
+  {__Pyx_NAMESTR("query_end"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_23query_end, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_22query_end)},
+  {__Pyx_NAMESTR("cigar"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_25cigar, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_24cigar)},
+  {__Pyx_NAMESTR("query_sequence"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_27query_sequence, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_26query_sequence)},
+  {__Pyx_NAMESTR("target_sequence"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_29target_sequence, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_28target_sequence)},
+  {__Pyx_NAMESTR("aligned_query_sequence"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_31aligned_query_sequence, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_30aligned_query_sequence)},
+  {__Pyx_NAMESTR("aligned_target_sequence"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_33aligned_target_sequence, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_32aligned_target_sequence)},
+  {__Pyx_NAMESTR("set_zero_based"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_35set_zero_based, METH_O, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_34set_zero_based)},
+  {__Pyx_NAMESTR("is_zero_based"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_37is_zero_based, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_36is_zero_based)},
+  {__Pyx_NAMESTR("_get_aligned_sequence"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_39_get_aligned_sequence, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)},
+  {__Pyx_NAMESTR("_tuples_from_cigar"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_41_tuples_from_cigar, METH_NOARGS, __Pyx_DOCSTR(0)},
+  {0, 0, 0, 0}
+};
+
+/* Sequence-protocol table: only sq_item is implemented (delegating to
+ * the mapping subscript via the adapter above); all other sequence
+ * slots are left empty. */
+static PySequenceMethods __pyx_tp_as_sequence_AlignmentStructure = {
+  0, /*sq_length*/
+  0, /*sq_concat*/
+  0, /*sq_repeat*/
+  __pyx_sq_item_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, /*sq_item*/
+  0, /*sq_slice*/
+  0, /*sq_ass_item*/
+  0, /*sq_ass_slice*/
+  0, /*sq_contains*/
+  0, /*sq_inplace_concat*/
+  0, /*sq_inplace_repeat*/
+};
+
+/* Mapping-protocol table: read-only subscripting via the generated
+ * __getitem__ wrapper; no length and no item assignment. */
+static PyMappingMethods __pyx_tp_as_mapping_AlignmentStructure = {
+  0, /*mp_length*/
+  __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_5__getitem__, /*mp_subscript*/
+  0, /*mp_ass_subscript*/
+};
+
+static PyTypeObject __pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure = {
+  PyVarObject_HEAD_INIT(0, 0)
+  __Pyx_NAMESTR("skbio.alignment._ssw_wrapper.AlignmentStructure"), /*tp_name*/
+  sizeof(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure), /*tp_basicsize*/
+  0, /*tp_itemsize*/
+  __pyx_tp_dealloc_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, /*tp_dealloc*/
+  0, /*tp_print*/
+  0, /*tp_getattr*/
+  0, /*tp_setattr*/
+  #if PY_MAJOR_VERSION < 3
+  0, /*tp_compare*/
+  #else
+  0, /*reserved*/
+  #endif
+  __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_7__repr__, /*tp_repr*/
+  0, /*tp_as_number*/
+  &__pyx_tp_as_sequence_AlignmentStructure, /*tp_as_sequence*/
+  &__pyx_tp_as_mapping_AlignmentStructure, /*tp_as_mapping*/
+  0, /*tp_hash*/
+  0, /*tp_call*/
+  __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_9__str__, /*tp_str*/
+  0, /*tp_getattro*/
+  0, /*tp_setattro*/
+  0, /*tp_as_buffer*/
+  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
+  __Pyx_DOCSTR("Wraps the result of an alignment c struct so it is accessible to Python\n\n    Attributes\n    ----------\n    optimal_alignment_score\n    suboptimal_alignment_score\n    target_begin\n    target_end_optimal\n    target_end_suboptimal\n    query_begin\n    query_end\n    cigar\n    query_sequence\n    target_sequence\n    aligned_query_sequence\n    aligned_target_sequence\n\n    Notes\n    -----\n    `cigar` may be empty depending on parameters used.\n\n    `target_begi [...]
+  0, /*tp_traverse*/
+  0, /*tp_clear*/
+  0, /*tp_richcompare*/
+  0, /*tp_weaklistoffset*/
+  0, /*tp_iter*/
+  0, /*tp_iternext*/
+  __pyx_methods_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, /*tp_methods*/
+  0, /*tp_members*/
+  0, /*tp_getset*/
+  0, /*tp_base*/
+  0, /*tp_dict*/
+  0, /*tp_descr_get*/
+  0, /*tp_descr_set*/
+  0, /*tp_dictoffset*/
+  0, /*tp_init*/
+  0, /*tp_alloc*/
+  __pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, /*tp_new*/
+  0, /*tp_free*/
+  0, /*tp_is_gc*/
+  0, /*tp_bases*/
+  0, /*tp_mro*/
+  0, /*tp_cache*/
+  0, /*tp_subclasses*/
+  0, /*tp_weaklist*/
+  0, /*tp_del*/
+  #if PY_VERSION_HEX >= 0x02060000
+  0, /*tp_version_tag*/
+  #endif
+  #if PY_VERSION_HEX >= 0x030400a1
+  0, /*tp_finalize*/
+  #endif
+};
+static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
+
+/* tp_new slot for StripedSmithWaterman.  Same pattern as the
+ * AlignmentStructure tp_new: allocate, install the vtable pointer,
+ * seed each of the five object-typed fields with an owned None
+ * reference, then run __cinit__; releases the instance and returns
+ * NULL (0) on __cinit__ failure. */
+static PyObject *__pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman(PyTypeObject *t, PyObject *a, PyObject *k) {
+  struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *p;
+  PyObject *o;
+  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
+    o = (*t->tp_alloc)(t, 0);
+  } else {
+    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
+  }
+  if (unlikely(!o)) return 0;
+  p = ((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)o);
+  p->__pyx_vtab = __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
+  p->read_sequence = ((PyObject*)Py_None); Py_INCREF(Py_None);
+  p->is_protein = ((PyBoolObject *)Py_None); Py_INCREF(Py_None);
+  p->suppress_sequences = ((PyBoolObject *)Py_None); Py_INCREF(Py_None);
+  p->__pyx___KEEP_IT_IN_SCOPE_read = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
+  p->__pyx___KEEP_IT_IN_SCOPE_matrix = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
+  if (unlikely(__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__cinit__(o, a, k) < 0)) {
+    Py_DECREF(o); o = 0;
+  }
+  return o;
+}
+
+static void __pyx_tp_dealloc_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman(PyObject *o) {
+  struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *p = (struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)o;
+  #if PY_VERSION_HEX >= 0x030400a1
+  if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
+    if (PyObject_CallFinalizerFromDealloc(o)) return;
+  }
+  #endif
+  PyObject_GC_UnTrack(o);
+  {
+    PyObject *etype, *eval, *etb;
+    PyErr_Fetch(&etype, &eval, &etb);
+    ++Py_REFCNT(o);
+    __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_5__dealloc__(o);
+    --Py_REFCNT(o);
+    PyErr_Restore(etype, eval, etb);
+  }
+  Py_CLEAR(p->read_sequence);
+  Py_CLEAR(p->is_protein);
+  Py_CLEAR(p->suppress_sequences);
+  Py_CLEAR(p->__pyx___KEEP_IT_IN_SCOPE_read);
+  Py_CLEAR(p->__pyx___KEEP_IT_IN_SCOPE_matrix);
+  (*Py_TYPE(o)->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman(PyObject *o, visitproc v, void *a) {
+  int e;
+  struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *p = (struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)o;
+  if (p->is_protein) {
+    e = (*v)(((PyObject*)p->is_protein), a); if (e) return e;
+  }
+  if (p->suppress_sequences) {
+    e = (*v)(((PyObject*)p->suppress_sequences), a); if (e) return e;
+  }
+  if (p->__pyx___KEEP_IT_IN_SCOPE_read) {
+    e = (*v)(((PyObject*)p->__pyx___KEEP_IT_IN_SCOPE_read), a); if (e) return e;
+  }
+  if (p->__pyx___KEEP_IT_IN_SCOPE_matrix) {
+    e = (*v)(((PyObject*)p->__pyx___KEEP_IT_IN_SCOPE_matrix), a); if (e) return e;
+  }
+  return 0;
+}
+
+static int __pyx_tp_clear_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman(PyObject *o) {
+  PyObject* tmp;
+  struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *p = (struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)o;
+  tmp = ((PyObject*)p->is_protein);
+  p->is_protein = ((PyBoolObject *)Py_None); Py_INCREF(Py_None);
+  Py_XDECREF(tmp);
+  tmp = ((PyObject*)p->suppress_sequences);
+  p->suppress_sequences = ((PyBoolObject *)Py_None); Py_INCREF(Py_None);
+  Py_XDECREF(tmp);
+  tmp = ((PyObject*)p->__pyx___KEEP_IT_IN_SCOPE_read);
+  p->__pyx___KEEP_IT_IN_SCOPE_read = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
+  Py_XDECREF(tmp);
+  tmp = ((PyObject*)p->__pyx___KEEP_IT_IN_SCOPE_matrix);
+  p->__pyx___KEEP_IT_IN_SCOPE_matrix = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
+  Py_XDECREF(tmp);
+  return 0;
+}
+
+static PyMethodDef __pyx_methods_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman[] = {
+  {__Pyx_NAMESTR("_get_bit_flag"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_7_get_bit_flag, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)},
+  {0, 0, 0, 0}
+};
+
+static PyTypeObject __pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman = {
+  PyVarObject_HEAD_INIT(0, 0)
+  __Pyx_NAMESTR("skbio.alignment._ssw_wrapper.StripedSmithWaterman"), /*tp_name*/
+  sizeof(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman), /*tp_basicsize*/
+  0, /*tp_itemsize*/
+  __pyx_tp_dealloc_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, /*tp_dealloc*/
+  0, /*tp_print*/
+  0, /*tp_getattr*/
+  0, /*tp_setattr*/
+  #if PY_MAJOR_VERSION < 3
+  0, /*tp_compare*/
+  #else
+  0, /*reserved*/
+  #endif
+  0, /*tp_repr*/
+  0, /*tp_as_number*/
+  0, /*tp_as_sequence*/
+  0, /*tp_as_mapping*/
+  0, /*tp_hash*/
+  __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_3__call__, /*tp_call*/
+  0, /*tp_str*/
+  0, /*tp_getattro*/
+  0, /*tp_setattro*/
+  0, /*tp_as_buffer*/
+  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+  __Pyx_DOCSTR("Performs a striped (banded) Smith Waterman Alignment.\n\n    First a StripedSmithWaterman object must be instantiated with a query\n    sequence. The resulting object is then callable with a target sequence and\n    may be reused on a large collection of target sequences.\n\n    Parameters\n    ----------\n    query_sequence : string\n        The query sequence, this may be upper or lowercase from the set of\n        {A, C, G, T, N} (nucleotide) or from the set of\n       [...]
+  __pyx_tp_traverse_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, /*tp_traverse*/
+  __pyx_tp_clear_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, /*tp_clear*/
+  0, /*tp_richcompare*/
+  0, /*tp_weaklistoffset*/
+  0, /*tp_iter*/
+  0, /*tp_iternext*/
+  __pyx_methods_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, /*tp_methods*/
+  0, /*tp_members*/
+  0, /*tp_getset*/
+  0, /*tp_base*/
+  0, /*tp_dict*/
+  0, /*tp_descr_get*/
+  0, /*tp_descr_set*/
+  0, /*tp_dictoffset*/
+  0, /*tp_init*/
+  0, /*tp_alloc*/
+  __pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, /*tp_new*/
+  0, /*tp_free*/
+  0, /*tp_is_gc*/
+  0, /*tp_bases*/
+  0, /*tp_mro*/
+  0, /*tp_cache*/
+  0, /*tp_subclasses*/
+  0, /*tp_weaklist*/
+  0, /*tp_del*/
+  #if PY_VERSION_HEX >= 0x02060000
+  0, /*tp_version_tag*/
+  #endif
+  #if PY_VERSION_HEX >= 0x030400a1
+  0, /*tp_finalize*/
+  #endif
+};
+
+static PyMethodDef __pyx_methods[] = {
+  {0, 0, 0, 0}
+};
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef __pyx_moduledef = {
+  #if PY_VERSION_HEX < 0x03020000
+    { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
+  #else
+    PyModuleDef_HEAD_INIT,
+  #endif
+    __Pyx_NAMESTR("_ssw_wrapper"),
+    0, /* m_doc */
+    -1, /* m_size */
+    __pyx_methods /* m_methods */,
+    NULL, /* m_reload */
+    NULL, /* m_traverse */
+    NULL, /* m_clear */
+    NULL /* m_free */
+};
+#endif
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+  {&__pyx_kp_s_, __pyx_k_, sizeof(__pyx_k_), 0, 0, 1, 0},
+  {&__pyx_n_s_ACGTN, __pyx_k_ACGTN, sizeof(__pyx_k_ACGTN), 0, 0, 1, 1},
+  {&__pyx_kp_s_ARNDCQEGHILKMFPSTWYVBZX, __pyx_k_ARNDCQEGHILKMFPSTWYVBZX, sizeof(__pyx_k_ARNDCQEGHILKMFPSTWYVBZX), 0, 0, 1, 0},
+  {&__pyx_n_s_Alignment, __pyx_k_Alignment, sizeof(__pyx_k_Alignment), 0, 0, 1, 1},
+  {&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1},
+  {&__pyx_n_s_Exception, __pyx_k_Exception, sizeof(__pyx_k_Exception), 0, 0, 1, 1},
+  {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
+  {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
+  {&__pyx_n_s_I, __pyx_k_I, sizeof(__pyx_k_I), 0, 0, 1, 1},
+  {&__pyx_kp_s_Length_d, __pyx_k_Length_d, sizeof(__pyx_k_Length_d), 0, 0, 1, 0},
+  {&__pyx_n_s_M, __pyx_k_M, sizeof(__pyx_k_M), 0, 0, 1, 1},
+  {&__pyx_kp_s_Must_provide_a_substitution_matr, __pyx_k_Must_provide_a_substitution_matr, sizeof(__pyx_k_Must_provide_a_substitution_matr), 0, 0, 1, 0},
+  {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1},
+  {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
+  {&__pyx_n_s_NucleotideSequence, __pyx_k_NucleotideSequence, sizeof(__pyx_k_NucleotideSequence), 0, 0, 1, 1},
+  {&__pyx_n_s_ProteinSequence, __pyx_k_ProteinSequence, sizeof(__pyx_k_ProteinSequence), 0, 0, 1, 1},
+  {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
+  {&__pyx_kp_s_Score_d, __pyx_k_Score_d, sizeof(__pyx_k_Score_d), 0, 0, 1, 0},
+  {&__pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_k_Users_jairideout_dev_scikit_bio, sizeof(__pyx_k_Users_jairideout_dev_scikit_bio), 0, 0, 1, 0},
+  {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
+  {&__pyx_kp_s__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 0, 1, 0},
+  {&__pyx_kp_s__5, __pyx_k__5, sizeof(__pyx_k__5), 0, 0, 1, 0},
+  {&__pyx_kp_s__6, __pyx_k__6, sizeof(__pyx_k__6), 0, 0, 1, 0},
+  {&__pyx_kp_s__8, __pyx_k__8, sizeof(__pyx_k__8), 0, 0, 1, 0},
+  {&__pyx_n_s_aligned_query_sequence, __pyx_k_aligned_query_sequence, sizeof(__pyx_k_aligned_query_sequence), 0, 0, 1, 1},
+  {&__pyx_n_s_aligned_target_sequence, __pyx_k_aligned_target_sequence, sizeof(__pyx_k_aligned_target_sequence), 0, 0, 1, 1},
+  {&__pyx_n_s_alignment, __pyx_k_alignment, sizeof(__pyx_k_alignment), 0, 0, 1, 1},
+  {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1},
+  {&__pyx_n_s_begin, __pyx_k_begin, sizeof(__pyx_k_begin), 0, 0, 1, 1},
+  {&__pyx_n_s_cigar, __pyx_k_cigar, sizeof(__pyx_k_cigar), 0, 0, 1, 1},
+  {&__pyx_n_s_distance_filter, __pyx_k_distance_filter, sizeof(__pyx_k_distance_filter), 0, 0, 1, 1},
+  {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
+  {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1},
+  {&__pyx_n_s_end, __pyx_k_end, sizeof(__pyx_k_end), 0, 0, 1, 1},
+  {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
+  {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
+  {&__pyx_n_s_gap_extend_penalty, __pyx_k_gap_extend_penalty, sizeof(__pyx_k_gap_extend_penalty), 0, 0, 1, 1},
+  {&__pyx_kp_s_gap_extend_penalty_must_be_0, __pyx_k_gap_extend_penalty_must_be_0, sizeof(__pyx_k_gap_extend_penalty_must_be_0), 0, 0, 1, 0},
+  {&__pyx_n_s_gap_open_penalty, __pyx_k_gap_open_penalty, sizeof(__pyx_k_gap_open_penalty), 0, 0, 1, 1},
+  {&__pyx_kp_s_gap_open_penalty_must_be_0, __pyx_k_gap_open_penalty_must_be_0, sizeof(__pyx_k_gap_open_penalty_must_be_0), 0, 0, 1, 0},
+  {&__pyx_n_s_gap_type, __pyx_k_gap_type, sizeof(__pyx_k_gap_type), 0, 0, 1, 1},
+  {&__pyx_n_s_get, __pyx_k_get, sizeof(__pyx_k_get), 0, 0, 1, 1},
+  {&__pyx_n_s_get_aligned_sequence, __pyx_k_get_aligned_sequence, sizeof(__pyx_k_get_aligned_sequence), 0, 0, 1, 1},
+  {&__pyx_n_s_get_bit_flag, __pyx_k_get_bit_flag, sizeof(__pyx_k_get_bit_flag), 0, 0, 1, 1},
+  {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
+  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
+  {&__pyx_n_s_index_starts_at, __pyx_k_index_starts_at, sizeof(__pyx_k_index_starts_at), 0, 0, 1, 1},
+  {&__pyx_n_s_int8, __pyx_k_int8, sizeof(__pyx_k_int8), 0, 0, 1, 1},
+  {&__pyx_n_s_is_zero_based, __pyx_k_is_zero_based, sizeof(__pyx_k_is_zero_based), 0, 0, 1, 1},
+  {&__pyx_n_s_isdigit, __pyx_k_isdigit, sizeof(__pyx_k_isdigit), 0, 0, 1, 1},
+  {&__pyx_n_s_join, __pyx_k_join, sizeof(__pyx_k_join), 0, 0, 1, 1},
+  {&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1},
+  {&__pyx_n_s_local_pairwise_align_ssw, __pyx_k_local_pairwise_align_ssw, sizeof(__pyx_k_local_pairwise_align_ssw), 0, 0, 1, 1},
+  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
+  {&__pyx_n_s_mask_auto, __pyx_k_mask_auto, sizeof(__pyx_k_mask_auto), 0, 0, 1, 1},
+  {&__pyx_n_s_mask_length, __pyx_k_mask_length, sizeof(__pyx_k_mask_length), 0, 0, 1, 1},
+  {&__pyx_n_s_match_score, __pyx_k_match_score, sizeof(__pyx_k_match_score), 0, 0, 1, 1},
+  {&__pyx_n_s_mid_table, __pyx_k_mid_table, sizeof(__pyx_k_mid_table), 0, 0, 1, 1},
+  {&__pyx_n_s_mismatch_score, __pyx_k_mismatch_score, sizeof(__pyx_k_mismatch_score), 0, 0, 1, 1},
+  {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
+  {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
+  {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
+  {&__pyx_n_s_np_aa_table, __pyx_k_np_aa_table, sizeof(__pyx_k_np_aa_table), 0, 0, 1, 1},
+  {&__pyx_n_s_np_nt_table, __pyx_k_np_nt_table, sizeof(__pyx_k_np_nt_table), 0, 0, 1, 1},
+  {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
+  {&__pyx_n_s_optimal_alignment_score, __pyx_k_optimal_alignment_score, sizeof(__pyx_k_optimal_alignment_score), 0, 0, 1, 1},
+  {&__pyx_n_s_ord, __pyx_k_ord, sizeof(__pyx_k_ord), 0, 0, 1, 1},
+  {&__pyx_n_s_override_skip_babp, __pyx_k_override_skip_babp, sizeof(__pyx_k_override_skip_babp), 0, 0, 1, 1},
+  {&__pyx_n_s_property, __pyx_k_property, sizeof(__pyx_k_property), 0, 0, 1, 1},
+  {&__pyx_n_s_protein, __pyx_k_protein, sizeof(__pyx_k_protein), 0, 0, 1, 1},
+  {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
+  {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1},
+  {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
+  {&__pyx_n_s_query, __pyx_k_query, sizeof(__pyx_k_query), 0, 0, 1, 1},
+  {&__pyx_n_s_query_begin, __pyx_k_query_begin, sizeof(__pyx_k_query_begin), 0, 0, 1, 1},
+  {&__pyx_n_s_query_end, __pyx_k_query_end, sizeof(__pyx_k_query_end), 0, 0, 1, 1},
+  {&__pyx_n_s_query_sequence, __pyx_k_query_sequence, sizeof(__pyx_k_query_sequence), 0, 0, 1, 1},
+  {&__pyx_kp_s_r_r, __pyx_k_r_r, sizeof(__pyx_k_r_r), 0, 0, 1, 0},
+  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
+  {&__pyx_n_s_read_sequence, __pyx_k_read_sequence, sizeof(__pyx_k_read_sequence), 0, 0, 1, 1},
+  {&__pyx_n_s_reference_sequence, __pyx_k_reference_sequence, sizeof(__pyx_k_reference_sequence), 0, 0, 1, 1},
+  {&__pyx_kp_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 0},
+  {&__pyx_n_s_score, __pyx_k_score, sizeof(__pyx_k_score), 0, 0, 1, 1},
+  {&__pyx_n_s_score_filter, __pyx_k_score_filter, sizeof(__pyx_k_score_filter), 0, 0, 1, 1},
+  {&__pyx_n_s_score_only, __pyx_k_score_only, sizeof(__pyx_k_score_only), 0, 0, 1, 1},
+  {&__pyx_n_s_score_size, __pyx_k_score_size, sizeof(__pyx_k_score_size), 0, 0, 1, 1},
+  {&__pyx_n_s_seqs, __pyx_k_seqs, sizeof(__pyx_k_seqs), 0, 0, 1, 1},
+  {&__pyx_n_s_sequence, __pyx_k_sequence, sizeof(__pyx_k_sequence), 0, 0, 1, 1},
+  {&__pyx_n_s_sequence1, __pyx_k_sequence1, sizeof(__pyx_k_sequence1), 0, 0, 1, 1},
+  {&__pyx_n_s_sequence2, __pyx_k_sequence2, sizeof(__pyx_k_sequence2), 0, 0, 1, 1},
+  {&__pyx_n_s_set_zero_based, __pyx_k_set_zero_based, sizeof(__pyx_k_set_zero_based), 0, 0, 1, 1},
+  {&__pyx_n_s_skbio_alignment, __pyx_k_skbio_alignment, sizeof(__pyx_k_skbio_alignment), 0, 0, 1, 1},
+  {&__pyx_n_s_skbio_alignment__ssw_wrapper, __pyx_k_skbio_alignment__ssw_wrapper, sizeof(__pyx_k_skbio_alignment__ssw_wrapper), 0, 0, 1, 1},
+  {&__pyx_n_s_skbio_sequence, __pyx_k_skbio_sequence, sizeof(__pyx_k_skbio_sequence), 0, 0, 1, 1},
+  {&__pyx_n_s_start_end, __pyx_k_start_end, sizeof(__pyx_k_start_end), 0, 0, 1, 1},
+  {&__pyx_n_s_start_end_positions, __pyx_k_start_end_positions, sizeof(__pyx_k_start_end_positions), 0, 0, 1, 1},
+  {&__pyx_n_s_suboptimal_alignment_score, __pyx_k_suboptimal_alignment_score, sizeof(__pyx_k_suboptimal_alignment_score), 0, 0, 1, 1},
+  {&__pyx_n_s_substitution_matrix, __pyx_k_substitution_matrix, sizeof(__pyx_k_substitution_matrix), 0, 0, 1, 1},
+  {&__pyx_n_s_suppress_sequences, __pyx_k_suppress_sequences, sizeof(__pyx_k_suppress_sequences), 0, 0, 1, 1},
+  {&__pyx_n_s_target, __pyx_k_target, sizeof(__pyx_k_target), 0, 0, 1, 1},
+  {&__pyx_n_s_target_begin, __pyx_k_target_begin, sizeof(__pyx_k_target_begin), 0, 0, 1, 1},
+  {&__pyx_n_s_target_end_optimal, __pyx_k_target_end_optimal, sizeof(__pyx_k_target_end_optimal), 0, 0, 1, 1},
+  {&__pyx_n_s_target_end_suboptimal, __pyx_k_target_end_suboptimal, sizeof(__pyx_k_target_end_suboptimal), 0, 0, 1, 1},
+  {&__pyx_n_s_target_sequence, __pyx_k_target_sequence, sizeof(__pyx_k_target_sequence), 0, 0, 1, 1},
+  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
+  {&__pyx_n_s_tuple_cigar, __pyx_k_tuple_cigar, sizeof(__pyx_k_tuple_cigar), 0, 0, 1, 1},
+  {&__pyx_n_s_tuples_from_cigar, __pyx_k_tuples_from_cigar, sizeof(__pyx_k_tuples_from_cigar), 0, 0, 1, 1},
+  {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
+  {&__pyx_n_s_zero_index, __pyx_k_zero_index, sizeof(__pyx_k_zero_index), 0, 0, 1, 1},
+  {0, 0, 0, 0, 0, 0, 0}
+};
+static int __Pyx_InitCachedBuiltins(void) {
+  __pyx_builtin_property = __Pyx_GetBuiltinName(__pyx_n_s_property); if (!__pyx_builtin_property) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_Exception = __Pyx_GetBuiltinName(__pyx_n_s_Exception); if (!__pyx_builtin_Exception) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_ord = __Pyx_GetBuiltinName(__pyx_n_s_ord); if (!__pyx_builtin_ord) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  return 0;
+  __pyx_L1_error:;
+  return -1;
+}
+
+static int __Pyx_InitCachedConstants(void) {
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":140
+ *             align_len = len(query)
+ *             if align_len > 13:
+ *                 target = target[:10] + "..."             # <<<<<<<<<<<<<<
+ *                 query = query[:10] + "..."
+ * 
+ */
+  __pyx_slice__2 = PySlice_New(Py_None, __pyx_int_10, Py_None); if (unlikely(!__pyx_slice__2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_slice__2);
+  __Pyx_GIVEREF(__pyx_slice__2);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":141
+ *             if align_len > 13:
+ *                 target = target[:10] + "..."
+ *                 query = query[:10] + "..."             # <<<<<<<<<<<<<<
+ * 
+ *             length = "Length: %d" % align_len
+ */
+  __pyx_slice__4 = PySlice_New(Py_None, __pyx_int_10, Py_None); if (unlikely(!__pyx_slice__4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_slice__4);
+  __Pyx_GIVEREF(__pyx_slice__4);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":386
+ *         # Save the original index scheme and then set it to 0 (1/2)
+ *         orig_z_base = self.is_zero_based()
+ *         self.set_zero_based(True)             # <<<<<<<<<<<<<<
+ *         aligned_sequence = []
+ *         seq = sequence[begin:end + 1]
+ */
+  __pyx_tuple__7 = PyTuple_Pack(1, Py_True); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__7);
+  __Pyx_GIVEREF(__pyx_tuple__7);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":577
+ *         self.read_sequence = query_sequence
+ *         if gap_open_penalty <= 0:
+ *             raise ValueError("`gap_open_penalty` must be > 0")             # <<<<<<<<<<<<<<
+ *         self.gap_open_penalty = gap_open_penalty
+ *         if gap_extend_penalty <= 0:
+ */
+  __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_gap_open_penalty_must_be_0); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__9);
+  __Pyx_GIVEREF(__pyx_tuple__9);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":580
+ *         self.gap_open_penalty = gap_open_penalty
+ *         if gap_extend_penalty <= 0:
+ *             raise ValueError("`gap_extend_penalty` must be > 0")             # <<<<<<<<<<<<<<
+ *         self.gap_extend_penalty = gap_extend_penalty
+ *         self.distance_filter = 0 if distance_filter is None else \
+ */
+  __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_gap_extend_penalty_must_be_0); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__10);
+  __Pyx_GIVEREF(__pyx_tuple__10);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":595
+ *         if substitution_matrix is None:
+ *             if protein:
+ *                 raise Exception("Must provide a substitution matrix for"             # <<<<<<<<<<<<<<
+ *                                 " protein sequences")
+ *             matrix = self._build_match_matrix(match_score, mismatch_score)
+ */
+  __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Must_provide_a_substitution_matr); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__11);
+  __Pyx_GIVEREF(__pyx_tuple__11);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ */
+  __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__12);
+  __Pyx_GIVEREF(__pyx_tuple__12);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
+ * 
+ *             info.buf = PyArray_DATA(self)
+ */
+  __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__13);
+  __Pyx_GIVEREF(__pyx_tuple__13);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
+ *                 if   t == NPY_BYTE:        f = "b"
+ *                 elif t == NPY_UBYTE:       f = "B"
+ */
+  __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__14);
+  __Pyx_GIVEREF(__pyx_tuple__14);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or
+ */
+  __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__15);
+  __Pyx_GIVEREF(__pyx_tuple__15);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
+ *         if ((child.byteorder == c'>' and little_endian) or
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
+ *             # One could encode it in the format string and have Cython
+ *             # complain instead, BUT: < and > in format strings also imply
+ */
+  __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__16)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__16);
+  __Pyx_GIVEREF(__pyx_tuple__16);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+ *             t = child.type_num
+ *             if end - f < 5:
+ *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
+ * 
+ *             # Until ticket #99 is fixed, use integers to avoid warnings
+ */
+  __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__17);
+  __Pyx_GIVEREF(__pyx_tuple__17);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":731
+ * 
+ * 
+ * def local_pairwise_align_ssw(sequence1, sequence2,             # <<<<<<<<<<<<<<
+ *                              **kwargs):
+ *     """Align query and target sequences with Striped Smith-Waterman.
+ */
+  __pyx_tuple__18 = PyTuple_Pack(7, __pyx_n_s_sequence1, __pyx_n_s_sequence2, __pyx_n_s_kwargs, __pyx_n_s_query, __pyx_n_s_alignment, __pyx_n_s_start_end, __pyx_n_s_seqs); if (unlikely(!__pyx_tuple__18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__18);
+  __Pyx_GIVEREF(__pyx_tuple__18);
+  __pyx_codeobj__19 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__18, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_n_s_local_pairwise_align_ssw, 731, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_RefNannyFinishContext();
+  return 0;
+  __pyx_L1_error:;
+  __Pyx_RefNannyFinishContext();
+  return -1;
+}
+
+static int __Pyx_InitGlobals(void) {
+  if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_5 = PyInt_FromLong(5); if (unlikely(!__pyx_int_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_6 = PyInt_FromLong(6); if (unlikely(!__pyx_int_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_7 = PyInt_FromLong(7); if (unlikely(!__pyx_int_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_8 = PyInt_FromLong(8); if (unlikely(!__pyx_int_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_9 = PyInt_FromLong(9); if (unlikely(!__pyx_int_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_10 = PyInt_FromLong(10); if (unlikely(!__pyx_int_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_11 = PyInt_FromLong(11); if (unlikely(!__pyx_int_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_12 = PyInt_FromLong(12); if (unlikely(!__pyx_int_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_13 = PyInt_FromLong(13); if (unlikely(!__pyx_int_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_14 = PyInt_FromLong(14); if (unlikely(!__pyx_int_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_16 = PyInt_FromLong(16); if (unlikely(!__pyx_int_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_17 = PyInt_FromLong(17); if (unlikely(!__pyx_int_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_18 = PyInt_FromLong(18); if (unlikely(!__pyx_int_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_19 = PyInt_FromLong(19); if (unlikely(!__pyx_int_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_20 = PyInt_FromLong(20); if (unlikely(!__pyx_int_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_21 = PyInt_FromLong(21); if (unlikely(!__pyx_int_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_22 = PyInt_FromLong(22); if (unlikely(!__pyx_int_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_23 = PyInt_FromLong(23); if (unlikely(!__pyx_int_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_int_neg_3 = PyInt_FromLong(-3); if (unlikely(!__pyx_int_neg_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  return 0;
+  __pyx_L1_error:;
+  return -1;
+}
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC init_ssw_wrapper(void); /*proto*/
+PyMODINIT_FUNC init_ssw_wrapper(void)
+#else
+PyMODINIT_FUNC PyInit__ssw_wrapper(void); /*proto*/
+PyMODINIT_FUNC PyInit__ssw_wrapper(void)
+#endif
+{
+  PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
+  PyObject *__pyx_t_3 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannyDeclarations
+  #if CYTHON_REFNANNY
+  __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+  if (!__Pyx_RefNanny) {
+      PyErr_Clear();
+      __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+      if (!__Pyx_RefNanny)
+          Py_FatalError("failed to import 'refnanny' module");
+  }
+  #endif
+  __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__ssw_wrapper(void)", 0);
+  if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #ifdef __Pyx_CyFunction_USED
+  if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
+  #ifdef __Pyx_FusedFunction_USED
+  if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
+  #ifdef __Pyx_Generator_USED
+  if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
+  /*--- Library function declarations ---*/
+  /*--- Threads initialization code ---*/
+  #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+  #ifdef WITH_THREAD /* Python build with threading support? */
+  PyEval_InitThreads();
+  #endif
+  #endif
+  /*--- Module creation code ---*/
+  #if PY_MAJOR_VERSION < 3
+  __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_ssw_wrapper"), __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+  #else
+  __pyx_m = PyModule_Create(&__pyx_moduledef);
+  #endif
+  if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  Py_INCREF(__pyx_d);
+  __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #if CYTHON_COMPILING_IN_PYPY
+  Py_INCREF(__pyx_b);
+  #endif
+  if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  /*--- Initialize various global constants etc. ---*/
+  if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
+  if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
+  if (__pyx_module_is_main_skbio__alignment___ssw_wrapper) {
+    if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  }
+  #if PY_MAJOR_VERSION >= 3
+  {
+    PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!PyDict_GetItemString(modules, "skbio.alignment._ssw_wrapper")) {
+      if (unlikely(PyDict_SetItemString(modules, "skbio.alignment._ssw_wrapper", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+  }
+  #endif
+  /*--- Builtin init code ---*/
+  if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  /*--- Constants init code ---*/
+  if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  /*--- Global init code ---*/
+  /*--- Variable export code ---*/
+  /*--- Function export code ---*/
+  /*--- Type init code ---*/
+  __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure = &__pyx_vtable_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
+  __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure.__pyx___constructor = (PyObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *, s_align *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___constructor;
+  if (PyType_Ready(&__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure.tp_print = 0;
+  if (__Pyx_SetVtable(__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure.tp_dict, __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_SetAttrString(__pyx_m, "AlignmentStructure", (PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure = &__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
+  __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman = &__pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
+  __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman._seq_converter = (PyArrayObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__seq_converter;
+  __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman._build_match_matrix = (PyArrayObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *, PyObject *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__build_match_matrix;
+  __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman._convert_dict2d_to_matrix = (PyArrayObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__convert_dict2d_to_matrix;
+  if (PyType_Ready(&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman.tp_print = 0;
+  #if CYTHON_COMPILING_IN_CPYTHON
+  {
+    PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, "__call__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {
+      __pyx_wrapperbase_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__ = *((PyWrapperDescrObject *)wrapper)->d_base;
+      __pyx_wrapperbase_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__.doc = __pyx_doc_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__;
+      ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__;
+    }
+  }
+  #endif
+  if (__Pyx_SetVtable(__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman.tp_dict, __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_SetAttrString(__pyx_m, "StripedSmithWaterman", (PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman = &__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
+  /*--- Type import code ---*/
+  __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", 
+  #if CYTHON_COMPILING_IN_PYPY
+  sizeof(PyTypeObject),
+  #else
+  sizeof(PyHeapTypeObject),
+  #endif
+  0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  /*--- Variable import code ---*/
+  /*--- Function import code ---*/
+  /*--- Execution code ---*/
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":10
+ * 
+ * from cpython cimport bool
+ * import numpy as np             # <<<<<<<<<<<<<<
+ * cimport numpy as cnp
+ * from skbio.alignment import Alignment
+ */
+  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":12
+ * import numpy as np
+ * cimport numpy as cnp
+ * from skbio.alignment import Alignment             # <<<<<<<<<<<<<<
+ * from skbio.sequence import ProteinSequence, NucleotideSequence
+ * 
+ */
+  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_INCREF(__pyx_n_s_Alignment);
+  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_Alignment);
+  __Pyx_GIVEREF(__pyx_n_s_Alignment);
+  __pyx_t_2 = __Pyx_Import(__pyx_n_s_skbio_alignment, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_Alignment); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Alignment, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":13
+ * cimport numpy as cnp
+ * from skbio.alignment import Alignment
+ * from skbio.sequence import ProteinSequence, NucleotideSequence             # <<<<<<<<<<<<<<
+ * 
+ * cdef extern from "_lib/ssw.h":
+ */
+  __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_INCREF(__pyx_n_s_ProteinSequence);
+  PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_ProteinSequence);
+  __Pyx_GIVEREF(__pyx_n_s_ProteinSequence);
+  __Pyx_INCREF(__pyx_n_s_NucleotideSequence);
+  PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_NucleotideSequence);
+  __Pyx_GIVEREF(__pyx_n_s_NucleotideSequence);
+  __pyx_t_1 = __Pyx_Import(__pyx_n_s_skbio_sequence, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_ProteinSequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_ProteinSequence, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_NucleotideSequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_NucleotideSequence, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":51
+ *     cdef void align_destroy(s_align* a)
+ * 
+ * np_aa_table = np.array([             # <<<<<<<<<<<<<<
+ *     23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ *     23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ */
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = PyList_New(128); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 3, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 4, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 5, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 6, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 7, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 8, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 9, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 10, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 11, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 12, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 13, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 14, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 15, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 16, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 17, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 18, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 19, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 20, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 21, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 22, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 23, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 24, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 25, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 26, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 27, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 28, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 29, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 30, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 31, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 32, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 33, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 34, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 35, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 36, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 37, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 38, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 39, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 40, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 41, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 42, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 43, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 44, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 45, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 46, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 47, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 48, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 49, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 50, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 51, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 52, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 53, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 54, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 55, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 56, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 57, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 58, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 59, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 60, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 61, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 62, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 63, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 64, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_1, 65, __pyx_int_0);
+  __Pyx_GIVEREF(__pyx_int_0);
+  __Pyx_INCREF(__pyx_int_20);
+  PyList_SET_ITEM(__pyx_t_1, 66, __pyx_int_20);
+  __Pyx_GIVEREF(__pyx_int_20);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 67, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_3);
+  PyList_SET_ITEM(__pyx_t_1, 68, __pyx_int_3);
+  __Pyx_GIVEREF(__pyx_int_3);
+  __Pyx_INCREF(__pyx_int_6);
+  PyList_SET_ITEM(__pyx_t_1, 69, __pyx_int_6);
+  __Pyx_GIVEREF(__pyx_int_6);
+  __Pyx_INCREF(__pyx_int_13);
+  PyList_SET_ITEM(__pyx_t_1, 70, __pyx_int_13);
+  __Pyx_GIVEREF(__pyx_int_13);
+  __Pyx_INCREF(__pyx_int_7);
+  PyList_SET_ITEM(__pyx_t_1, 71, __pyx_int_7);
+  __Pyx_GIVEREF(__pyx_int_7);
+  __Pyx_INCREF(__pyx_int_8);
+  PyList_SET_ITEM(__pyx_t_1, 72, __pyx_int_8);
+  __Pyx_GIVEREF(__pyx_int_8);
+  __Pyx_INCREF(__pyx_int_9);
+  PyList_SET_ITEM(__pyx_t_1, 73, __pyx_int_9);
+  __Pyx_GIVEREF(__pyx_int_9);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 74, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_11);
+  PyList_SET_ITEM(__pyx_t_1, 75, __pyx_int_11);
+  __Pyx_GIVEREF(__pyx_int_11);
+  __Pyx_INCREF(__pyx_int_10);
+  PyList_SET_ITEM(__pyx_t_1, 76, __pyx_int_10);
+  __Pyx_GIVEREF(__pyx_int_10);
+  __Pyx_INCREF(__pyx_int_12);
+  PyList_SET_ITEM(__pyx_t_1, 77, __pyx_int_12);
+  __Pyx_GIVEREF(__pyx_int_12);
+  __Pyx_INCREF(__pyx_int_2);
+  PyList_SET_ITEM(__pyx_t_1, 78, __pyx_int_2);
+  __Pyx_GIVEREF(__pyx_int_2);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 79, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_14);
+  PyList_SET_ITEM(__pyx_t_1, 80, __pyx_int_14);
+  __Pyx_GIVEREF(__pyx_int_14);
+  __Pyx_INCREF(__pyx_int_5);
+  PyList_SET_ITEM(__pyx_t_1, 81, __pyx_int_5);
+  __Pyx_GIVEREF(__pyx_int_5);
+  __Pyx_INCREF(__pyx_int_1);
+  PyList_SET_ITEM(__pyx_t_1, 82, __pyx_int_1);
+  __Pyx_GIVEREF(__pyx_int_1);
+  __Pyx_INCREF(__pyx_int_15);
+  PyList_SET_ITEM(__pyx_t_1, 83, __pyx_int_15);
+  __Pyx_GIVEREF(__pyx_int_15);
+  __Pyx_INCREF(__pyx_int_16);
+  PyList_SET_ITEM(__pyx_t_1, 84, __pyx_int_16);
+  __Pyx_GIVEREF(__pyx_int_16);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 85, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_19);
+  PyList_SET_ITEM(__pyx_t_1, 86, __pyx_int_19);
+  __Pyx_GIVEREF(__pyx_int_19);
+  __Pyx_INCREF(__pyx_int_17);
+  PyList_SET_ITEM(__pyx_t_1, 87, __pyx_int_17);
+  __Pyx_GIVEREF(__pyx_int_17);
+  __Pyx_INCREF(__pyx_int_22);
+  PyList_SET_ITEM(__pyx_t_1, 88, __pyx_int_22);
+  __Pyx_GIVEREF(__pyx_int_22);
+  __Pyx_INCREF(__pyx_int_18);
+  PyList_SET_ITEM(__pyx_t_1, 89, __pyx_int_18);
+  __Pyx_GIVEREF(__pyx_int_18);
+  __Pyx_INCREF(__pyx_int_21);
+  PyList_SET_ITEM(__pyx_t_1, 90, __pyx_int_21);
+  __Pyx_GIVEREF(__pyx_int_21);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 91, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 92, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 93, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 94, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 95, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 96, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_1, 97, __pyx_int_0);
+  __Pyx_GIVEREF(__pyx_int_0);
+  __Pyx_INCREF(__pyx_int_20);
+  PyList_SET_ITEM(__pyx_t_1, 98, __pyx_int_20);
+  __Pyx_GIVEREF(__pyx_int_20);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 99, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_3);
+  PyList_SET_ITEM(__pyx_t_1, 100, __pyx_int_3);
+  __Pyx_GIVEREF(__pyx_int_3);
+  __Pyx_INCREF(__pyx_int_6);
+  PyList_SET_ITEM(__pyx_t_1, 101, __pyx_int_6);
+  __Pyx_GIVEREF(__pyx_int_6);
+  __Pyx_INCREF(__pyx_int_13);
+  PyList_SET_ITEM(__pyx_t_1, 102, __pyx_int_13);
+  __Pyx_GIVEREF(__pyx_int_13);
+  __Pyx_INCREF(__pyx_int_7);
+  PyList_SET_ITEM(__pyx_t_1, 103, __pyx_int_7);
+  __Pyx_GIVEREF(__pyx_int_7);
+  __Pyx_INCREF(__pyx_int_8);
+  PyList_SET_ITEM(__pyx_t_1, 104, __pyx_int_8);
+  __Pyx_GIVEREF(__pyx_int_8);
+  __Pyx_INCREF(__pyx_int_9);
+  PyList_SET_ITEM(__pyx_t_1, 105, __pyx_int_9);
+  __Pyx_GIVEREF(__pyx_int_9);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 106, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_11);
+  PyList_SET_ITEM(__pyx_t_1, 107, __pyx_int_11);
+  __Pyx_GIVEREF(__pyx_int_11);
+  __Pyx_INCREF(__pyx_int_10);
+  PyList_SET_ITEM(__pyx_t_1, 108, __pyx_int_10);
+  __Pyx_GIVEREF(__pyx_int_10);
+  __Pyx_INCREF(__pyx_int_12);
+  PyList_SET_ITEM(__pyx_t_1, 109, __pyx_int_12);
+  __Pyx_GIVEREF(__pyx_int_12);
+  __Pyx_INCREF(__pyx_int_2);
+  PyList_SET_ITEM(__pyx_t_1, 110, __pyx_int_2);
+  __Pyx_GIVEREF(__pyx_int_2);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 111, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_14);
+  PyList_SET_ITEM(__pyx_t_1, 112, __pyx_int_14);
+  __Pyx_GIVEREF(__pyx_int_14);
+  __Pyx_INCREF(__pyx_int_5);
+  PyList_SET_ITEM(__pyx_t_1, 113, __pyx_int_5);
+  __Pyx_GIVEREF(__pyx_int_5);
+  __Pyx_INCREF(__pyx_int_1);
+  PyList_SET_ITEM(__pyx_t_1, 114, __pyx_int_1);
+  __Pyx_GIVEREF(__pyx_int_1);
+  __Pyx_INCREF(__pyx_int_15);
+  PyList_SET_ITEM(__pyx_t_1, 115, __pyx_int_15);
+  __Pyx_GIVEREF(__pyx_int_15);
+  __Pyx_INCREF(__pyx_int_16);
+  PyList_SET_ITEM(__pyx_t_1, 116, __pyx_int_16);
+  __Pyx_GIVEREF(__pyx_int_16);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 117, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_19);
+  PyList_SET_ITEM(__pyx_t_1, 118, __pyx_int_19);
+  __Pyx_GIVEREF(__pyx_int_19);
+  __Pyx_INCREF(__pyx_int_17);
+  PyList_SET_ITEM(__pyx_t_1, 119, __pyx_int_17);
+  __Pyx_GIVEREF(__pyx_int_17);
+  __Pyx_INCREF(__pyx_int_22);
+  PyList_SET_ITEM(__pyx_t_1, 120, __pyx_int_22);
+  __Pyx_GIVEREF(__pyx_int_22);
+  __Pyx_INCREF(__pyx_int_18);
+  PyList_SET_ITEM(__pyx_t_1, 121, __pyx_int_18);
+  __Pyx_GIVEREF(__pyx_int_18);
+  __Pyx_INCREF(__pyx_int_21);
+  PyList_SET_ITEM(__pyx_t_1, 122, __pyx_int_21);
+  __Pyx_GIVEREF(__pyx_int_21);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 123, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 124, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 125, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 126, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __Pyx_INCREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_1, 127, __pyx_int_23);
+  __Pyx_GIVEREF(__pyx_int_23);
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np_aa_table, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":61
+ *     14,  5,  1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23])
+ * 
+ * np_nt_table = np.array([             # <<<<<<<<<<<<<<
+ *     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
+ *     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
+ */
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = PyList_New(128); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 3, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 4, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 5, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 6, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 7, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 8, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 9, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 10, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 11, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 12, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 13, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 14, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 15, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 16, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 17, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 18, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 19, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 20, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 21, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 22, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 23, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 24, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 25, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 26, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 27, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 28, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 29, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 30, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 31, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 32, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 33, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 34, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 35, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 36, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 37, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 38, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 39, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 40, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 41, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 42, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 43, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 44, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 45, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 46, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 47, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 48, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 49, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 50, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 51, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 52, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 53, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 54, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 55, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 56, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 57, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 58, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 59, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 60, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 61, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 62, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 63, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 64, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_1, 65, __pyx_int_0);
+  __Pyx_GIVEREF(__pyx_int_0);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 66, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_1);
+  PyList_SET_ITEM(__pyx_t_1, 67, __pyx_int_1);
+  __Pyx_GIVEREF(__pyx_int_1);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 68, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 69, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 70, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_2);
+  PyList_SET_ITEM(__pyx_t_1, 71, __pyx_int_2);
+  __Pyx_GIVEREF(__pyx_int_2);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 72, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 73, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 74, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 75, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 76, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 77, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 78, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 79, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 80, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 81, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 82, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 83, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_3);
+  PyList_SET_ITEM(__pyx_t_1, 84, __pyx_int_3);
+  __Pyx_GIVEREF(__pyx_int_3);
+  __Pyx_INCREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_1, 85, __pyx_int_0);
+  __Pyx_GIVEREF(__pyx_int_0);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 86, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 87, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 88, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 89, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 90, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 91, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 92, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 93, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 94, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 95, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 96, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_1, 97, __pyx_int_0);
+  __Pyx_GIVEREF(__pyx_int_0);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 98, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_1);
+  PyList_SET_ITEM(__pyx_t_1, 99, __pyx_int_1);
+  __Pyx_GIVEREF(__pyx_int_1);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 100, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 101, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 102, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_2);
+  PyList_SET_ITEM(__pyx_t_1, 103, __pyx_int_2);
+  __Pyx_GIVEREF(__pyx_int_2);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 104, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 105, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 106, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 107, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 108, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 109, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 110, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 111, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 112, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 113, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 114, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 115, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_3);
+  PyList_SET_ITEM(__pyx_t_1, 116, __pyx_int_3);
+  __Pyx_GIVEREF(__pyx_int_3);
+  __Pyx_INCREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_1, 117, __pyx_int_0);
+  __Pyx_GIVEREF(__pyx_int_0);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 118, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 119, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 120, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 121, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 122, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 123, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 124, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 125, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 126, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __Pyx_INCREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_1, 127, __pyx_int_4);
+  __Pyx_GIVEREF(__pyx_int_4);
+  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np_nt_table, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":71
+ *     4,  4,  4,  4,  3,  0,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4])
+ * 
+ * mid_table = np.array(['M', 'I', 'D'])             # <<<<<<<<<<<<<<
+ * 
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = PyList_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_INCREF(__pyx_n_s_M);
+  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_M);
+  __Pyx_GIVEREF(__pyx_n_s_M);
+  __Pyx_INCREF(__pyx_n_s_I);
+  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_I);
+  __Pyx_GIVEREF(__pyx_n_s_I);
+  __Pyx_INCREF(__pyx_n_s_D);
+  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_n_s_D);
+  __Pyx_GIVEREF(__pyx_n_s_D);
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_mid_table, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":148
+ * 
+ *     @property
+ *     def optimal_alignment_score(self):             # <<<<<<<<<<<<<<
+ *         """Optimal alignment score
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":147
+ *         return score
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def optimal_alignment_score(self):
+ *         """Optimal alignment score
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_optimal_alignment_score, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":160
+ * 
+ *     @property
+ *     def suboptimal_alignment_score(self):             # <<<<<<<<<<<<<<
+ *         """Suboptimal alignment score
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_suboptimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":159
+ *         return self.p.score1
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def suboptimal_alignment_score(self):
+ *         """Suboptimal alignment score
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_suboptimal_alignment_score, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":172
+ * 
+ *     @property
+ *     def target_begin(self):             # <<<<<<<<<<<<<<
+ *         """Character index where the target's alignment begins
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_begin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":171
+ *         return self.p.score2
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def target_begin(self):
+ *         """Character index where the target's alignment begins
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_begin, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":189
+ * 
+ *     @property
+ *     def target_end_optimal(self):             # <<<<<<<<<<<<<<
+ *         """Character index where the target's optimal alignment ends
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":188
+ *                                                             >= 0) else -1
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def target_end_optimal(self):
+ *         """Character index where the target's optimal alignment ends
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_end_optimal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":206
+ * 
+ *     @property
+ *     def target_end_suboptimal(self):             # <<<<<<<<<<<<<<
+ *         """Character index where the target's suboptimal alignment ends
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_end_suboptimal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":205
+ *         return self.p.ref_end1 + self.index_starts_at
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def target_end_suboptimal(self):
+ *         """Character index where the target's suboptimal alignment ends
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_end_suboptimal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":223
+ * 
+ *     @property
+ *     def query_begin(self):             # <<<<<<<<<<<<<<
+ *         """Returns the character index at which the query sequence begins
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_begin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":222
+ *         return self.p.ref_end2 + self.index_starts_at
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def query_begin(self):
+ *         """Returns the character index at which the query sequence begins
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_begin, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":240
+ * 
+ *     @property
+ *     def query_end(self):             # <<<<<<<<<<<<<<
+ *         """Character index at where query sequence ends
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_end); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":239
+ *                                                              >= 0) else -1
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def query_end(self):
+ *         """Character index at where query sequence ends
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_end, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":256
+ * 
+ *     @property
+ *     def cigar(self):             # <<<<<<<<<<<<<<
+ *         """Cigar formatted string for the optimal alignment
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":255
+ *         return self.p.read_end1 + self.index_starts_at
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def cigar(self):
+ *         """Cigar formatted string for the optimal alignment
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_cigar, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":294
+ * 
+ *     @property
+ *     def query_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Query sequence
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 294; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":293
+ *         return self._cigar_string
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def query_sequence(self):
+ *         """Query sequence
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 294; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":306
+ * 
+ *     @property
+ *     def target_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Target sequence
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 306; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":305
+ *         return self.read_sequence
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def target_sequence(self):
+ *         """Target sequence
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 305; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 305; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 306; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":318
+ * 
+ *     @property
+ *     def aligned_query_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Returns the query sequence aligned by the cigar
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":317
+ *         return self.reference_sequence
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def aligned_query_sequence(self):
+ *         """Returns the query sequence aligned by the cigar
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_aligned_query_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":340
+ * 
+ *     @property
+ *     def aligned_target_sequence(self):             # <<<<<<<<<<<<<<
+ *         """Returns the target sequence aligned by the cigar
+ * 
+ */
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":339
+ *         return None
+ * 
+ *     @property             # <<<<<<<<<<<<<<
+ *     def aligned_target_sequence(self):
+ *         """Returns the target sequence aligned by the cigar
+ */
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_aligned_target_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":731
+ * 
+ * 
+ * def local_pairwise_align_ssw(sequence1, sequence2,             # <<<<<<<<<<<<<<
+ *                              **kwargs):
+ *     """Align query and target sequences with Striped Smith-Waterman.
+ */
+  __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5skbio_9alignment_12_ssw_wrapper_1local_pairwise_align_ssw, NULL, __pyx_n_s_skbio_alignment__ssw_wrapper); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_local_pairwise_align_ssw, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/alignment/_ssw_wrapper.pyx":1
+ * # -----------------------------------------------------------------------------             # <<<<<<<<<<<<<<
+ * #  Copyright (c) 2013--, scikit-bio development team.
+ * #
+ */
+  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+ *      arr.base = baseptr
+ * 
+ * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
+ *     if arr.base is NULL:
+ *         return None
+ */
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_XDECREF(__pyx_t_3);
+  if (__pyx_m) {
+    __Pyx_AddTraceback("init skbio.alignment._ssw_wrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
+    Py_DECREF(__pyx_m); __pyx_m = 0;
+  } else if (!PyErr_Occurred()) {
+    PyErr_SetString(PyExc_ImportError, "init skbio.alignment._ssw_wrapper");
+  }
+  __pyx_L0:;
+  __Pyx_RefNannyFinishContext();
+  #if PY_MAJOR_VERSION < 3
+  return;
+  #else
+  return __pyx_m;
+  #endif
+}
+
+/* Runtime support code */
+#if CYTHON_REFNANNY
+static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
+    PyObject *m = NULL, *p = NULL;
+    void *r = NULL;
+    m = PyImport_ImportModule((char *)modname);
+    if (!m) goto end;
+    p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
+    if (!p) goto end;
+    r = PyLong_AsVoidPtr(p);
+end:
+    Py_XDECREF(p);
+    Py_XDECREF(m);
+    return (__Pyx_RefNannyAPIStruct *)r;
+}
+#endif /* CYTHON_REFNANNY */
+
+static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
+    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
+    if (unlikely(!result)) {
+        PyErr_Format(PyExc_NameError,
+#if PY_MAJOR_VERSION >= 3
+            "name '%U' is not defined", name);
+#else
+            "name '%.200s' is not defined", PyString_AS_STRING(name));
+#endif
+    }
+    return result;
+}
+
+static void __Pyx_RaiseArgtupleInvalid(
+    const char* func_name,
+    int exact,
+    Py_ssize_t num_min,
+    Py_ssize_t num_max,
+    Py_ssize_t num_found)
+{
+    Py_ssize_t num_expected;
+    const char *more_or_less;
+    if (num_found < num_min) {
+        num_expected = num_min;
+        more_or_less = "at least";
+    } else {
+        num_expected = num_max;
+        more_or_less = "at most";
+    }
+    if (exact) {
+        more_or_less = "exactly";
+    }
+    PyErr_Format(PyExc_TypeError,
+                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
+                 func_name, more_or_less, num_expected,
+                 (num_expected == 1) ? "" : "s", num_found);
+}
+
+static void __Pyx_RaiseDoubleKeywordsError(
+    const char* func_name,
+    PyObject* kw_name)
+{
+    PyErr_Format(PyExc_TypeError,
+        #if PY_MAJOR_VERSION >= 3
+        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+        #else
+        "%s() got multiple values for keyword argument '%s'", func_name,
+        PyString_AsString(kw_name));
+        #endif
+}
+
+static int __Pyx_ParseOptionalKeywords(
+    PyObject *kwds,
+    PyObject **argnames[],
+    PyObject *kwds2,
+    PyObject *values[],
+    Py_ssize_t num_pos_args,
+    const char* function_name)
+{
+    PyObject *key = 0, *value = 0;
+    Py_ssize_t pos = 0;
+    PyObject*** name;
+    PyObject*** first_kw_arg = argnames + num_pos_args;
+    while (PyDict_Next(kwds, &pos, &key, &value)) {
+        name = first_kw_arg;
+        while (*name && (**name != key)) name++;
+        if (*name) {
+            values[name-argnames] = value;
+            continue;
+        }
+        name = first_kw_arg;
+        #if PY_MAJOR_VERSION < 3
+        if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
+            while (*name) {
+                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
+                        && _PyString_Eq(**name, key)) {
+                    values[name-argnames] = value;
+                    break;
+                }
+                name++;
+            }
+            if (*name) continue;
+            else {
+                PyObject*** argname = argnames;
+                while (argname != first_kw_arg) {
+                    if ((**argname == key) || (
+                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
+                             && _PyString_Eq(**argname, key))) {
+                        goto arg_passed_twice;
+                    }
+                    argname++;
+                }
+            }
+        } else
+        #endif
+        if (likely(PyUnicode_Check(key))) {
+            while (*name) {
+                int cmp = (**name == key) ? 0 :
+                #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+                    (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
+                #endif
+                    PyUnicode_Compare(**name, key);
+                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+                if (cmp == 0) {
+                    values[name-argnames] = value;
+                    break;
+                }
+                name++;
+            }
+            if (*name) continue;
+            else {
+                PyObject*** argname = argnames;
+                while (argname != first_kw_arg) {
+                    int cmp = (**argname == key) ? 0 :
+                    #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+                        (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
+                    #endif
+                        PyUnicode_Compare(**argname, key);
+                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+                    if (cmp == 0) goto arg_passed_twice;
+                    argname++;
+                }
+            }
+        } else
+            goto invalid_keyword_type;
+        if (kwds2) {
+            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+        } else {
+            goto invalid_keyword;
+        }
+    }
+    return 0;
+arg_passed_twice:
+    __Pyx_RaiseDoubleKeywordsError(function_name, key);
+    goto bad;
+invalid_keyword_type:
+    PyErr_Format(PyExc_TypeError,
+        "%.200s() keywords must be strings", function_name);
+    goto bad;
+invalid_keyword:
+    PyErr_Format(PyExc_TypeError,
+    #if PY_MAJOR_VERSION < 3
+        "%.200s() got an unexpected keyword argument '%.200s'",
+        function_name, PyString_AsString(key));
+    #else
+        "%s() got an unexpected keyword argument '%U'",
+        function_name, key);
+    #endif
+bad:
+    return -1;
+}
+
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
+#if CYTHON_COMPILING_IN_CPYTHON
+#if PY_MAJOR_VERSION >= 3
+    if (likely(PyUnicode_Check(n)))
+#else
+    if (likely(PyString_Check(n)))
+#endif
+        return __Pyx_PyObject_GetAttrStr(o, n);
+#endif
+    return PyObject_GetAttr(o, n);
+}
+
+#if !CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* values) {
+    return PyObject_CallMethodObjArgs(sep, __pyx_n_s_join, values, NULL);
+}
+#endif
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
+    PyObject *result;
+    ternaryfunc call = func->ob_type->tp_call;
+    if (unlikely(!call))
+        return PyObject_Call(func, arg, kw);
+#if PY_VERSION_HEX >= 0x02060000
+    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+        return NULL;
+#endif
+    result = (*call)(func, arg, kw);
+#if PY_VERSION_HEX >= 0x02060000
+    Py_LeaveRecursiveCall();
+#endif
+    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+        PyErr_SetString(
+            PyExc_SystemError,
+            "NULL result without error in PyObject_Call");
+    }
+    return result;
+}
+#endif
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
+        PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+        PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
+        int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    PyMappingMethods* mp;
+#if PY_MAJOR_VERSION < 3
+    PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
+    if (likely(ms && ms->sq_slice)) {
+        if (!has_cstart) {
+            if (_py_start && (*_py_start != Py_None)) {
+                cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
+                if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+            } else
+                cstart = 0;
+        }
+        if (!has_cstop) {
+            if (_py_stop && (*_py_stop != Py_None)) {
+                cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
+                if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+            } else
+                cstop = PY_SSIZE_T_MAX;
+        }
+        if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
+            Py_ssize_t l = ms->sq_length(obj);
+            if (likely(l >= 0)) {
+                if (cstop < 0) {
+                    cstop += l;
+                    if (cstop < 0) cstop = 0;
+                }
+                if (cstart < 0) {
+                    cstart += l;
+                    if (cstart < 0) cstart = 0;
+                }
+            } else {
+                if (PyErr_ExceptionMatches(PyExc_OverflowError))
+                    PyErr_Clear();
+                else
+                    goto bad;
+            }
+        }
+        return ms->sq_slice(obj, cstart, cstop);
+    }
+#endif
+    mp = Py_TYPE(obj)->tp_as_mapping;
+    if (likely(mp && mp->mp_subscript))
+#endif
+    {
+        PyObject* result;
+        PyObject *py_slice, *py_start, *py_stop;
+        if (_py_slice) {
+            py_slice = *_py_slice;
+        } else {
+            PyObject* owned_start = NULL;
+            PyObject* owned_stop = NULL;
+            if (_py_start) {
+                py_start = *_py_start;
+            } else {
+                if (has_cstart) {
+                    owned_start = py_start = PyInt_FromSsize_t(cstart);
+                    if (unlikely(!py_start)) goto bad;
+                } else
+                    py_start = Py_None;
+            }
+            if (_py_stop) {
+                py_stop = *_py_stop;
+            } else {
+                if (has_cstop) {
+                    owned_stop = py_stop = PyInt_FromSsize_t(cstop);
+                    if (unlikely(!py_stop)) {
+                        Py_XDECREF(owned_start);
+                        goto bad;
+                    }
+                } else
+                    py_stop = Py_None;
+            }
+            py_slice = PySlice_New(py_start, py_stop, Py_None);
+            Py_XDECREF(owned_start);
+            Py_XDECREF(owned_stop);
+            if (unlikely(!py_slice)) goto bad;
+        }
+#if CYTHON_COMPILING_IN_CPYTHON
+        result = mp->mp_subscript(obj, py_slice);
+#else
+        result = PyObject_GetItem(obj, py_slice);
+#endif
+        if (!_py_slice) {
+            Py_DECREF(py_slice);
+        }
+        return result;
+    }
+    PyErr_Format(PyExc_TypeError,
+        "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
+bad:
+    return NULL;
+}
+
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
+    PyObject *result;
+#if CYTHON_COMPILING_IN_CPYTHON
+    result = PyDict_GetItem(__pyx_d, name);
+    if (result) {
+        Py_INCREF(result);
+    } else {
+#else
+    result = PyObject_GetItem(__pyx_d, name);
+    if (!result) {
+        PyErr_Clear();
+#endif
+        result = __Pyx_GetBuiltinName(name);
+    }
+    return result;
+}
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
+    PyObject *r;
+    if (!j) return NULL;
+    r = PyObject_GetItem(o, j);
+    Py_DECREF(j);
+    return r;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+                                                              int wraparound, int boundscheck) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
+    if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
+        PyObject *r = PyList_GET_ITEM(o, i);
+        Py_INCREF(r);
+        return r;
+    }
+    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+    return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+                                                              int wraparound, int boundscheck) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
+    if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
+        PyObject *r = PyTuple_GET_ITEM(o, i);
+        Py_INCREF(r);
+        return r;
+    }
+    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+    return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
+                                                     int is_list, int wraparound, int boundscheck) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    if (is_list || PyList_CheckExact(o)) {
+        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
+        if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
+            PyObject *r = PyList_GET_ITEM(o, n);
+            Py_INCREF(r);
+            return r;
+        }
+    }
+    else if (PyTuple_CheckExact(o)) {
+        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
+        if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
+            PyObject *r = PyTuple_GET_ITEM(o, n);
+            Py_INCREF(r);
+            return r;
+        }
+    } else {
+        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
+        if (likely(m && m->sq_item)) {
+            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
+                Py_ssize_t l = m->sq_length(o);
+                if (likely(l >= 0)) {
+                    i += l;
+                } else {
+                    if (PyErr_ExceptionMatches(PyExc_OverflowError))
+                        PyErr_Clear();
+                    else
+                        return NULL;
+                }
+            }
+            return m->sq_item(o, i);
+        }
+    }
+#else
+    if (is_list || PySequence_Check(o)) {
+        return PySequence_GetItem(o, i);
+    }
+#endif
+    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
+    PyErr_Format(PyExc_ValueError,
+                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
+}
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
+    PyErr_Format(PyExc_ValueError,
+                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
+                 index, (index == 1) ? "" : "s");
+}
+
+static CYTHON_INLINE int __Pyx_IterFinish(void) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    PyThreadState *tstate = PyThreadState_GET();
+    PyObject* exc_type = tstate->curexc_type;
+    if (unlikely(exc_type)) {
+        if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) {
+            PyObject *exc_value, *exc_tb;
+            exc_value = tstate->curexc_value;
+            exc_tb = tstate->curexc_traceback;
+            tstate->curexc_type = 0;
+            tstate->curexc_value = 0;
+            tstate->curexc_traceback = 0;
+            Py_DECREF(exc_type);
+            Py_XDECREF(exc_value);
+            Py_XDECREF(exc_tb);
+            return 0;
+        } else {
+            return -1;
+        }
+    }
+    return 0;
+#else
+    if (unlikely(PyErr_Occurred())) {
+        if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
+            PyErr_Clear();
+            return 0;
+        } else {
+            return -1;
+        }
+    }
+    return 0;
+#endif
+}
+
+static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
+    if (unlikely(retval)) {
+        Py_DECREF(retval);
+        __Pyx_RaiseTooManyValuesError(expected);
+        return -1;
+    } else {
+        return __Pyx_IterFinish();
+    }
+    return 0;
+}
+
+static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
+#if CYTHON_COMPILING_IN_PYPY
+    return PyObject_RichCompareBool(s1, s2, equals);
+#else
+    if (s1 == s2) {
+        return (equals == Py_EQ);
+    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
+        const char *ps1, *ps2;
+        Py_ssize_t length = PyBytes_GET_SIZE(s1);
+        if (length != PyBytes_GET_SIZE(s2))
+            return (equals == Py_NE);
+        ps1 = PyBytes_AS_STRING(s1);
+        ps2 = PyBytes_AS_STRING(s2);
+        if (ps1[0] != ps2[0]) {
+            return (equals == Py_NE);
+        } else if (length == 1) {
+            return (equals == Py_EQ);
+        } else {
+            int result = memcmp(ps1, ps2, (size_t)length);
+            return (equals == Py_EQ) ? (result == 0) : (result != 0);
+        }
+    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
+        return (equals == Py_NE);
+    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
+        return (equals == Py_NE);
+    } else {
+        int result;
+        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
+        if (!py_result)
+            return -1;
+        result = __Pyx_PyObject_IsTrue(py_result);
+        Py_DECREF(py_result);
+        return result;
+    }
+#endif
+}
+
+static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
+#if CYTHON_COMPILING_IN_PYPY
+    return PyObject_RichCompareBool(s1, s2, equals);
+#else
+#if PY_MAJOR_VERSION < 3
+    PyObject* owned_ref = NULL;
+#endif
+    int s1_is_unicode, s2_is_unicode;
+    if (s1 == s2) {
+        goto return_eq;
+    }
+    s1_is_unicode = PyUnicode_CheckExact(s1);
+    s2_is_unicode = PyUnicode_CheckExact(s2);
+#if PY_MAJOR_VERSION < 3
+    if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
+        owned_ref = PyUnicode_FromObject(s2);
+        if (unlikely(!owned_ref))
+            return -1;
+        s2 = owned_ref;
+        s2_is_unicode = 1;
+    } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
+        owned_ref = PyUnicode_FromObject(s1);
+        if (unlikely(!owned_ref))
+            return -1;
+        s1 = owned_ref;
+        s1_is_unicode = 1;
+    } else if (((!s2_is_unicode) & (!s1_is_unicode))) {
+        return __Pyx_PyBytes_Equals(s1, s2, equals);
+    }
+#endif
+    if (s1_is_unicode & s2_is_unicode) {
+        Py_ssize_t length;
+        int kind;
+        void *data1, *data2;
+        #if CYTHON_PEP393_ENABLED
+        if (unlikely(PyUnicode_READY(s1) < 0) || unlikely(PyUnicode_READY(s2) < 0))
+            return -1;
+        #endif
+        length = __Pyx_PyUnicode_GET_LENGTH(s1);
+        if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
+            goto return_ne;
+        }
+        kind = __Pyx_PyUnicode_KIND(s1);
+        if (kind != __Pyx_PyUnicode_KIND(s2)) {
+            goto return_ne;
+        }
+        data1 = __Pyx_PyUnicode_DATA(s1);
+        data2 = __Pyx_PyUnicode_DATA(s2);
+        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
+            goto return_ne;
+        } else if (length == 1) {
+            goto return_eq;
+        } else {
+            int result = memcmp(data1, data2, (size_t)(length * kind));
+            #if PY_MAJOR_VERSION < 3
+            Py_XDECREF(owned_ref);
+            #endif
+            return (equals == Py_EQ) ? (result == 0) : (result != 0);
+        }
+    } else if ((s1 == Py_None) & s2_is_unicode) {
+        goto return_ne;
+    } else if ((s2 == Py_None) & s1_is_unicode) {
+        goto return_ne;
+    } else {
+        int result;
+        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
+        if (!py_result)
+            return -1;
+        result = __Pyx_PyObject_IsTrue(py_result);
+        Py_DECREF(py_result);
+        return result;
+    }
+return_eq:
+    #if PY_MAJOR_VERSION < 3
+    Py_XDECREF(owned_ref);
+    #endif
+    return (equals == Py_EQ);
+return_ne:
+    #if PY_MAJOR_VERSION < 3
+    Py_XDECREF(owned_ref);
+    #endif
+    return (equals == Py_NE);
+#endif
+}
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    PyObject *tmp_type, *tmp_value, *tmp_tb;
+    PyThreadState *tstate = PyThreadState_GET();
+    tmp_type = tstate->curexc_type;
+    tmp_value = tstate->curexc_value;
+    tmp_tb = tstate->curexc_traceback;
+    tstate->curexc_type = type;
+    tstate->curexc_value = value;
+    tstate->curexc_traceback = tb;
+    Py_XDECREF(tmp_type);
+    Py_XDECREF(tmp_value);
+    Py_XDECREF(tmp_tb);
+#else
+    PyErr_Restore(type, value, tb);
+#endif
+}
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    PyThreadState *tstate = PyThreadState_GET();
+    *type = tstate->curexc_type;
+    *value = tstate->curexc_value;
+    *tb = tstate->curexc_traceback;
+    tstate->curexc_type = 0;
+    tstate->curexc_value = 0;
+    tstate->curexc_traceback = 0;
+#else
+    PyErr_Fetch(type, value, tb);
+#endif
+}
+
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
+                        CYTHON_UNUSED PyObject *cause) {
+    Py_XINCREF(type);
+    if (!value || value == Py_None)
+        value = NULL;
+    else
+        Py_INCREF(value);
+    if (!tb || tb == Py_None)
+        tb = NULL;
+    else {
+        Py_INCREF(tb);
+        if (!PyTraceBack_Check(tb)) {
+            PyErr_SetString(PyExc_TypeError,
+                "raise: arg 3 must be a traceback or None");
+            goto raise_error;
+        }
+    }
+    #if PY_VERSION_HEX < 0x02050000
+    if (PyClass_Check(type)) {
+    #else
+    if (PyType_Check(type)) {
+    #endif
+#if CYTHON_COMPILING_IN_PYPY
+        if (!value) {
+            Py_INCREF(Py_None);
+            value = Py_None;
+        }
+#endif
+        PyErr_NormalizeException(&type, &value, &tb);
+    } else {
+        if (value) {
+            PyErr_SetString(PyExc_TypeError,
+                "instance exception may not have a separate value");
+            goto raise_error;
+        }
+        value = type;
+        #if PY_VERSION_HEX < 0x02050000
+        if (PyInstance_Check(type)) {
+            type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+            Py_INCREF(type);
+        } else {
+            type = 0;
+            PyErr_SetString(PyExc_TypeError,
+                "raise: exception must be an old-style class or instance");
+            goto raise_error;
+        }
+        #else
+        type = (PyObject*) Py_TYPE(type);
+        Py_INCREF(type);
+        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+            PyErr_SetString(PyExc_TypeError,
+                "raise: exception class must be a subclass of BaseException");
+            goto raise_error;
+        }
+        #endif
+    }
+    __Pyx_ErrRestore(type, value, tb);
+    return;
+raise_error:
+    Py_XDECREF(value);
+    Py_XDECREF(type);
+    Py_XDECREF(tb);
+    return;
+}
+#else /* Python 3+ */
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+    PyObject* owned_instance = NULL;
+    if (tb == Py_None) {
+        tb = 0;
+    } else if (tb && !PyTraceBack_Check(tb)) {
+        PyErr_SetString(PyExc_TypeError,
+            "raise: arg 3 must be a traceback or None");
+        goto bad;
+    }
+    if (value == Py_None)
+        value = 0;
+    if (PyExceptionInstance_Check(type)) {
+        if (value) {
+            PyErr_SetString(PyExc_TypeError,
+                "instance exception may not have a separate value");
+            goto bad;
+        }
+        value = type;
+        type = (PyObject*) Py_TYPE(value);
+    } else if (PyExceptionClass_Check(type)) {
+        PyObject *instance_class = NULL;
+        if (value && PyExceptionInstance_Check(value)) {
+            instance_class = (PyObject*) Py_TYPE(value);
+            if (instance_class != type) {
+                if (PyObject_IsSubclass(instance_class, type)) {
+                    type = instance_class;
+                } else {
+                    instance_class = NULL;
+                }
+            }
+        }
+        if (!instance_class) {
+            PyObject *args;
+            if (!value)
+                args = PyTuple_New(0);
+            else if (PyTuple_Check(value)) {
+                Py_INCREF(value);
+                args = value;
+            } else
+                args = PyTuple_Pack(1, value);
+            if (!args)
+                goto bad;
+            owned_instance = PyObject_Call(type, args, NULL);
+            Py_DECREF(args);
+            if (!owned_instance)
+                goto bad;
+            value = owned_instance;
+            if (!PyExceptionInstance_Check(value)) {
+                PyErr_Format(PyExc_TypeError,
+                             "calling %R should have returned an instance of "
+                             "BaseException, not %R",
+                             type, Py_TYPE(value));
+                goto bad;
+            }
+        }
+    } else {
+        PyErr_SetString(PyExc_TypeError,
+            "raise: exception class must be a subclass of BaseException");
+        goto bad;
+    }
+#if PY_VERSION_HEX >= 0x03030000
+    if (cause) {
+#else
+    if (cause && cause != Py_None) {
+#endif
+        PyObject *fixed_cause;
+        if (cause == Py_None) {
+            fixed_cause = NULL;
+        } else if (PyExceptionClass_Check(cause)) {
+            fixed_cause = PyObject_CallObject(cause, NULL);
+            if (fixed_cause == NULL)
+                goto bad;
+        } else if (PyExceptionInstance_Check(cause)) {
+            fixed_cause = cause;
+            Py_INCREF(fixed_cause);
+        } else {
+            PyErr_SetString(PyExc_TypeError,
+                            "exception causes must derive from "
+                            "BaseException");
+            goto bad;
+        }
+        PyException_SetCause(value, fixed_cause);
+    }
+    PyErr_SetObject(type, value);
+    if (tb) {
+        PyThreadState *tstate = PyThreadState_GET();
+        PyObject* tmp_tb = tstate->curexc_traceback;
+        if (tb != tmp_tb) {
+            Py_INCREF(tb);
+            tstate->curexc_traceback = tb;
+            Py_XDECREF(tmp_tb);
+        }
+    }
+bad:
+    Py_XDECREF(owned_instance);
+    return;
+}
+#endif
+
+/* Check that obj is an instance of type; return 1 on success.
+ * On failure a SystemError (missing type object) or TypeError is set
+ * and 0 is returned. */
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
+    if (likely(type)) {
+        if (likely(PyObject_TypeCheck(obj, type))) {
+            return 1;
+        }
+        PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
+                     Py_TYPE(obj)->tp_name, type->tp_name);
+    } else {
+        PyErr_SetString(PyExc_SystemError, "Missing type object");
+    }
+    return 0;
+}
+
+/* Return nonzero when the host stores the least-significant byte first. */
+static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
+  union { unsigned int word; unsigned char bytes[sizeof(unsigned int)]; } probe;
+  probe.word = 1u;
+  return probe.bytes[0] != 0;
+}
+/* Initialise a buffer-format parsing context: `stack` tracks the position
+   inside the expected dtype `type` while the buffer's format string is
+   consumed by __Pyx_BufFmt_CheckString. */
+static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
+                              __Pyx_BufFmt_StackElem* stack,
+                              __Pyx_TypeInfo* type) {
+  stack[0].field = &ctx->root;
+  stack[0].parent_offset = 0;
+  ctx->root.type = type;
+  ctx->root.name = "buffer dtype";
+  ctx->root.offset = 0;
+  ctx->head = stack;
+  ctx->head->field = &ctx->root;
+  ctx->fmt_offset = 0;
+  ctx->head->parent_offset = 0;
+  /* '@' = native packing/alignment, the format-string default. */
+  ctx->new_packmode = '@';
+  ctx->enc_packmode = '@';
+  ctx->new_count = 1;
+  ctx->enc_count = 0;
+  ctx->enc_type = 0;
+  ctx->is_complex = 0;
+  ctx->is_valid_array = 0;
+  ctx->struct_alignment = 0;
+  /* Descend through wrapping struct types ('S') so parsing starts at the
+     first concrete field of the expected dtype. */
+  while (type->typegroup == 'S') {
+    ++ctx->head;
+    ctx->head->field = type->fields;
+    ctx->head->parent_offset = 0;
+    type = type->fields->type;
+  }
+}
+/* Parse a decimal count from *ts, advancing *ts past the digits.
+   Returns the parsed value, or -1 (with *ts unchanged) when *ts does not
+   start with a digit. */
+static int __Pyx_BufFmt_ParseNumber(const char** ts) {
+    int count;
+    const char* t = *ts;
+    if (*t < '0' || *t > '9') {
+      return -1;
+    } else {
+        count = *t++ - '0';
+        /* BUGFIX: upper bound must include '9'; the previous `*t < '9'`
+           stopped before any non-leading 9, truncating e.g. "19" to 1 and
+           leaving the '9' to be misparsed as a separate count. */
+        while (*t >= '0' && *t <= '9') {
+            count *= 10;
+            count += *t++ - '0';
+        }
+    }
+    *ts = t;
+    return count;
+}
+/* Like __Pyx_BufFmt_ParseNumber, but set a ValueError when no digit was
+   found; still returns -1 in that case. */
+static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
+    int parsed = __Pyx_BufFmt_ParseNumber(ts);
+    if (parsed != -1)
+        return parsed;
+    /* First char was not a digit */
+    PyErr_Format(PyExc_ValueError,
+                 "Does not understand character buffer dtype format string ('%c')", **ts);
+    return parsed;
+}
+/* Set a ValueError for an unrecognised buffer format character. */
+static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
+  PyErr_Format(PyExc_ValueError,
+               "Unexpected format string character: '%c'", ch);
+}
+/* Human-readable description of a format character, used in mismatch
+   error messages (see __Pyx_BufFmt_RaiseExpected). */
+static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
+  switch (ch) {
+    case 'c': return "'char'";
+    case 'b': return "'signed char'";
+    case 'B': return "'unsigned char'";
+    case 'h': return "'short'";
+    case 'H': return "'unsigned short'";
+    case 'i': return "'int'";
+    case 'I': return "'unsigned int'";
+    case 'l': return "'long'";
+    case 'L': return "'unsigned long'";
+    case 'q': return "'long long'";
+    case 'Q': return "'unsigned long long'";
+    case 'f': return (is_complex ? "'complex float'" : "'float'");
+    case 'd': return (is_complex ? "'complex double'" : "'double'");
+    case 'g': return (is_complex ? "'complex long double'" : "'long double'");
+    case 'T': return "a struct";
+    case 'O': return "Python object";
+    case 'P': return "a pointer";
+    case 's': case 'p': return "a string";
+    case 0: return "end";
+    default: return "unparseable format string";
+  }
+}
+/* Byte size of a format character under standard packing ('=', '<', '>',
+   '!'), i.e. the fixed struct-module sizes.  0 with an exception set for
+   'g' (no standard size) or an unknown character. */
+static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
+  switch (ch) {
+    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+    case 'h': case 'H': return 2;
+    case 'i': case 'I': case 'l': case 'L': return 4;
+    case 'q': case 'Q': return 8;
+    case 'f': return (is_complex ? 8 : 4);
+    case 'd': return (is_complex ? 16 : 8);
+    case 'g': {
+      PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
+      return 0;
+    }
+    case 'O': case 'P': return sizeof(void*);
+    default:
+      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+      return 0;
+    }
+}
+/* Byte size of a format character under native packing ('@', '^'),
+   i.e. the compiler's own sizeof.  0 with an exception set on an
+   unknown character. */
+static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
+  switch (ch) {
+    case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+    case 'h': case 'H': return sizeof(short);
+    case 'i': case 'I': return sizeof(int);
+    case 'l': case 'L': return sizeof(long);
+    #ifdef HAVE_LONG_LONG
+    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
+    #endif
+    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
+    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
+    case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
+    case 'O': case 'P': return sizeof(void*);
+    default: {
+      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+      return 0;
+    }
+  }
+}
+/* Alignment probes: the compiler places `x` at the alignment boundary of
+   its type, so sizeof(probe) - sizeof(member) yields that type's native
+   alignment requirement (used by __Pyx_BufFmt_TypeCharToAlignment). */
+typedef struct { char c; short x; } __Pyx_st_short;
+typedef struct { char c; int x; } __Pyx_st_int;
+typedef struct { char c; long x; } __Pyx_st_long;
+typedef struct { char c; float x; } __Pyx_st_float;
+typedef struct { char c; double x; } __Pyx_st_double;
+typedef struct { char c; long double x; } __Pyx_st_longdouble;
+typedef struct { char c; void *x; } __Pyx_st_void_p;
+#ifdef HAVE_LONG_LONG
+typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
+#endif
+/* Native alignment requirement of a format character, derived from the
+   __Pyx_st_* probe structs above.  0 with an exception set on an unknown
+   character. */
+static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
+  switch (ch) {
+    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
+    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
+    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
+#ifdef HAVE_LONG_LONG
+    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
+#endif
+    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
+    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
+    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
+    case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
+    default:
+      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+      return 0;
+    }
+}
+/* These are for computing the padding at the end of the struct to align
+   on the first member of the struct.  This will probably be the same as
+   the alignment values above, but we don't have any guarantees.
+ */
+typedef struct { short x; char c; } __Pyx_pad_short;
+typedef struct { int x; char c; } __Pyx_pad_int;
+typedef struct { long x; char c; } __Pyx_pad_long;
+typedef struct { float x; char c; } __Pyx_pad_float;
+typedef struct { double x; char c; } __Pyx_pad_double;
+typedef struct { long double x; char c; } __Pyx_pad_longdouble;
+typedef struct { void *x; char c; } __Pyx_pad_void_p;
+#ifdef HAVE_LONG_LONG
+typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
+#endif
+/* Trailing padding a struct needs when its first member has this format
+   character, derived from the __Pyx_pad_* probe structs above.  0 with an
+   exception set on an unknown character. */
+static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
+  switch (ch) {
+    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
+    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
+    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
+#ifdef HAVE_LONG_LONG
+    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
+#endif
+    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
+    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
+    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
+    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
+    default:
+      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+      return 0;
+    }
+}
+/* Map a format character to Cython's internal type-group code:
+   'H' char, 'I' signed int, 'U' unsigned int, 'R' real float,
+   'C' complex float, 'O' Python object, 'P' pointer.
+   Returns 0 with an exception set on an unknown character. */
+static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
+  if (ch == 'c')
+    return 'H';
+  if (ch == 'b' || ch == 'h' || ch == 'i' ||
+      ch == 'l' || ch == 'q' || ch == 's' || ch == 'p')
+    return 'I';
+  if (ch == 'B' || ch == 'H' || ch == 'I' || ch == 'L' || ch == 'Q')
+    return 'U';
+  if (ch == 'f' || ch == 'd' || ch == 'g')
+    return is_complex ? 'C' : 'R';
+  if (ch == 'O')
+    return 'O';
+  if (ch == 'P')
+    return 'P';
+  __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+  return 0;
+}
+/* Set a descriptive ValueError for a dtype mismatch found while parsing a
+   format string, naming the expected field (and its parent struct when the
+   mismatch happened inside one). */
+static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
+  if (ctx->head == NULL || ctx->head->field == &ctx->root) {
+    const char* expected;
+    const char* quote;
+    if (ctx->head == NULL) {
+      /* Entire dtype already consumed: we expected the end of the string. */
+      expected = "end";
+      quote = "";
+    } else {
+      expected = ctx->head->field->type->name;
+      quote = "'";
+    }
+    PyErr_Format(PyExc_ValueError,
+                 "Buffer dtype mismatch, expected %s%s%s but got %s",
+                 quote, expected, quote,
+                 __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
+  } else {
+    __Pyx_StructField* field = ctx->head->field;
+    __Pyx_StructField* parent = (ctx->head - 1)->field;
+    PyErr_Format(PyExc_ValueError,
+                 "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
+                 field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
+                 parent->type->name, field->name);
+  }
+}
+/* Consume the accumulated "type chunk" (enc_type repeated enc_count times)
+   from the format string, advancing ctx->head through the expected dtype
+   and checking size / alignment / offset agreement.
+   Returns 0 on success, -1 with a Python exception set on mismatch. */
+static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
+  char group;
+  size_t size, offset, arraysize = 1;
+  if (ctx->enc_type == 0) return 0;
+  /* Expected field is an array: validate the dimensions recorded by
+     __pyx_buffmt_parse_array, or a 1-d 's'/'p' byte string. */
+  if (ctx->head->field->type->arraysize[0]) {
+    int i, ndim = 0;
+    if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
+        ctx->is_valid_array = ctx->head->field->type->ndim == 1;
+        ndim = 1;
+        if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
+            PyErr_Format(PyExc_ValueError,
+                         "Expected a dimension of size %zu, got %zu",
+                         ctx->head->field->type->arraysize[0], ctx->enc_count);
+            return -1;
+        }
+    }
+    if (!ctx->is_valid_array) {
+      PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
+                   ctx->head->field->type->ndim, ndim);
+      return -1;
+    }
+    for (i = 0; i < ctx->head->field->type->ndim; i++) {
+      arraysize *= ctx->head->field->type->arraysize[i];
+    }
+    ctx->is_valid_array = 0;
+    ctx->enc_count = 1;
+  }
+  group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
+  do {
+    __Pyx_StructField* field = ctx->head->field;
+    __Pyx_TypeInfo* type = field->type;
+    /* Native ('@'/'^') packing uses compiler sizes; everything else uses
+       the fixed standard sizes. */
+    if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
+      size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
+    } else {
+      size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
+    }
+    if (ctx->enc_packmode == '@') {
+      /* Native mode also aligns: round fmt_offset up to this member's
+         alignment boundary. */
+      size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
+      size_t align_mod_offset;
+      if (align_at == 0) return -1;
+      align_mod_offset = ctx->fmt_offset % align_at;
+      if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
+      if (ctx->struct_alignment == 0)
+          ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
+                                                                 ctx->is_complex);
+    }
+    if (type->size != size || type->typegroup != group) {
+      if (type->typegroup == 'C' && type->fields != NULL) {
+        /* Expected a complex type represented as a struct of two scalars:
+           descend into its fields and retry the match. */
+        size_t parent_offset = ctx->head->parent_offset + field->offset;
+        ++ctx->head;
+        ctx->head->field = type->fields;
+        ctx->head->parent_offset = parent_offset;
+        continue;
+      }
+      if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
+        /* char-group values match any same-size type: accept silently. */
+      } else {
+          __Pyx_BufFmt_RaiseExpected(ctx);
+          return -1;
+      }
+    }
+    offset = ctx->head->parent_offset + field->offset;
+    if (ctx->fmt_offset != offset) {
+      PyErr_Format(PyExc_ValueError,
+                   "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
+                   (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
+      return -1;
+    }
+    ctx->fmt_offset += size;
+    if (arraysize)
+      ctx->fmt_offset += (arraysize - 1) * size;
+    --ctx->enc_count; /* Consume from buffer string */
+    /* Advance ctx->head to the next expected field, popping finished
+       struct frames and pushing into nested structs as needed. */
+    while (1) {
+      if (field == &ctx->root) {
+        ctx->head = NULL;
+        if (ctx->enc_count != 0) {
+          __Pyx_BufFmt_RaiseExpected(ctx);
+          return -1;
+        }
+        break; /* breaks both loops as ctx->enc_count == 0 */
+      }
+      ctx->head->field = ++field;
+      if (field->type == NULL) {
+        /* End of this struct's field list: pop back to the parent frame. */
+        --ctx->head;
+        field = ctx->head->field;
+        continue;
+      } else if (field->type->typegroup == 'S') {
+        size_t parent_offset = ctx->head->parent_offset + field->offset;
+        if (field->type->fields->type == NULL) continue; /* empty struct */
+        field = field->type->fields;
+        ++ctx->head;
+        ctx->head->field = field;
+        ctx->head->parent_offset = parent_offset;
+        break;
+      } else {
+        break;
+      }
+    }
+  } while (ctx->enc_count);
+  ctx->enc_type = 0;
+  ctx->is_complex = 0;
+  return 0;
+}
+/* Parse a '(dim1,dim2,...)' array specification in a buffer format string,
+   validating each dimension against the expected dtype's arraysize.
+   On entry *tsp points at '('; on success it is advanced past ')'.
+   Returns Py_None (borrowed) on success, NULL with an exception on error. */
+static CYTHON_INLINE PyObject *
+__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
+{
+    const char *ts = *tsp;
+    int i = 0, number;
+    int ndim = ctx->head->field->type->ndim;
+    ++ts;  /* skip '(' */
+    if (ctx->new_count != 1) {
+        PyErr_SetString(PyExc_ValueError,
+                        "Cannot handle repeated arrays in format string");
+        return NULL;
+    }
+    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+    while (*ts && *ts != ')') {
+        switch (*ts) {
+            /* BUGFIX: advance past whitespace before continuing; the
+               previous bare `continue` left ts unchanged and spun forever
+               on any whitespace inside the parentheses. */
+            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':  ++ts; continue;
+            default:  break;  /* not a 'break' in the loop */
+        }
+        number = __Pyx_BufFmt_ExpectNumber(&ts);
+        if (number == -1) return NULL;
+        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
+            return PyErr_Format(PyExc_ValueError,
+                        "Expected a dimension of size %zu, got %d",
+                        ctx->head->field->type->arraysize[i], number);
+        if (*ts != ',' && *ts != ')')
+            return PyErr_Format(PyExc_ValueError,
+                                "Expected a comma in format string, got '%c'", *ts);
+        if (*ts == ',') ts++;
+        i++;
+    }
+    if (i != ndim)
+        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
+                            ctx->head->field->type->ndim, i);
+    if (!*ts) {
+        PyErr_SetString(PyExc_ValueError,
+                        "Unexpected end of format string, expected ')'");
+        return NULL;
+    }
+    ctx->is_valid_array = 1;
+    ctx->new_count = 1;
+    *tsp = ++ts;
+    return Py_None;
+}
+/* Validate a buffer format string `ts` against the expected dtype in `ctx`.
+   Returns the position after the parsed portion (end of string, or past a
+   closing '}' when called recursively for a substruct), or NULL with a
+   Python exception set on mismatch. */
+static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
+  int got_Z = 0;
+  while (1) {
+    switch(*ts) {
+      case 0:
+        /* End of string: flush the pending chunk and require that the
+           whole expected dtype has been consumed. */
+        if (ctx->enc_type != 0 && ctx->head == NULL) {
+          __Pyx_BufFmt_RaiseExpected(ctx);
+          return NULL;
+        }
+        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+        if (ctx->head != NULL) {
+          __Pyx_BufFmt_RaiseExpected(ctx);
+          return NULL;
+        }
+        return ts;
+      case ' ':
+      case '\r':
+      case '\n':
+        ++ts;
+        break;
+      case '<':
+        if (!__Pyx_IsLittleEndian()) {
+          PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
+          return NULL;
+        }
+        ctx->new_packmode = '=';
+        ++ts;
+        break;
+      case '>':
+      case '!':
+        if (__Pyx_IsLittleEndian()) {
+          PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
+          return NULL;
+        }
+        ctx->new_packmode = '=';
+        ++ts;
+        break;
+      case '=':
+      case '@':
+      case '^':
+        ctx->new_packmode = *ts++;
+        break;
+      case 'T': /* substruct */
+        {
+          const char* ts_after_sub;
+          size_t i, struct_count = ctx->new_count;
+          size_t struct_alignment = ctx->struct_alignment;
+          ctx->new_count = 1;
+          ++ts;
+          if (*ts != '{') {
+            PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
+            return NULL;
+          }
+          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+          ctx->enc_type = 0; /* Erase processed last struct element */
+          ctx->enc_count = 0;
+          ctx->struct_alignment = 0;
+          ++ts;
+          ts_after_sub = ts;
+          /* Re-parse the substruct body once per requested repeat count. */
+          for (i = 0; i != struct_count; ++i) {
+            ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
+            if (!ts_after_sub) return NULL;
+          }
+          ts = ts_after_sub;
+          if (struct_alignment) ctx->struct_alignment = struct_alignment;
+        }
+        break;
+      case '}': /* end of substruct; either repeat or move on */
+        {
+          size_t alignment = ctx->struct_alignment;
+          ++ts;
+          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+          ctx->enc_type = 0; /* Erase processed last struct element */
+          if (alignment && ctx->fmt_offset % alignment) {
+            /* Pad the struct tail out to its overall alignment. */
+            ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
+          }
+        }
+        return ts;
+      case 'x':
+        /* Pad bytes: just advance the running offset. */
+        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+        ctx->fmt_offset += ctx->new_count;
+        ctx->new_count = 1;
+        ctx->enc_count = 0;
+        ctx->enc_type = 0;
+        ctx->enc_packmode = ctx->new_packmode;
+        ++ts;
+        break;
+      case 'Z':
+        /* Complex prefix: must be followed by f/d/g. */
+        got_Z = 1;
+        ++ts;
+        if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
+          __Pyx_BufFmt_RaiseUnexpectedChar('Z');
+          return NULL;
+        }
+        /* intentional fall through to the scalar cases below */
+      case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
+      case 'l': case 'L': case 'q': case 'Q':
+      case 'f': case 'd': case 'g':
+      case 'O': case 'p':
+        if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
+            ctx->enc_packmode == ctx->new_packmode) {
+          /* Same type as the pending chunk: extend its repeat count. */
+          ctx->enc_count += ctx->new_count;
+          ctx->new_count = 1;
+          got_Z = 0;
+          ++ts;
+          break;
+        }
+        /* intentional fall through: flush the old chunk, start a new one */
+      case 's':
+        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+        ctx->enc_count = ctx->new_count;
+        ctx->enc_packmode = ctx->new_packmode;
+        ctx->enc_type = *ts;
+        ctx->is_complex = got_Z;
+        ++ts;
+        ctx->new_count = 1;
+        got_Z = 0;
+        break;
+      case ':':
+        /* Skip a ':name:' field annotation. */
+        ++ts;
+        while(*ts != ':') ++ts;
+        ++ts;
+        break;
+      case '(':
+        if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
+        break;
+      default:
+        {
+          /* A repeat count for the next type character. */
+          int number = __Pyx_BufFmt_ExpectNumber(&ts);
+          if (number == -1) return NULL;
+          ctx->new_count = (size_t)number;
+        }
+    }
+  }
+}
+/* Reset a Py_buffer to a harmless empty state, pointing shape/strides at
+   the shared zero arrays (used for None arguments and failure paths). */
+static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
+  buf->obj = NULL;
+  buf->buf = NULL;
+  buf->suboffsets = __Pyx_minusones;
+  buf->shape = __Pyx_zeros;
+  buf->strides = __Pyx_zeros;
+}
+/* Acquire a buffer from `obj` and validate its dimensionality, dtype
+   (via the format string, unless `cast` is set) and itemsize against the
+   expected `dtype`.  None/NULL yields a zeroed buffer and success.
+   Returns 0 on success, -1 with an exception set (buffer zeroed) on
+   failure. */
+static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
+        Py_buffer* buf, PyObject* obj,  __Pyx_TypeInfo* dtype, int flags,
+        int nd, int cast, __Pyx_BufFmt_StackElem* stack)
+{
+  if (obj == Py_None || obj == NULL) {
+    __Pyx_ZeroBuffer(buf);
+    return 0;
+  }
+  buf->buf = NULL;
+  if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
+  if (buf->ndim != nd) {
+    PyErr_Format(PyExc_ValueError,
+                 "Buffer has wrong number of dimensions (expected %d, got %d)",
+                 nd, buf->ndim);
+    goto fail;
+  }
+  if (!cast) {
+    /* Full format-string check against the expected dtype. */
+    __Pyx_BufFmt_Context ctx;
+    __Pyx_BufFmt_Init(&ctx, stack, dtype);
+    if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
+  }
+  if ((unsigned)buf->itemsize != dtype->size) {
+    PyErr_Format(PyExc_ValueError,
+      "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
+      buf->itemsize, (buf->itemsize > 1) ? "s" : "",
+      dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
+    goto fail;
+  }
+  /* Normalise missing suboffsets to the shared -1 sentinel array. */
+  if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
+  return 0;
+fail:;
+  __Pyx_ZeroBuffer(buf);
+  return -1;
+}
+/* Release a buffer filled by __Pyx_GetBufferAndValidate.  A zeroed buffer
+   (buf == NULL) is a no-op; the shared __Pyx_minusones suboffsets sentinel
+   is reset to NULL before the real release. */
+static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
+  if (info->buf != NULL) {
+    if (info->suboffsets == __Pyx_minusones)
+      info->suboffsets = NULL;
+    __Pyx_ReleaseBuffer(info);
+  }
+}
+
+/* Raised when, after a failed buffer assignment, reacquiring the previous
+   buffer also fails. */
+static void __Pyx_RaiseBufferFallbackError(void) {
+  PyErr_SetString(PyExc_ValueError,
+     "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
+}
+
+/* Floor division for Py_ssize_t: C's `/` truncates toward zero, but Python
+   semantics require rounding toward negative infinity. */
+static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
+    Py_ssize_t quot = a / b;
+    Py_ssize_t rem = a - quot * b;
+    /* Nonzero remainder with operands of opposite sign means the truncated
+       quotient is one above the floor: step it down. */
+    if (rem != 0 && ((rem ^ b) < 0))
+        --quot;
+    return quot;
+}
+
+/* Set an IndexError for an out-of-bounds buffer access on `axis`. */
+static void __Pyx_RaiseBufferIndexError(int axis) {
+  PyErr_Format(PyExc_IndexError,
+     "Out of bounds on buffer access (axis %d)", axis);
+}
+
+/* dict.get(key, default): returns a NEW reference to the value or the
+   default; returns NULL only when the lookup itself raised (not for a
+   merely missing key). */
+static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value) {
+    PyObject* value;
+#if PY_MAJOR_VERSION >= 3
+    value = PyDict_GetItemWithError(d, key);
+    if (unlikely(!value)) {
+        if (unlikely(PyErr_Occurred()))
+            return NULL;
+        value = default_value;
+    }
+    Py_INCREF(value);
+#else
+    if (PyString_CheckExact(key) || PyUnicode_CheckExact(key) || PyInt_CheckExact(key)) {
+        /* Exact builtin key types: the error-swallowing PyDict_GetItem is
+           safe since their hash/compare cannot raise. */
+        value = PyDict_GetItem(d, key);
+        if (unlikely(!value)) {
+            value = default_value;
+        }
+        Py_INCREF(value);
+    } else {
+        /* Arbitrary key types: defer to d.get(key, default) to propagate
+           errors correctly. */
+        if (default_value == Py_None)
+            default_value = NULL;
+        value = PyObject_CallMethodObjArgs(
+            d, __pyx_n_s_get, key, default_value, NULL);
+    }
+#endif
+    return value;
+}
+
+/* Set the TypeError raised when None is unpacked/iterated. */
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
+    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+}
+
+/* Store the C vtable pointer in an extension type's dict (wrapped in a
+   PyCapsule, or a PyCObject on pre-2.7/3.0 Pythons).
+   Returns 0 on success, -1 with an exception set on failure. */
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0)
+    PyObject *ob = PyCapsule_New(vtable, 0, 0);
+#else
+    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
+#endif
+    if (!ob)
+        goto bad;
+    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
+        goto bad;
+    Py_DECREF(ob);
+    return 0;
+bad:
+    Py_XDECREF(ob);
+    return -1;
+}
+
+/* getattr(module, name) for `from module import name`, converting an
+   AttributeError into the ImportError Python code expects.
+   Returns a new reference or NULL with an exception set. */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
+    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
+    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
+        PyErr_Format(PyExc_ImportError,
+        #if PY_MAJOR_VERSION < 3
+            "cannot import name %.230s", PyString_AS_STRING(name));
+        #else
+            "cannot import name %S", name);
+        #endif
+    }
+    return value;
+}
+
+/* Resolve `name` in the class namespace `nmspace`, falling back to the
+   module globals when the attribute lookup yields nothing. */
+static PyObject *__Pyx_GetNameInClass(PyObject *nmspace, PyObject *name) {
+    PyObject *attr = __Pyx_PyObject_GetAttrStr(nmspace, name);
+    if (attr)
+        return attr;
+    return __Pyx_GetModuleGlobalName(name);
+}
+
+#if PY_MAJOR_VERSION < 3
+/* Python 2 replacement for PyObject_GetBuffer: try the new buffer protocol
+   (2.6+), then the numpy ndarray __getbuffer__ generated in this module,
+   then (pre-2.6) a '__pyx_getbuffer' CObject stashed in the type dict.
+   Returns -1 with a TypeError set when no buffer interface is found. */
+static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
+  #if PY_VERSION_HEX >= 0x02060000
+    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
+  #endif
+        if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
+  #if PY_VERSION_HEX < 0x02060000
+    if (obj->ob_type->tp_dict) {
+        PyObject *getbuffer_cobj = PyObject_GetItem(
+            obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer);
+        if (getbuffer_cobj) {
+            getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj);
+            Py_DECREF(getbuffer_cobj);
+            if (!func)
+                goto fail;
+            return func(obj, view, flags);
+        } else {
+            PyErr_Clear();
+        }
+    }
+  #endif
+    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
+#if PY_VERSION_HEX < 0x02060000
+fail:
+#endif
+    return -1;
+}
+/* Python 2 replacement for PyBuffer_Release, mirroring the lookup order of
+   __Pyx_GetBuffer.  Release failures are reported via PyErr_WriteUnraisable
+   rather than raised; view->obj is dropped and cleared on exit. */
+static void __Pyx_ReleaseBuffer(Py_buffer *view) {
+    PyObject *obj = view->obj;
+    if (!obj) return;
+  #if PY_VERSION_HEX >= 0x02060000
+    if (PyObject_CheckBuffer(obj)) {
+        PyBuffer_Release(view);
+        return;
+    }
+  #endif
+        if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; }
+  #if PY_VERSION_HEX < 0x02060000
+    if (obj->ob_type->tp_dict) {
+        PyObject *releasebuffer_cobj = PyObject_GetItem(
+            obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer);
+        if (releasebuffer_cobj) {
+            releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj);
+            Py_DECREF(releasebuffer_cobj);
+            if (!func)
+                goto fail;
+            func(obj, view);
+            return;
+        } else {
+            PyErr_Clear();
+        }
+    }
+  #endif
+    goto nofail;
+#if PY_VERSION_HEX < 0x02060000
+fail:
+#endif
+    PyErr_WriteUnraisable(obj);
+nofail:
+    Py_DECREF(obj);
+    view->obj = NULL;
+}
+#endif /*  PY_MAJOR_VERSION < 3 */
+
+
+/* __import__(name, globals, {}, from_list, level) with Cython's fallbacks:
+   for level == -1 on Python 3, a package-relative import (level 1) is tried
+   first when this module lives inside a package, then an absolute import;
+   the builtin __import__ object is used before Python 3.3.
+   Returns a new module reference, or NULL with an exception set. */
+        static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+    PyObject *empty_list = 0;
+    PyObject *module = 0;
+    PyObject *global_dict = 0;
+    PyObject *empty_dict = 0;
+    PyObject *list;
+    #if PY_VERSION_HEX < 0x03030000
+    PyObject *py_import;
+    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
+    if (!py_import)
+        goto bad;
+    #endif
+    if (from_list)
+        list = from_list;
+    else {
+        empty_list = PyList_New(0);
+        if (!empty_list)
+            goto bad;
+        list = empty_list;
+    }
+    global_dict = PyModule_GetDict(__pyx_m);
+    if (!global_dict)
+        goto bad;
+    empty_dict = PyDict_New();
+    if (!empty_dict)
+        goto bad;
+    #if PY_VERSION_HEX >= 0x02050000
+    {
+        #if PY_MAJOR_VERSION >= 3
+        if (level == -1) {
+            /* Dotted module name: attempt a package-relative import first,
+               clearing only ImportError before the absolute retry below. */
+            if (strchr(__Pyx_MODULE_NAME, '.')) {
+                #if PY_VERSION_HEX < 0x03030000
+                PyObject *py_level = PyInt_FromLong(1);
+                if (!py_level)
+                    goto bad;
+                module = PyObject_CallFunctionObjArgs(py_import,
+                    name, global_dict, empty_dict, list, py_level, NULL);
+                Py_DECREF(py_level);
+                #else
+                module = PyImport_ImportModuleLevelObject(
+                    name, global_dict, empty_dict, list, 1);
+                #endif
+                if (!module) {
+                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
+                        goto bad;
+                    PyErr_Clear();
+                }
+            }
+            level = 0; /* try absolute import on failure */
+        }
+        #endif
+        if (!module) {
+            #if PY_VERSION_HEX < 0x03030000
+            PyObject *py_level = PyInt_FromLong(level);
+            if (!py_level)
+                goto bad;
+            module = PyObject_CallFunctionObjArgs(py_import,
+                name, global_dict, empty_dict, list, py_level, NULL);
+            Py_DECREF(py_level);
+            #else
+            module = PyImport_ImportModuleLevelObject(
+                name, global_dict, empty_dict, list, level);
+            #endif
+        }
+    }
+    #else
+    if (level>0) {
+        PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
+        goto bad;
+    }
+    module = PyObject_CallFunctionObjArgs(py_import,
+        name, global_dict, empty_dict, list, NULL);
+    #endif
+bad:
+    #if PY_VERSION_HEX < 0x03030000
+    Py_XDECREF(py_import);
+    #endif
+    Py_XDECREF(empty_list);
+    Py_XDECREF(empty_dict);
+    return module;
+}
+
+/* Convert `x` (in scope at the expansion site, along with `is_unsigned`)
+   via `func`, verify the result round-trips through target_type when that
+   type is narrower, and return it; on overflow set OverflowError and
+   return (target_type)-1. */
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func)             \
+    {                                                                     \
+        func_type value = func(x);                                        \
+        if (sizeof(target_type) < sizeof(func_type)) {                    \
+            if (unlikely(value != (func_type) (target_type) value)) {     \
+                func_type zero = 0;                                       \
+                PyErr_SetString(PyExc_OverflowError,                      \
+                    (is_unsigned && unlikely(value < zero)) ?             \
+                    "can't convert negative value to " #target_type :     \
+                    "value too large to convert to " #target_type);       \
+                return (target_type) -1;                                  \
+            }                                                             \
+        }                                                                 \
+        return (target_type) value;                                       \
+    }
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+/* Convert a Python number to a C int, raising OverflowError when it does
+   not fit; non-int objects are coerced via __Pyx_PyNumber_Int first.
+   Returns (int)-1 on error, so callers must also check PyErr_Occurred(). */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
+    const int neg_one = (int) -1, const_zero = 0;
+    /* Compile-time signedness probe for the target C type. */
+    const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_Check(x))) {
+        if (sizeof(int) < sizeof(long)) {
+            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG)
+        } else {
+            long val = PyInt_AS_LONG(x);
+            if (is_unsigned && unlikely(val < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to int");
+                return (int) -1;
+            }
+            return (int) val;
+        }
+    } else
+#endif
+    if (likely(PyLong_Check(x))) {
+        if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Fast path: read a 0/1-digit PyLong directly from ob_digit. */
+            if (sizeof(digit) <= sizeof(int)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return (int) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (unlikely(Py_SIZE(x) < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to int");
+                return (int) -1;
+            }
+            if (sizeof(int) <= sizeof(unsigned long)) {
+                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong)
+            } else if (sizeof(int) <= sizeof(unsigned long long)) {
+                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong)
+            }
+        } else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Signed fast path, covering -1/0/+1-digit PyLongs. */
+            if (sizeof(digit) <= sizeof(int)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return +(int) ((PyLongObject*)x)->ob_digit[0];
+                    case -1: return -(int) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (sizeof(int) <= sizeof(long)) {
+                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong)
+            } else if (sizeof(int) <= sizeof(long long)) {
+                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong)
+            }
+        }
+        {
+            /* int wider than every native conversion above: go through a
+               raw byte-array copy of the PyLong. */
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+            PyErr_SetString(PyExc_RuntimeError,
+                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+            int val;
+            PyObject *v = __Pyx_PyNumber_Int(x);
+ #if PY_MAJOR_VERSION < 3
+            if (likely(v) && !PyLong_Check(v)) {
+                PyObject *tmp = v;
+                v = PyNumber_Long(tmp);
+                Py_DECREF(tmp);
+            }
+ #endif
+            if (likely(v)) {
+                /* Runtime endianness probe for _PyLong_AsByteArray. */
+                int one = 1; int is_little = (int)*(unsigned char *)&one;
+                unsigned char *bytes = (unsigned char *)&val;
+                int ret = _PyLong_AsByteArray((PyLongObject *)v,
+                                              bytes, sizeof(val),
+                                              is_little, !is_unsigned);
+                Py_DECREF(v);
+                if (likely(!ret))
+                    return val;
+            }
+#endif
+            return (int) -1;
+        }
+    } else {
+        /* Not an int/long: coerce to an integer object and retry once. */
+        int val;
+        PyObject *tmp = __Pyx_PyNumber_Int(x);
+        if (!tmp) return (int) -1;
+        val = __Pyx_PyInt_As_int(tmp);
+        Py_DECREF(tmp);
+        return val;
+    }
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_uint16(npy_uint16 value) {
+    /* Convert a C npy_uint16 to a Python integer object. */
+    const npy_uint16 neg_one = (npy_uint16) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;
+    if (is_unsigned) {
+        /* Unsigned source: use the smallest Python constructor that fits. */
+        if (sizeof(npy_uint16) < sizeof(long))
+            return PyInt_FromLong((long) value);
+        if (sizeof(npy_uint16) <= sizeof(unsigned long))
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        if (sizeof(npy_uint16) <= sizeof(unsigned long long))
+            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+    } else {
+        if (sizeof(npy_uint16) <= sizeof(long))
+            return PyInt_FromLong((long) value);
+        if (sizeof(npy_uint16) <= sizeof(long long))
+            return PyLong_FromLongLong((long long) value);
+    }
+    {
+        /* Exotic-width fallback: hand the raw bytes to CPython. */
+        int probe = 1;
+        int is_little = (int)*(unsigned char *)&probe;  /* host endianness */
+        unsigned char *raw = (unsigned char *)&value;
+        return _PyLong_FromByteArray(raw, sizeof(npy_uint16),
+                                     is_little, !is_unsigned);
+    }
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
+    /* Convert a C int to a Python integer object. */
+    const int neg_one = (int) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;
+    if (is_unsigned) {
+        /* Unsigned-target arm of the generic template. */
+        if (sizeof(int) < sizeof(long))
+            return PyInt_FromLong((long) value);
+        if (sizeof(int) <= sizeof(unsigned long))
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        if (sizeof(int) <= sizeof(unsigned long long))
+            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+    } else {
+        if (sizeof(int) <= sizeof(long))
+            return PyInt_FromLong((long) value);
+        if (sizeof(int) <= sizeof(long long))
+            return PyLong_FromLongLong((long long) value);
+    }
+    {
+        /* Exotic-width fallback: hand the raw bytes to CPython. */
+        int probe = 1;
+        int is_little = (int)*(unsigned char *)&probe;  /* host endianness */
+        unsigned char *raw = (unsigned char *)&value;
+        return _PyLong_FromByteArray(raw, sizeof(int),
+                                     is_little, !is_unsigned);
+    }
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int32(npy_int32 value) {
+    /* Convert a C npy_int32 to a Python integer object. */
+    const npy_int32 neg_one = (npy_int32) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;
+    if (is_unsigned) {
+        /* Unsigned-target arm of the generic template. */
+        if (sizeof(npy_int32) < sizeof(long))
+            return PyInt_FromLong((long) value);
+        if (sizeof(npy_int32) <= sizeof(unsigned long))
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        if (sizeof(npy_int32) <= sizeof(unsigned long long))
+            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+    } else {
+        if (sizeof(npy_int32) <= sizeof(long))
+            return PyInt_FromLong((long) value);
+        if (sizeof(npy_int32) <= sizeof(long long))
+            return PyLong_FromLongLong((long long) value);
+    }
+    {
+        /* Exotic-width fallback: hand the raw bytes to CPython. */
+        int probe = 1;
+        int is_little = (int)*(unsigned char *)&probe;  /* host endianness */
+        unsigned char *raw = (unsigned char *)&value;
+        return _PyLong_FromByteArray(raw, sizeof(npy_int32),
+                                     is_little, !is_unsigned);
+    }
+}
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+/* Convert a Python integer object to a C npy_int32.
+ * On failure a Python exception is set and (npy_int32)-1 is returned;
+ * since -1 is also a valid result, callers disambiguate with
+ * PyErr_Occurred(). */
+static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) {
+    const npy_int32 neg_one = (npy_int32) -1, const_zero = 0;
+    /* Compile-time signedness probe for the target C type. */
+    const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_Check(x))) {
+        if (sizeof(npy_int32) < sizeof(long)) {
+            __PYX_VERIFY_RETURN_INT(npy_int32, long, PyInt_AS_LONG)
+        } else {
+            long val = PyInt_AS_LONG(x);
+            if (is_unsigned && unlikely(val < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to npy_int32");
+                return (npy_int32) -1;
+            }
+            return (npy_int32) val;
+        }
+    } else
+#endif
+    if (likely(PyLong_Check(x))) {
+        if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Fast path: read a single digit straight from PyLong internals. */
+            if (sizeof(digit) <= sizeof(npy_int32)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return (npy_int32) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (unlikely(Py_SIZE(x) < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to npy_int32");
+                return (npy_int32) -1;
+            }
+            if (sizeof(npy_int32) <= sizeof(unsigned long)) {
+                __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, PyLong_AsUnsignedLong)
+            } else if (sizeof(npy_int32) <= sizeof(unsigned long long)) {
+                __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long long, PyLong_AsUnsignedLongLong)
+            }
+        } else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Fast path for small signed values (|digits| <= 1). */
+            if (sizeof(digit) <= sizeof(npy_int32)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return +(npy_int32) ((PyLongObject*)x)->ob_digit[0];
+                    case -1: return -(npy_int32) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (sizeof(npy_int32) <= sizeof(long)) {
+                __PYX_VERIFY_RETURN_INT(npy_int32, long, PyLong_AsLong)
+            } else if (sizeof(npy_int32) <= sizeof(long long)) {
+                __PYX_VERIFY_RETURN_INT(npy_int32, long long, PyLong_AsLongLong)
+            }
+        }
+        {
+            /* Slow path: value wider than any native word; copy raw bytes. */
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+            PyErr_SetString(PyExc_RuntimeError,
+                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+            npy_int32 val;
+            PyObject *v = __Pyx_PyNumber_Int(x);
+ #if PY_MAJOR_VERSION < 3
+            if (likely(v) && !PyLong_Check(v)) {
+                PyObject *tmp = v;
+                v = PyNumber_Long(tmp);
+                Py_DECREF(tmp);
+            }
+ #endif
+            if (likely(v)) {
+                int one = 1; int is_little = (int)*(unsigned char *)&one;
+                unsigned char *bytes = (unsigned char *)&val;
+                int ret = _PyLong_AsByteArray((PyLongObject *)v,
+                                              bytes, sizeof(val),
+                                              is_little, !is_unsigned);
+                Py_DECREF(v);
+                if (likely(!ret))
+                    return val;
+            }
+#endif
+            return (npy_int32) -1;
+        }
+    } else {
+        /* Not an int/long: coerce via __Pyx_PyNumber_Int, then retry. */
+        npy_int32 val;
+        PyObject *tmp = __Pyx_PyNumber_Int(x);
+        if (!tmp) return (npy_int32) -1;
+        val = __Pyx_PyInt_As_npy_int32(tmp);
+        Py_DECREF(tmp);
+        return val;
+    }
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
+    /* Convert a C long to a Python integer object.
+     *
+     * This is the degenerate instantiation of the generic integer
+     * conversion template for T == long: `long` is signed, so the
+     * `is_unsigned` arm (whose first test was the tautologically false
+     * `sizeof(long) < sizeof(long)`) can never be taken, and
+     * `sizeof(long) <= sizeof(long)` is always true, so the signed arm
+     * always returns via PyInt_FromLong (PyLong_FromLong on Python 3).
+     * The _PyLong_FromByteArray fallback was therefore unreachable and
+     * has been dropped along with the dead branches. */
+    return PyInt_FromLong(value);
+}
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+/* Convert a Python integer object to a C npy_uint8.
+ * On failure a Python exception is set and (npy_uint8)-1 is returned;
+ * callers disambiguate the error case with PyErr_Occurred(). */
+static CYTHON_INLINE npy_uint8 __Pyx_PyInt_As_npy_uint8(PyObject *x) {
+    const npy_uint8 neg_one = (npy_uint8) -1, const_zero = 0;
+    /* Compile-time signedness probe for the target C type. */
+    const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_Check(x))) {
+        if (sizeof(npy_uint8) < sizeof(long)) {
+            __PYX_VERIFY_RETURN_INT(npy_uint8, long, PyInt_AS_LONG)
+        } else {
+            long val = PyInt_AS_LONG(x);
+            if (is_unsigned && unlikely(val < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to npy_uint8");
+                return (npy_uint8) -1;
+            }
+            return (npy_uint8) val;
+        }
+    } else
+#endif
+    if (likely(PyLong_Check(x))) {
+        if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Fast path: read a single digit straight from PyLong internals. */
+            if (sizeof(digit) <= sizeof(npy_uint8)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return (npy_uint8) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (unlikely(Py_SIZE(x) < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to npy_uint8");
+                return (npy_uint8) -1;
+            }
+            if (sizeof(npy_uint8) <= sizeof(unsigned long)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long, PyLong_AsUnsignedLong)
+            } else if (sizeof(npy_uint8) <= sizeof(unsigned long long)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long long, PyLong_AsUnsignedLongLong)
+            }
+        } else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Fast path for small signed values (|digits| <= 1). */
+            if (sizeof(digit) <= sizeof(npy_uint8)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return +(npy_uint8) ((PyLongObject*)x)->ob_digit[0];
+                    case -1: return -(npy_uint8) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (sizeof(npy_uint8) <= sizeof(long)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint8, long, PyLong_AsLong)
+            } else if (sizeof(npy_uint8) <= sizeof(long long)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint8, long long, PyLong_AsLongLong)
+            }
+        }
+        {
+            /* Slow path: value wider than any native word; copy raw bytes. */
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+            PyErr_SetString(PyExc_RuntimeError,
+                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+            npy_uint8 val;
+            PyObject *v = __Pyx_PyNumber_Int(x);
+ #if PY_MAJOR_VERSION < 3
+            if (likely(v) && !PyLong_Check(v)) {
+                PyObject *tmp = v;
+                v = PyNumber_Long(tmp);
+                Py_DECREF(tmp);
+            }
+ #endif
+            if (likely(v)) {
+                int one = 1; int is_little = (int)*(unsigned char *)&one;
+                unsigned char *bytes = (unsigned char *)&val;
+                int ret = _PyLong_AsByteArray((PyLongObject *)v,
+                                              bytes, sizeof(val),
+                                              is_little, !is_unsigned);
+                Py_DECREF(v);
+                if (likely(!ret))
+                    return val;
+            }
+#endif
+            return (npy_uint8) -1;
+        }
+    } else {
+        /* Not an int/long: coerce via __Pyx_PyNumber_Int, then retry. */
+        npy_uint8 val;
+        PyObject *tmp = __Pyx_PyNumber_Int(x);
+        if (!tmp) return (npy_uint8) -1;
+        val = __Pyx_PyInt_As_npy_uint8(tmp);
+        Py_DECREF(tmp);
+        return val;
+    }
+}
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+/* Convert a Python integer object to a C npy_uint16.
+ * On failure a Python exception is set and (npy_uint16)-1 is returned;
+ * callers disambiguate the error case with PyErr_Occurred(). */
+static CYTHON_INLINE npy_uint16 __Pyx_PyInt_As_npy_uint16(PyObject *x) {
+    const npy_uint16 neg_one = (npy_uint16) -1, const_zero = 0;
+    /* Compile-time signedness probe for the target C type. */
+    const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_Check(x))) {
+        if (sizeof(npy_uint16) < sizeof(long)) {
+            __PYX_VERIFY_RETURN_INT(npy_uint16, long, PyInt_AS_LONG)
+        } else {
+            long val = PyInt_AS_LONG(x);
+            if (is_unsigned && unlikely(val < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to npy_uint16");
+                return (npy_uint16) -1;
+            }
+            return (npy_uint16) val;
+        }
+    } else
+#endif
+    if (likely(PyLong_Check(x))) {
+        if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Fast path: read a single digit straight from PyLong internals. */
+            if (sizeof(digit) <= sizeof(npy_uint16)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return (npy_uint16) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (unlikely(Py_SIZE(x) < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to npy_uint16");
+                return (npy_uint16) -1;
+            }
+            if (sizeof(npy_uint16) <= sizeof(unsigned long)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long, PyLong_AsUnsignedLong)
+            } else if (sizeof(npy_uint16) <= sizeof(unsigned long long)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long long, PyLong_AsUnsignedLongLong)
+            }
+        } else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Fast path for small signed values (|digits| <= 1). */
+            if (sizeof(digit) <= sizeof(npy_uint16)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return +(npy_uint16) ((PyLongObject*)x)->ob_digit[0];
+                    case -1: return -(npy_uint16) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (sizeof(npy_uint16) <= sizeof(long)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint16, long, PyLong_AsLong)
+            } else if (sizeof(npy_uint16) <= sizeof(long long)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint16, long long, PyLong_AsLongLong)
+            }
+        }
+        {
+            /* Slow path: value wider than any native word; copy raw bytes. */
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+            PyErr_SetString(PyExc_RuntimeError,
+                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+            npy_uint16 val;
+            PyObject *v = __Pyx_PyNumber_Int(x);
+ #if PY_MAJOR_VERSION < 3
+            if (likely(v) && !PyLong_Check(v)) {
+                PyObject *tmp = v;
+                v = PyNumber_Long(tmp);
+                Py_DECREF(tmp);
+            }
+ #endif
+            if (likely(v)) {
+                int one = 1; int is_little = (int)*(unsigned char *)&one;
+                unsigned char *bytes = (unsigned char *)&val;
+                int ret = _PyLong_AsByteArray((PyLongObject *)v,
+                                              bytes, sizeof(val),
+                                              is_little, !is_unsigned);
+                Py_DECREF(v);
+                if (likely(!ret))
+                    return val;
+            }
+#endif
+            return (npy_uint16) -1;
+        }
+    } else {
+        /* Not an int/long: coerce via __Pyx_PyNumber_Int, then retry. */
+        npy_uint16 val;
+        PyObject *tmp = __Pyx_PyNumber_Int(x);
+        if (!tmp) return (npy_uint16) -1;
+        val = __Pyx_PyInt_As_npy_uint16(tmp);
+        Py_DECREF(tmp);
+        return val;
+    }
+}
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+/* Convert a Python integer object to a C npy_int8.
+ * On failure a Python exception is set and (npy_int8)-1 is returned;
+ * callers disambiguate the error case with PyErr_Occurred(). */
+static CYTHON_INLINE npy_int8 __Pyx_PyInt_As_npy_int8(PyObject *x) {
+    const npy_int8 neg_one = (npy_int8) -1, const_zero = 0;
+    /* Compile-time signedness probe for the target C type. */
+    const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_Check(x))) {
+        if (sizeof(npy_int8) < sizeof(long)) {
+            __PYX_VERIFY_RETURN_INT(npy_int8, long, PyInt_AS_LONG)
+        } else {
+            long val = PyInt_AS_LONG(x);
+            if (is_unsigned && unlikely(val < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to npy_int8");
+                return (npy_int8) -1;
+            }
+            return (npy_int8) val;
+        }
+    } else
+#endif
+    if (likely(PyLong_Check(x))) {
+        if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Fast path: read a single digit straight from PyLong internals. */
+            if (sizeof(digit) <= sizeof(npy_int8)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return (npy_int8) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (unlikely(Py_SIZE(x) < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to npy_int8");
+                return (npy_int8) -1;
+            }
+            if (sizeof(npy_int8) <= sizeof(unsigned long)) {
+                __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long, PyLong_AsUnsignedLong)
+            } else if (sizeof(npy_int8) <= sizeof(unsigned long long)) {
+                __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long long, PyLong_AsUnsignedLongLong)
+            }
+        } else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Fast path for small signed values (|digits| <= 1). */
+            if (sizeof(digit) <= sizeof(npy_int8)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return +(npy_int8) ((PyLongObject*)x)->ob_digit[0];
+                    case -1: return -(npy_int8) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (sizeof(npy_int8) <= sizeof(long)) {
+                __PYX_VERIFY_RETURN_INT(npy_int8, long, PyLong_AsLong)
+            } else if (sizeof(npy_int8) <= sizeof(long long)) {
+                __PYX_VERIFY_RETURN_INT(npy_int8, long long, PyLong_AsLongLong)
+            }
+        }
+        {
+            /* Slow path: value wider than any native word; copy raw bytes. */
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+            PyErr_SetString(PyExc_RuntimeError,
+                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+            npy_int8 val;
+            PyObject *v = __Pyx_PyNumber_Int(x);
+ #if PY_MAJOR_VERSION < 3
+            if (likely(v) && !PyLong_Check(v)) {
+                PyObject *tmp = v;
+                v = PyNumber_Long(tmp);
+                Py_DECREF(tmp);
+            }
+ #endif
+            if (likely(v)) {
+                int one = 1; int is_little = (int)*(unsigned char *)&one;
+                unsigned char *bytes = (unsigned char *)&val;
+                int ret = _PyLong_AsByteArray((PyLongObject *)v,
+                                              bytes, sizeof(val),
+                                              is_little, !is_unsigned);
+                Py_DECREF(v);
+                if (likely(!ret))
+                    return val;
+            }
+#endif
+            return (npy_int8) -1;
+        }
+    } else {
+        /* Not an int/long: coerce via __Pyx_PyNumber_Int, then retry. */
+        npy_int8 val;
+        PyObject *tmp = __Pyx_PyNumber_Int(x);
+        if (!tmp) return (npy_int8) -1;
+        val = __Pyx_PyInt_As_npy_int8(tmp);
+        Py_DECREF(tmp);
+        return val;
+    }
+}
+
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    /* C++: build a std::complex<float> from real/imag parts. */
+    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
+      return ::std::complex< float >(x, y);
+    }
+  #else
+    /* C99 _Complex: x + y*i. */
+    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
+      return x + y*(__pyx_t_float_complex)_Complex_I;
+    }
+  #endif
+#else
+    /* No native complex support: fill the emulated {real, imag} struct. */
+    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
+      __pyx_t_float_complex z;
+      z.real = x;
+      z.imag = y;
+      return z;
+    }
+#endif
+
+#if CYTHON_CCOMPLEX
+#else
+    /* Emulated float-complex arithmetic, compiled only when the C
+     * compiler has no native complex type.  Each helper mirrors the
+     * corresponding operator on the {real, imag} struct. */
+    static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+       return (a.real == b.real) && (a.imag == b.imag);
+    }
+    /* a + b */
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+        __pyx_t_float_complex z;
+        z.real = a.real + b.real;
+        z.imag = a.imag + b.imag;
+        return z;
+    }
+    /* a - b */
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+        __pyx_t_float_complex z;
+        z.real = a.real - b.real;
+        z.imag = a.imag - b.imag;
+        return z;
+    }
+    /* a * b */
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+        __pyx_t_float_complex z;
+        z.real = a.real * b.real - a.imag * b.imag;
+        z.imag = a.real * b.imag + a.imag * b.real;
+        return z;
+    }
+    /* a / b via multiplication by the conjugate; no overflow scaling,
+     * and division by zero yields inf/nan per IEEE semantics. */
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+        __pyx_t_float_complex z;
+        float denom = b.real * b.real + b.imag * b.imag;
+        z.real = (a.real * b.real + a.imag * b.imag) / denom;
+        z.imag = (a.imag * b.real - a.real * b.imag) / denom;
+        return z;
+    }
+    /* -a */
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
+        __pyx_t_float_complex z;
+        z.real = -a.real;
+        z.imag = -a.imag;
+        return z;
+    }
+    static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
+       return (a.real == 0) && (a.imag == 0);
+    }
+    /* complex conjugate */
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
+        __pyx_t_float_complex z;
+        z.real =  a.real;
+        z.imag = -a.imag;
+        return z;
+    }
+    #if 1
+        /* |z|: hypotf where available; plain sqrtf otherwise (MSVC or
+         * no HAVE_HYPOT), which can overflow for large components. */
+        static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
+          #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
+            return sqrtf(z.real*z.real + z.imag*z.imag);
+          #else
+            return hypotf(z.real, z.imag);
+          #endif
+        }
+        /* Complex power a**b for the emulated float-complex type.
+         * Exact small integer exponents (0..4 after folding a negative
+         * exponent into 1/a) are computed by repeated multiplication;
+         * all other cases go through the polar form exp(b*log(a)). */
+        static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+            __pyx_t_float_complex z;
+            float r, lnr, theta, z_r, z_theta;
+            if (b.imag == 0 && b.real == (int)b.real) {
+                if (b.real < 0) {
+                    /* a**(-n) == (1/a)**n: invert a, negate the exponent. */
+                    float denom = a.real * a.real + a.imag * a.imag;
+                    a.real = a.real / denom;
+                    a.imag = -a.imag / denom;
+                    b.real = -b.real;
+                }
+                switch ((int)b.real) {
+                    case 0:
+                        z.real = 1;
+                        z.imag = 0;
+                        return z;
+                    case 1:
+                        return a;
+                    case 2:
+                        /* fix: the product was computed twice with a dead
+                         * store to z; compute it exactly once. */
+                        return __Pyx_c_prodf(a, a);
+                    case 3:
+                        z = __Pyx_c_prodf(a, a);
+                        return __Pyx_c_prodf(z, a);
+                    case 4:
+                        z = __Pyx_c_prodf(a, a);
+                        return __Pyx_c_prodf(z, z);
+                }
+            }
+            if (a.imag == 0) {
+                if (a.real == 0) {
+                    return a;  /* 0**b: return 0 (as the original did) */
+                }
+                /* purely real base: skip the abs/atan2 work.  NOTE: for a
+                 * negative real base this keeps theta = 0 and logf(r) is NaN,
+                 * matching the original behavior. */
+                r = a.real;
+                theta = 0;
+            } else {
+                r = __Pyx_c_absf(a);
+                theta = atan2f(a.imag, a.real);
+            }
+            /* a**b = exp(b * log(a)) evaluated in polar coordinates. */
+            lnr = logf(r);
+            z_r = expf(lnr * b.real - theta * b.imag);
+            z_theta = theta * b.real + lnr * b.imag;
+            z.real = z_r * cosf(z_theta);
+            z.imag = z_r * sinf(z_theta);
+            return z;
+        }
+    #endif
+#endif
+
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    /* C++: build a std::complex<double> from real/imag parts. */
+    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
+      return ::std::complex< double >(x, y);
+    }
+  #else
+    /* C99 _Complex: x + y*i. */
+    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
+      return x + y*(__pyx_t_double_complex)_Complex_I;
+    }
+  #endif
+#else
+    /* No native complex support: fill the emulated {real, imag} struct. */
+    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
+      __pyx_t_double_complex z;
+      z.real = x;
+      z.imag = y;
+      return z;
+    }
+#endif
+
+#if CYTHON_CCOMPLEX
+#else
+    /* Emulated double-complex arithmetic, compiled only when the C
+     * compiler has no native complex type.  Each helper mirrors the
+     * corresponding operator on the {real, imag} struct. */
+    static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+       return (a.real == b.real) && (a.imag == b.imag);
+    }
+    /* a + b */
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+        __pyx_t_double_complex z;
+        z.real = a.real + b.real;
+        z.imag = a.imag + b.imag;
+        return z;
+    }
+    /* a - b */
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+        __pyx_t_double_complex z;
+        z.real = a.real - b.real;
+        z.imag = a.imag - b.imag;
+        return z;
+    }
+    /* a * b */
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+        __pyx_t_double_complex z;
+        z.real = a.real * b.real - a.imag * b.imag;
+        z.imag = a.real * b.imag + a.imag * b.real;
+        return z;
+    }
+    /* a / b via multiplication by the conjugate; no overflow scaling,
+     * and division by zero yields inf/nan per IEEE semantics. */
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+        __pyx_t_double_complex z;
+        double denom = b.real * b.real + b.imag * b.imag;
+        z.real = (a.real * b.real + a.imag * b.imag) / denom;
+        z.imag = (a.imag * b.real - a.real * b.imag) / denom;
+        return z;
+    }
+    /* -a */
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
+        __pyx_t_double_complex z;
+        z.real = -a.real;
+        z.imag = -a.imag;
+        return z;
+    }
+    static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
+       return (a.real == 0) && (a.imag == 0);
+    }
+    /* complex conjugate */
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
+        __pyx_t_double_complex z;
+        z.real =  a.real;
+        z.imag = -a.imag;
+        return z;
+    }
+    #if 1
+        /* |z|: hypot where available; plain sqrt otherwise (MSVC or
+         * no HAVE_HYPOT), which can overflow for large components. */
+        static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
+          #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
+            return sqrt(z.real*z.real + z.imag*z.imag);
+          #else
+            return hypot(z.real, z.imag);
+          #endif
+        }
+        /* Complex power a**b for the emulated double-complex type.
+         * Exact small integer exponents (0..4 after folding a negative
+         * exponent into 1/a) are computed by repeated multiplication;
+         * all other cases go through the polar form exp(b*log(a)). */
+        static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+            __pyx_t_double_complex z;
+            double r, lnr, theta, z_r, z_theta;
+            if (b.imag == 0 && b.real == (int)b.real) {
+                if (b.real < 0) {
+                    /* a**(-n) == (1/a)**n: invert a, negate the exponent. */
+                    double denom = a.real * a.real + a.imag * a.imag;
+                    a.real = a.real / denom;
+                    a.imag = -a.imag / denom;
+                    b.real = -b.real;
+                }
+                switch ((int)b.real) {
+                    case 0:
+                        z.real = 1;
+                        z.imag = 0;
+                        return z;
+                    case 1:
+                        return a;
+                    case 2:
+                        /* fix: the product was computed twice with a dead
+                         * store to z; compute it exactly once. */
+                        return __Pyx_c_prod(a, a);
+                    case 3:
+                        z = __Pyx_c_prod(a, a);
+                        return __Pyx_c_prod(z, a);
+                    case 4:
+                        z = __Pyx_c_prod(a, a);
+                        return __Pyx_c_prod(z, z);
+                }
+            }
+            if (a.imag == 0) {
+                if (a.real == 0) {
+                    return a;  /* 0**b: return 0 (as the original did) */
+                }
+                /* purely real base: skip the abs/atan2 work.  NOTE: for a
+                 * negative real base this keeps theta = 0 and log(r) is NaN,
+                 * matching the original behavior. */
+                r = a.real;
+                theta = 0;
+            } else {
+                r = __Pyx_c_abs(a);
+                theta = atan2(a.imag, a.real);
+            }
+            /* a**b = exp(b * log(a)) evaluated in polar coordinates. */
+            lnr = log(r);
+            z_r = exp(lnr * b.real - theta * b.imag);
+            z_theta = theta * b.real + lnr * b.imag;
+            z.real = z_r * cos(z_theta);
+            z.imag = z_r * sin(z_theta);
+            return z;
+        }
+    #endif
+#endif
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+/* Convert an arbitrary Python object to a C long.
+ * Cython-generated: tries PyInt (Py2 only), then PyLong fast paths
+ * (single-digit internals when CYTHON_USE_PYLONG_INTERNALS, then
+ * width-dispatched PyLong_As* calls), and finally falls back to a raw
+ * _PyLong_AsByteArray copy.  Returns (long)-1 with an exception set on
+ * failure; (long)-1 is therefore ambiguous and callers must check
+ * PyErr_Occurred(). */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
+    const long neg_one = (long) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_Check(x))) {
+        /* sizeof(long) < sizeof(long) is always false; the branch is dead
+         * code left by the code generator's type-generic template. */
+        if (sizeof(long) < sizeof(long)) {
+            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG)
+        } else {
+            long val = PyInt_AS_LONG(x);
+            if (is_unsigned && unlikely(val < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to long");
+                return (long) -1;
+            }
+            return (long) val;
+        }
+    } else
+#endif
+    if (likely(PyLong_Check(x))) {
+        if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            /* Zero- and one-digit PyLongs can be read straight from the
+             * internal representation without an API call. */
+            if (sizeof(digit) <= sizeof(long)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return (long) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (unlikely(Py_SIZE(x) < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to long");
+                return (long) -1;
+            }
+            if (sizeof(long) <= sizeof(unsigned long)) {
+                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong)
+            } else if (sizeof(long) <= sizeof(unsigned long long)) {
+                __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong)
+            }
+        } else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            if (sizeof(digit) <= sizeof(long)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return +(long) ((PyLongObject*)x)->ob_digit[0];
+                    case -1: return -(long) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (sizeof(long) <= sizeof(long)) {
+                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong)
+            } else if (sizeof(long) <= sizeof(long long)) {
+                __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong)
+            }
+        }
+        /* Last resort: serialize the PyLong into the bytes of `val`. */
+        {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+            PyErr_SetString(PyExc_RuntimeError,
+                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+            long val;
+            PyObject *v = __Pyx_PyNumber_Int(x);
+ #if PY_MAJOR_VERSION < 3
+            if (likely(v) && !PyLong_Check(v)) {
+                PyObject *tmp = v;
+                v = PyNumber_Long(tmp);
+                Py_DECREF(tmp);
+            }
+ #endif
+            if (likely(v)) {
+                /* Detect host endianness at runtime for the byte copy. */
+                int one = 1; int is_little = (int)*(unsigned char *)&one;
+                unsigned char *bytes = (unsigned char *)&val;
+                int ret = _PyLong_AsByteArray((PyLongObject *)v,
+                                              bytes, sizeof(val),
+                                              is_little, !is_unsigned);
+                Py_DECREF(v);
+                if (likely(!ret))
+                    return val;
+            }
+#endif
+            return (long) -1;
+        }
+    } else {
+        /* Not an int/long at all: coerce via __index__/__int__ and recurse. */
+        long val;
+        PyObject *tmp = __Pyx_PyNumber_Int(x);
+        if (!tmp) return (long) -1;
+        val = __Pyx_PyInt_As_long(tmp);
+        Py_DECREF(tmp);
+        return val;
+    }
+}
+
+/* Warn if the Python major.minor version this module was compiled
+ * against differs from the interpreter importing it.
+ *
+ * Bug fixed here: the generated original formatted both versions into
+ * 4-byte buffers with "%d.%d"/"%s" and compared single characters
+ * (ctversion[0] and ctversion[2]).  PyOS_snprintf truncation means any
+ * two-digit minor version collapses to its first digit, so 3.10 vs 3.1
+ * compared equal and 3.10 vs 3.11 was never detected.  We now parse the
+ * runtime "major.minor" numerically from Py_GetVersion() and compare
+ * whole numbers.
+ *
+ * Returns 0 when the versions match; otherwise the result of issuing a
+ * warning (0 on success, -1 if warnings are configured as errors). */
+static int __Pyx_check_binary_version(void) {
+    const char *p = Py_GetVersion();
+    int rt_major = 0, rt_minor = 0;
+    /* Py_GetVersion() starts with "major.minor[.micro...]"; parse the
+     * leading two numeric fields without sscanf to avoid locale issues. */
+    while (*p >= '0' && *p <= '9') { rt_major = rt_major * 10 + (*p - '0'); ++p; }
+    if (*p == '.') {
+        ++p;
+        while (*p >= '0' && *p <= '9') { rt_minor = rt_minor * 10 + (*p - '0'); ++p; }
+    }
+    if (rt_major != PY_MAJOR_VERSION || rt_minor != PY_MINOR_VERSION) {
+        char message[200];
+        PyOS_snprintf(message, sizeof(message),
+                      "compiletime version %d.%d of module '%.100s' "
+                      "does not match runtime version %d.%d",
+                      PY_MAJOR_VERSION, PY_MINOR_VERSION,
+                      __Pyx_MODULE_NAME, rt_major, rt_minor);
+        #if PY_VERSION_HEX < 0x02050000
+        return PyErr_Warn(NULL, message);
+        #else
+        return PyErr_WarnEx(NULL, message, 1);
+        #endif
+    }
+    return 0;
+}
+
+#ifndef __PYX_HAVE_RT_ImportModule
+#define __PYX_HAVE_RT_ImportModule
+/* Import the module called `name` and return a new reference to it,
+ * or NULL with an exception set on failure. */
+static PyObject *__Pyx_ImportModule(const char *name) {
+    PyObject *module = NULL;
+    PyObject *name_obj = __Pyx_PyIdentifier_FromString(name);
+    if (name_obj) {
+        module = PyImport_Import(name_obj);
+        Py_DECREF(name_obj);
+    }
+    return module;
+}
+#endif
+
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+/* Import `module_name.class_name`, verify it is a type object and that
+ * its tp_basicsize matches the size this module was compiled against.
+ * With strict=0 a larger basicsize only raises a warning (possible
+ * binary-compatible extension); any other mismatch is a ValueError.
+ * Returns a new reference to the type, or NULL with an exception set. */
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
+    size_t size, int strict)
+{
+    PyObject *py_module = 0;
+    PyObject *result = 0;
+    PyObject *py_name = 0;
+    char warning[200];
+    Py_ssize_t basicsize;
+#ifdef Py_LIMITED_API
+    PyObject *py_basicsize;
+#endif
+    py_module = __Pyx_ImportModule(module_name);
+    if (!py_module)
+        goto bad;
+    py_name = __Pyx_PyIdentifier_FromString(class_name);
+    if (!py_name)
+        goto bad;
+    result = PyObject_GetAttr(py_module, py_name);
+    Py_DECREF(py_name);
+    py_name = 0;
+    Py_DECREF(py_module);
+    py_module = 0;
+    if (!result)
+        goto bad;
+    if (!PyType_Check(result)) {
+        PyErr_Format(PyExc_TypeError,
+            "%.200s.%.200s is not a type object",
+            module_name, class_name);
+        goto bad;
+    }
+#ifndef Py_LIMITED_API
+    /* Full API: read the struct size directly. */
+    basicsize = ((PyTypeObject *)result)->tp_basicsize;
+#else
+    /* Limited API: tp_basicsize is not accessible; go through the
+     * __basicsize__ attribute instead. */
+    py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
+    if (!py_basicsize)
+        goto bad;
+    basicsize = PyLong_AsSsize_t(py_basicsize);
+    Py_DECREF(py_basicsize);
+    py_basicsize = 0;
+    if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
+        goto bad;
+#endif
+    if (!strict && (size_t)basicsize > size) {
+        PyOS_snprintf(warning, sizeof(warning),
+            "%s.%s size changed, may indicate binary incompatibility",
+            module_name, class_name);
+        #if PY_VERSION_HEX < 0x02050000
+        if (PyErr_Warn(NULL, warning) < 0) goto bad;
+        #else
+        if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
+        #endif
+    }
+    else if ((size_t)basicsize != size) {
+        PyErr_Format(PyExc_ValueError,
+            "%.200s.%.200s has the wrong size, try recompiling",
+            module_name, class_name);
+        goto bad;
+    }
+    return (PyTypeObject *)result;
+bad:
+    Py_XDECREF(py_module);
+    Py_XDECREF(result);
+    return NULL;
+}
+#endif
+
+/* Binary-search the sorted code-object cache for `code_line`.
+ * Returns the index of an exact match, or the position where an entry
+ * for `code_line` would be inserted to keep the array sorted (count if
+ * it is greater than every existing entry).  Assumes count >= 1
+ * whenever `entries` is non-NULL (the insert path guarantees this). */
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+    int start = 0, mid = 0, end = count - 1;
+    if (end >= 0 && code_line > entries[end].code_line) {
+        return count;
+    }
+    while (start < end) {
+        mid = (start + end) / 2;
+        if (code_line < entries[mid].code_line) {
+            end = mid;
+        } else if (code_line > entries[mid].code_line) {
+             start = mid + 1;
+        } else {
+            return mid;
+        }
+    }
+    /* start == end: decide whether the match/insertion point is at mid
+     * or just past it. */
+    if (code_line <= entries[mid].code_line) {
+        return mid;
+    } else {
+        return mid + 1;
+    }
+}
+/* Look up the cached PyCodeObject registered for `code_line`.
+ * Returns a new reference, or NULL when code_line is 0, the cache is
+ * empty, or there is no exact match. */
+static PyCodeObject *__pyx_find_code_object(int code_line) {
+    PyCodeObject *cached;
+    int idx;
+    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries))
+        return NULL;
+    idx = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+    if (unlikely(idx >= __pyx_code_cache.count) ||
+            unlikely(__pyx_code_cache.entries[idx].code_line != code_line))
+        return NULL;
+    cached = __pyx_code_cache.entries[idx].code_object;
+    Py_INCREF(cached);
+    return cached;
+}
+/* Register `code_object` for `code_line` in the sorted cache.
+ * Lazily allocates the 64-entry array on first use, replaces an
+ * existing entry for the same line, and grows the array by 64 slots
+ * when full.  Allocation failures are silently ignored (the cache is
+ * an optimization only).  Takes its own reference to code_object. */
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
+    int pos, i;
+    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+    if (unlikely(!code_line)) {
+        return;
+    }
+    if (unlikely(!entries)) {
+        /* First insertion: allocate the initial array. */
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+        if (likely(entries)) {
+            __pyx_code_cache.entries = entries;
+            __pyx_code_cache.max_count = 64;
+            __pyx_code_cache.count = 1;
+            entries[0].code_line = code_line;
+            entries[0].code_object = code_object;
+            Py_INCREF(code_object);
+        }
+        return;
+    }
+    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
+        /* Same line already cached: swap in the new object. */
+        PyCodeObject* tmp = entries[pos].code_object;
+        entries[pos].code_object = code_object;
+        Py_DECREF(tmp);
+        return;
+    }
+    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
+        int new_max = __pyx_code_cache.max_count + 64;
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
+        if (unlikely(!entries)) {
+            return;
+        }
+        __pyx_code_cache.entries = entries;
+        __pyx_code_cache.max_count = new_max;
+    }
+    /* Shift the tail up one slot and insert at the bisect position. */
+    for (i=__pyx_code_cache.count; i>pos; i--) {
+        entries[i] = entries[i-1];
+    }
+    entries[pos].code_line = code_line;
+    entries[pos].code_object = code_object;
+    __pyx_code_cache.count++;
+    Py_INCREF(code_object);
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+/* Build a minimal, empty PyCodeObject whose filename/name/firstlineno
+ * make tracebacks point at the original Cython source line.  When
+ * c_line is non-zero the function name is suffixed with the generating
+ * C file and line.  Returns a new reference, or NULL on failure. */
+static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
+            const char *funcname, int c_line,
+            int py_line, const char *filename) {
+    PyCodeObject *py_code = 0;
+    PyObject *py_srcfile = 0;
+    PyObject *py_funcname = 0;
+    #if PY_MAJOR_VERSION < 3
+    py_srcfile = PyString_FromString(filename);
+    #else
+    py_srcfile = PyUnicode_FromString(filename);
+    #endif
+    if (!py_srcfile) goto bad;
+    if (c_line) {
+        #if PY_MAJOR_VERSION < 3
+        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+        #else
+        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+        #endif
+    }
+    else {
+        #if PY_MAJOR_VERSION < 3
+        py_funcname = PyString_FromString(funcname);
+        #else
+        py_funcname = PyUnicode_FromString(funcname);
+        #endif
+    }
+    if (!py_funcname) goto bad;
+    /* All code/consts/names fields are empty placeholders; only the
+     * location metadata matters for the traceback display. */
+    py_code = __Pyx_PyCode_New(
+        0,            /*int argcount,*/
+        0,            /*int kwonlyargcount,*/
+        0,            /*int nlocals,*/
+        0,            /*int stacksize,*/
+        0,            /*int flags,*/
+        __pyx_empty_bytes, /*PyObject *code,*/
+        __pyx_empty_tuple, /*PyObject *consts,*/
+        __pyx_empty_tuple, /*PyObject *names,*/
+        __pyx_empty_tuple, /*PyObject *varnames,*/
+        __pyx_empty_tuple, /*PyObject *freevars,*/
+        __pyx_empty_tuple, /*PyObject *cellvars,*/
+        py_srcfile,   /*PyObject *filename,*/
+        py_funcname,  /*PyObject *name,*/
+        py_line,      /*int firstlineno,*/
+        __pyx_empty_bytes  /*PyObject *lnotab*/
+    );
+    Py_DECREF(py_srcfile);
+    Py_DECREF(py_funcname);
+    return py_code;
+bad:
+    Py_XDECREF(py_srcfile);
+    Py_XDECREF(py_funcname);
+    return NULL;
+}
+/* Append a synthetic frame for (funcname, filename, py_line) to the
+ * current exception's traceback, so users see Cython source locations.
+ * Code objects are cached per line via __pyx_find/insert_code_object.
+ * Errors while building the frame are silently dropped (the original
+ * exception is more important than the traceback cosmetics). */
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+                               int py_line, const char *filename) {
+    PyCodeObject *py_code = 0;
+    PyObject *py_globals = 0;
+    PyFrameObject *py_frame = 0;
+    py_code = __pyx_find_code_object(c_line ? c_line : py_line);
+    if (!py_code) {
+        py_code = __Pyx_CreateCodeObjectForTraceback(
+            funcname, c_line, py_line, filename);
+        if (!py_code) goto bad;
+        __pyx_insert_code_object(c_line ? c_line : py_line, py_code);
+    }
+    py_globals = PyModule_GetDict(__pyx_m);
+    if (!py_globals) goto bad;
+    py_frame = PyFrame_New(
+        PyThreadState_GET(), /*PyThreadState *tstate,*/
+        py_code,             /*PyCodeObject *code,*/
+        py_globals,          /*PyObject *globals,*/
+        0                    /*PyObject *locals*/
+    );
+    if (!py_frame) goto bad;
+    py_frame->f_lineno = py_line;
+    PyTraceBack_Here(py_frame);
+bad:
+    Py_XDECREF(py_code);
+    Py_XDECREF(py_frame);
+}
+
+/* Intern/decode the module's compile-time string constants.  Walks the
+ * NULL-terminated string table, creating a str/bytes/unicode object for
+ * each entry according to its flags (is_unicode/is_str/intern/encoding)
+ * and storing it through the entry's target pointer.  `t->n - 1` drops
+ * the trailing NUL from the stored length.  Returns 0, or -1 if any
+ * allocation fails. */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+    while (t->p) {
+        #if PY_MAJOR_VERSION < 3
+        if (t->is_unicode) {
+            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+        } else if (t->intern) {
+            *t->p = PyString_InternFromString(t->s);
+        } else {
+            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+        }
+        #else  /* Python 3+ has unicode identifiers */
+        if (t->is_unicode | t->is_str) {
+            if (t->intern) {
+                *t->p = PyUnicode_InternFromString(t->s);
+            } else if (t->encoding) {
+                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+            } else {
+                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+            }
+        } else {
+            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+        }
+        #endif
+        if (!*t->p)
+            return -1;
+        ++t;
+    }
+    return 0;
+}
+
+/* Create a Python unicode object from a NUL-terminated C string. */
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+    const Py_ssize_t n = (Py_ssize_t) strlen(c_str);
+    return __Pyx_PyUnicode_FromStringAndSize(c_str, n);
+}
+/* Return the char* contents of `o` (same rules and lifetime as
+ * __Pyx_PyObject_AsStringAndSize); the length is discarded. */
+static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
+    Py_ssize_t discarded_length;
+    return __Pyx_PyObject_AsStringAndSize(o, &discarded_length);
+}
+/* Extract a char* + length from a Python string-like object without
+ * copying where possible.  Handles unicode (via the default encoding /
+ * UTF-8, depending on the __PYX_DEFAULT_STRING_ENCODING_* flags and the
+ * Python version), bytearray, and bytes.  Returns NULL with an
+ * exception set on failure.  The returned buffer is owned by `o` (or
+ * its cached encoded form) and is only valid while `o` is alive. */
+static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+    if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+            __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+            PyUnicode_Check(o)) {
+#if PY_VERSION_HEX < 0x03030000
+        char* defenc_c;
+        /* Borrow the object's cached default-encoded bytes form. */
+        PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+        if (!defenc) return NULL;
+        defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+        {
+            /* Verify the result is pure ASCII; the PyUnicode_AsASCIIString
+             * call is made only for its side effect of raising the
+             * appropriate UnicodeEncodeError. */
+            char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+            char* c;
+            for (c = defenc_c; c < end; c++) {
+                if ((unsigned char) (*c) >= 128) {
+                    PyUnicode_AsASCIIString(o);
+                    return NULL;
+                }
+            }
+        }
+#endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/
+        *length = PyBytes_GET_SIZE(defenc);
+        return defenc_c;
+#else /* PY_VERSION_HEX < 0x03030000 */
+        if (PyUnicode_READY(o) == -1) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+        if (PyUnicode_IS_ASCII(o)) {
+            *length = PyUnicode_GET_LENGTH(o);
+            return PyUnicode_AsUTF8(o);
+        } else {
+            PyUnicode_AsASCIIString(o);
+            return NULL;
+        }
+#else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
+        return PyUnicode_AsUTF8AndSize(o, length);
+#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
+#endif /* PY_VERSION_HEX < 0x03030000 */
+    } else
+#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII  || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */
+#if !CYTHON_COMPILING_IN_PYPY
+#if PY_VERSION_HEX >= 0x02060000
+    if (PyByteArray_Check(o)) {
+        *length = PyByteArray_GET_SIZE(o);
+        return PyByteArray_AS_STRING(o);
+    } else
+#endif
+#endif
+    {
+        char* result;
+        int r = PyBytes_AsStringAndSize(o, &result, length);
+        if (unlikely(r < 0)) {
+            return NULL;
+        } else {
+            return result;
+        }
+    }
+}
+/* Truth-test `x`, short-circuiting the three singletons (True -> 1,
+ * False/None -> 0) before deferring to PyObject_IsTrue. */
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+   if (x == Py_True) return 1;
+   if ((x == Py_False) | (x == Py_None)) return 0;
+   return PyObject_IsTrue(x);
+}
+/* Coerce `x` to a Python integer via its __int__/__long__ slots.
+ * Returns a new reference to an int/long, or NULL with TypeError if no
+ * numeric slot exists or the slot returned a non-integer. */
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
+  PyNumberMethods *m;
+  const char *name = NULL;
+  PyObject *res = NULL;
+#if PY_MAJOR_VERSION < 3
+  if (PyInt_Check(x) || PyLong_Check(x))
+#else
+  if (PyLong_Check(x))
+#endif
+    /* Already an integer: return it unchanged (new reference). */
+    return Py_INCREF(x), x;
+  m = Py_TYPE(x)->tp_as_number;
+#if PY_MAJOR_VERSION < 3
+  if (m && m->nb_int) {
+    name = "int";
+    res = PyNumber_Int(x);
+  }
+  else if (m && m->nb_long) {
+    name = "long";
+    res = PyNumber_Long(x);
+  }
+#else
+  if (m && m->nb_int) {
+    name = "int";
+    res = PyNumber_Long(x);
+  }
+#endif
+  if (res) {
+#if PY_MAJOR_VERSION < 3
+    if (!PyInt_Check(res) && !PyLong_Check(res)) {
+#else
+    if (!PyLong_Check(res)) {
+#endif
+      /* The slot lied about its return type: reject it. */
+      PyErr_Format(PyExc_TypeError,
+                   "__%.4s__ returned non-%.4s (type %.200s)",
+                   name, name, Py_TYPE(res)->tp_name);
+      Py_DECREF(res);
+      return NULL;
+    }
+  }
+  else if (!PyErr_Occurred()) {
+    PyErr_SetString(PyExc_TypeError,
+                    "an integer is required");
+  }
+  return res;
+}
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+/* Convert an index-like object to Py_ssize_t, with fast paths for
+ * exact int/long (including a single-digit PyLong internals peek) and
+ * a PyNumber_Index fallback for objects implementing __index__.
+ * Returns -1 with an exception set on failure (-1 is ambiguous;
+ * callers must check PyErr_Occurred()). */
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+  Py_ssize_t ival;
+  PyObject *x;
+#if PY_MAJOR_VERSION < 3
+  if (likely(PyInt_CheckExact(b)))
+      return PyInt_AS_LONG(b);
+#endif
+  if (likely(PyLong_CheckExact(b))) {
+    #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+     #if CYTHON_USE_PYLONG_INTERNALS
+       switch (Py_SIZE(b)) {
+       case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0];
+       case  0: return 0;
+       case  1: return ((PyLongObject*)b)->ob_digit[0];
+       }
+     #endif
+    #endif
+  #if PY_VERSION_HEX < 0x02060000
+    return PyInt_AsSsize_t(b);
+  #else
+    return PyLong_AsSsize_t(b);
+  #endif
+  }
+  x = PyNumber_Index(b);
+  if (!x) return -1;
+  ival = PyInt_AsSsize_t(x);
+  Py_DECREF(x);
+  return ival;
+}
+/* Build a Python integer from a size_t.  On Python < 2.5 (which lacks
+ * PyInt_FromSize_t) values above LONG_MAX go through the byte-array
+ * constructor; otherwise the dedicated API is used. */
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+#if PY_VERSION_HEX < 0x02050000
+   if (ival <= LONG_MAX)
+       return PyInt_FromLong((long)ival);
+   else {
+       unsigned char *bytes = (unsigned char *) &ival;
+       /* Runtime endianness probe for _PyLong_FromByteArray. */
+       int one = 1; int little = (int)*(unsigned char*)&one;
+       return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
+   }
+#else
+   return PyInt_FromSize_t(ival);
+#endif
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/skbio/alignment/_ssw_wrapper.pyx b/skbio/alignment/_ssw_wrapper.pyx
new file mode 100644
index 0000000..1c43e97
--- /dev/null
+++ b/skbio/alignment/_ssw_wrapper.pyx
@@ -0,0 +1,804 @@
+# -----------------------------------------------------------------------------
+#  Copyright (c) 2013--, scikit-bio development team.
+#
+#  Distributed under the terms of the Modified BSD License.
+#
+#  The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from cpython cimport bool
+import numpy as np
+cimport numpy as cnp
+from skbio.alignment import Alignment
+from skbio.sequence import ProteinSequence, NucleotideSequence
+
+# C declarations from the bundled SSW (striped Smith-Waterman) library.
+cdef extern from "_lib/ssw.h":
+
+    # Alignment result filled in by ssw_align; owned pointers are
+    # released with align_destroy.
+    ctypedef struct s_align:
+        cnp.uint16_t score1
+        cnp.uint16_t score2
+        cnp.int32_t ref_begin1
+        cnp.int32_t ref_end1
+        cnp.int32_t read_begin1
+        cnp.int32_t read_end1
+        cnp.int32_t ref_end2
+        cnp.uint32_t* cigar
+        cnp.int32_t cigarLen
+
+    # Opaque query profile built by ssw_init, freed with init_destroy.
+    ctypedef struct s_profile:
+        pass
+
+    cdef s_profile* ssw_init(const cnp.int8_t* read,
+                             const cnp.int32_t readLen,
+                             const cnp.int8_t* mat,
+                             const cnp.int32_t n,
+                             const cnp.int8_t score_size)
+
+    cdef void init_destroy(s_profile* p)
+
+    cdef s_align* ssw_align(const s_profile* prof,
+                            const cnp.int8_t* ref,
+                            cnp.int32_t refLen,
+                            const cnp.uint8_t weight_gapO,
+                            const cnp.uint8_t weight_gapE,
+                            const cnp.uint8_t flag,
+                            const cnp.uint16_t filters,
+                            const cnp.int32_t filterd,
+                            const cnp.int32_t maskLen)
+
+    cdef void align_destroy(s_align* a)
+
+# ASCII-code -> amino-acid substitution-matrix index.  Upper- and
+# lower-case letters map identically ('A'/'a' -> 0, etc.); everything
+# else maps to 23 (unknown residue).
+np_aa_table = np.array([
+    23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+    23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+    23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+    23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+    23,  0, 20,  4,  3,  6, 13,  7,  8,  9, 23, 11, 10, 12,  2, 23,
+    14,  5,  1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23,
+    23,  0, 20,  4,  3,  6, 13,  7,  8,  9, 23, 11, 10, 12,  2, 23,
+    14,  5,  1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23])
+
+# ASCII-code -> nucleotide index: 'A'->0, 'C'->1, 'G'->2, 'T'->3,
+# anything else -> 4 (N/unknown); case-insensitive.
+# NOTE(review): 'U'/'u' (ASCII 85/117) maps to 0, i.e. the same as 'A'.
+# For RNA one would expect 'U' to behave like 'T' (3) — confirm against
+# the upstream SSW table before relying on RNA input.
+np_nt_table = np.array([
+    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
+    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
+    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
+    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
+    4,  0,  4,  1,  4,  4,  4,  2,  4,  4,  4,  4,  4,  4,  4,  4,
+    4,  4,  4,  4,  3,  0,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
+    4,  0,  4,  1,  4,  4,  4,  2,  4,  4,  4,  4,  4,  4,  4,  4,
+    4,  4,  4,  4,  3,  0,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4])
+
+# Low 4 bits of each packed cigar element index into this table:
+# 0 -> 'M' (match/mismatch), 1 -> 'I' (insertion), 2 -> 'D' (deletion).
+mid_table = np.array(['M', 'I', 'D'])
+
+
+cdef class AlignmentStructure:
+    """Wraps the result of an alignment c struct so it is accessible to Python
+
+    Attributes
+    ----------
+    optimal_alignment_score
+    suboptimal_alignment_score
+    target_begin
+    target_end_optimal
+    target_end_suboptimal
+    query_begin
+    query_end
+    cigar
+    query_sequence
+    target_sequence
+    aligned_query_sequence
+    aligned_target_sequence
+
+    Notes
+    -----
+    `cigar` may be empty depending on parameters used.
+
+    `target_begin` and `query_begin` may be -1 depending on parameters used.
+
+    Developer note: `read_sequence` is an alias for `query_sequence` used by
+    ssw.c as is `reference_sequence` for `target_sequence`
+    """
+    cdef s_align *p              # malloc'd result struct; freed in __dealloc__
+    cdef str read_sequence       # query sequence (ssw.c naming)
+    cdef str reference_sequence  # target sequence (ssw.c naming)
+    cdef int index_starts_at     # 0 or 1; offset applied to reported indices
+    cdef str _cigar_string       # lazily-built cigar cache (None until built)
+
+    def __cinit__(self, read_sequence, reference_sequence, index_starts_at):
+        # We use `read_sequence` and `reference_sequence` here as they are
+        # treated semantically as a private output of ssw.c like the `s_align`
+        # struct
+        self.read_sequence = read_sequence
+        self.reference_sequence = reference_sequence
+        self.index_starts_at = index_starts_at
+
+    cdef __constructor(self, s_align* pointer):
+        # Attach the C result struct; this object takes ownership and
+        # releases it via align_destroy in __dealloc__.
+        self.p = pointer
+
+    def __dealloc__(self):
+        if self.p is not NULL:
+            align_destroy(self.p)
+
+    def __getitem__(self, key):
+        # Dict-style access to the named properties (used by __repr__).
+        return getattr(self, key)
+
+    def __repr__(self):
+        data = ['optimal_alignment_score', 'suboptimal_alignment_score',
+                'query_begin', 'query_end', 'target_begin',
+                'target_end_optimal', 'target_end_suboptimal', 'cigar',
+                'query_sequence', 'target_sequence']
+        return "{\n%s\n}" % ',\n'.join([
+            "    {!r}: {!r}".format(k, self[k]) for k in data])
+
+    def __str__(self):
+        score = "Score: %d" % self.optimal_alignment_score
+        if self.query_sequence and self.cigar:
+            target = self.aligned_target_sequence
+            query = self.aligned_query_sequence
+            align_len = len(query)
+            # Truncate long alignments for display purposes only.
+            if align_len > 13:
+                target = target[:10] + "..."
+                query = query[:10] + "..."
+
+            length = "Length: %d" % align_len
+            return "\n".join([query, target, score, length])
+        return score
+
+    @property
+    def optimal_alignment_score(self):
+        """Optimal alignment score
+
+        Returns
+        -------
+        int
+            The optimal alignment score
+
+        """
+        return self.p.score1
+
+    @property
+    def suboptimal_alignment_score(self):
+        """Suboptimal alignment score
+
+        Returns
+        -------
+        int
+            The suboptimal alignment score
+
+        """
+        return self.p.score2
+
+    @property
+    def target_begin(self):
+        """Character index where the target's alignment begins
+
+        Returns
+        -------
+        int
+            The character index of the target sequence's alignment's beginning
+
+        Notes
+        -----
+        The result is a 0-based index by default
+
+        """
+        # ssw.c reports -1 when the beginning position was not computed;
+        # preserve that sentinel without the index offset.
+        return self.p.ref_begin1 + self.index_starts_at if (self.p.ref_begin1
+                                                            >= 0) else -1
+
+    @property
+    def target_end_optimal(self):
+        """Character index where the target's optimal alignment ends
+
+        Returns
+        -------
+        int
+            The character index of the target sequence's optimal alignment's
+             end
+
+        Notes
+        -----
+        The result is a 0-based index by default
+
+        """
+        return self.p.ref_end1 + self.index_starts_at
+
+    @property
+    def target_end_suboptimal(self):
+        """Character index where the target's suboptimal alignment ends
+
+        Returns
+        -------
+        int
+            The character index of the target sequence's suboptimal alignment's
+             end
+
+        Notes
+        -----
+        The result is a 0-based index by default
+
+        """
+        return self.p.ref_end2 + self.index_starts_at
+
+    @property
+    def query_begin(self):
+        """Returns the character index at which the query sequence begins
+
+        Returns
+        -------
+        int
+            The character index of the query sequence beginning
+
+        Notes
+        -----
+        The result is a 0-based index by default
+
+        """
+        # -1 sentinel from ssw.c is passed through unmodified.
+        return self.p.read_begin1 + self.index_starts_at if (self.p.read_begin1
+                                                             >= 0) else -1
+
+    @property
+    def query_end(self):
+        """Character index where the query sequence ends
+
+        Returns
+        -------
+        int
+            The character index of the query sequence ending
+
+        Notes
+        -----
+        The result is a 0-based index by default
+
+        """
+        return self.p.read_end1 + self.index_starts_at
+
+    @property
+    def cigar(self):
+        """Cigar formatted string for the optimal alignment
+
+        Returns
+        -------
+        str
+            The cigar string of the optimal alignment
+
+        Notes
+        -----
+        The cigar string format is described in [1]_ and [2]_.
+
+        If there is no cigar or optimal alignment, this will return an empty
+        string
+
+        References
+        ----------
+        .. [1] http://genome.sph.umich.edu/wiki/SAM
+        .. [2] http://samtools.github.io/hts-specs/SAMv1.pdf
+
+        """
+        # Memoization! (1/2)
+        if self._cigar_string is not None:
+            return self._cigar_string
+        cigar_list = []
+        for i in range(self.p.cigarLen):
+            # stored the same as that in BAM format,
+            # high 28 bits: length, low 4 bits: M/I/D (0/1/2)
+
+            # Length, remove first 4 bits
+            cigar_list.append(str(self.p.cigar[i] >> 4))
+            # M/I/D, lookup first 4 bits in the mid_table
+            cigar_list.append(mid_table[self.p.cigar[i] & 0xf])
+        # Memoization! (2/2)
+        self._cigar_string = "".join(cigar_list)
+        return self._cigar_string
+
+    @property
+    def query_sequence(self):
+        """Query sequence
+
+        Returns
+        -------
+        str
+            The query sequence
+
+        """
+        return self.read_sequence
+
+    @property
+    def target_sequence(self):
+        """Target sequence
+
+        Returns
+        -------
+        str
+            The target sequence
+
+        """
+        return self.reference_sequence
+
+    @property
+    def aligned_query_sequence(self):
+        """Returns the query sequence aligned by the cigar
+
+        Returns
+        -------
+        str
+            Aligned query sequence
+
+        Notes
+        -----
+        This will return `None` if `suppress_sequences` was True when this
+        object was created
+
+        """
+        if self.query_sequence:
+            # 'D' (deletion from the query) introduces gaps in the query.
+            return self._get_aligned_sequence(self.query_sequence,
+                                              self._tuples_from_cigar(),
+                                              self.query_begin, self.query_end,
+                                              "D")
+        return None
+
+    @property
+    def aligned_target_sequence(self):
+        """Returns the target sequence aligned by the cigar
+
+        Returns
+        -------
+        str
+            Aligned target sequence
+
+        Notes
+        -----
+        This will return `None` if `suppress_sequences` was True when this
+        object was created
+
+        """
+        if self.target_sequence:
+            # 'I' (insertion in the query) introduces gaps in the target.
+            return self._get_aligned_sequence(self.target_sequence,
+                                              self._tuples_from_cigar(),
+                                              self.target_begin,
+                                              self.target_end_optimal,
+                                              "I")
+        return None
+
+    def set_zero_based(self, is_zero_based):
+        """Set the alignment indices to start at 0 if True else 1 if False
+
+        """
+        if is_zero_based:
+            self.index_starts_at = 0
+        else:
+            self.index_starts_at = 1
+
+    def is_zero_based(self):
+        """Returns True if alignment indices start at 0 else False
+
+        Returns
+        -------
+        bool
+            Whether the alignment indices start at 0
+
+        """
+        return self.index_starts_at == 0
+
+    def _get_aligned_sequence(self, sequence, tuple_cigar, begin, end,
+                              gap_type):
+        """Build the gapped string for `sequence` described by the cigar.
+
+        `gap_type` is the cigar operation ('D' or 'I') that inserts a gap
+        ('-') into this particular sequence; 'M' consumes characters and
+        the remaining operation is skipped.
+        """
+        # Save the original index scheme and then set it to 0 (1/2)
+        orig_z_base = self.is_zero_based()
+        self.set_zero_based(True)
+        aligned_sequence = []
+        seq = sequence[begin:end + 1]
+        index = 0
+        for length, mid in tuple_cigar:
+            if mid == 'M':
+                aligned_sequence += [seq[i]
+                                     for i in range(index, length + index)]
+                index += length
+            elif mid == gap_type:
+                aligned_sequence += (['-'] * length)
+            else:
+                pass
+        # Our sequence end is sometimes beyond the cigar:
+        aligned_sequence += [seq[i] for i in range(index, end - begin + 1)]
+        # Revert our index scheme to the original (2/2)
+        self.set_zero_based(orig_z_base)
+        return "".join(aligned_sequence)
+
+    def _tuples_from_cigar(self):
+        """Parse the cigar string into (length, operation) tuples."""
+        tuples = []
+        length_stack = []
+        for character in self.cigar:
+            if character.isdigit():
+                length_stack.append(character)
+            else:
+                tuples.append((int("".join(length_stack)), character))
+                length_stack = []
+        return tuples
+
+cdef class StripedSmithWaterman:
+    """Performs a striped (banded) Smith Waterman Alignment.
+
+    First a StripedSmithWaterman object must be instantiated with a query
+    sequence. The resulting object is then callable with a target sequence and
+    may be reused on a large collection of target sequences.
+
+    Parameters
+    ----------
+    query_sequence : string
+        The query sequence, this may be upper or lowercase from the set of
+        {A, C, G, T, N} (nucleotide) or from the set of
+        {A, R, N, D, C, Q, E, G, H, I, L, K, M, F, P, S, T, W, Y, V, B, Z, X, *
+        } (protein)
+    gap_open_penalty : int, optional
+        The penalty applied to creating a gap in the alignment. This CANNOT
+        be 0.
+        Default is 5.
+    gap_extend_penalty : int, optional
+        The penalty applied to extending a gap in the alignment. This CANNOT
+        be 0.
+        Default is 2.
+    score_size : int, optional
+        If your estimated best alignment score is < 255 this should be 0.
+        If your estimated best alignment score is >= 255, this should be 1.
+        If you don't know, this should be 2.
+        Default is 2.
+    mask_length : int, optional
+        The distance between the optimal and suboptimal alignment ending
+        position >= mask_length. We suggest to use len(query_sequence)/2, if
+        you don't have special concerns.
+        Detailed description of mask_length: After locating the optimal
+        alignment ending position, the suboptimal alignment score can be
+        heuristically found by checking the second largest score in the array
+        that contains the maximal score of each column of the SW matrix. In
+        order to avoid picking the scores that belong to the alignments
+        sharing the partial best alignment, SSW C library masks the reference
+        loci nearby (mask length = mask_length) the best alignment ending
+        position and locates the second largest score from the unmasked
+        elements.
+        Default is 15.
+    mask_auto : bool, optional
+        This will automatically set the used mask length to be
+        max(int(len(`query_sequence`)/2), `mask_length`).
+        Default is True.
+    score_only : bool, optional
+        This will prevent the best alignment beginning positions (BABP) and the
+        cigar from being returned as a result. This overrides any setting on
+        `score_filter`, `distance_filter`, and `override_skip_babp`. It has the
+        highest precedence.
+        Default is False.
+    score_filter : int, optional
+        If set, this will prevent the cigar and best alignment beginning
+        positions (BABP) from being returned if the optimal alignment score is
+        less than `score_filter` saving some time computationally. This filter
+        may be overridden by `score_only` (prevents BABP and cigar, regardless
+        of other arguments), `distance_filter` (may prevent cigar, but will
+        cause BABP to be calculated), and `override_skip_babp` (will ensure
+        BABP) returned.
+        Default is None.
+    distance_filter : int, optional
+        If set, this will prevent the cigar from being returned if the length
+        of the `query_sequence` or the `target_sequence` is less than
+        `distance_filter` saving some time computationally. The results of
+        this filter may be overridden by `score_only` (prevents BABP and cigar,
+        regardless of other arguments), and `score_filter` (may prevent cigar).
+        `override_skip_babp` has no effect with this filter applied, as BABP
+        must be calculated to perform the filter.
+        Default is None.
+    override_skip_babp : bool, optional
+        When True, the best alignment beginning positions (BABP) will always be
+        returned unless `score_only` is set to True.
+        Default is False.
+    protein : bool, optional
+        When True, the `query_sequence` and `target_sequence` will be read as
+        protein sequence. When False, the `query_sequence` and
+        `target_sequence` will be read as nucleotide sequence. If True, a
+        `substitution_matrix` must be supplied.
+        Default is False.
+    match_score : int, optional
+        When using a nucleotide sequence, the match_score is the score added
+        when a match occurs. This is ignored if `substitution_matrix` is
+        provided.
+        Default is 2.
+    mismatch_score : int, optional
+        When using a nucleotide sequence, the mismatch is the score subtracted
+        when a mismatch occurs. This should be a negative integer.
+        This is ignored if `substitution_matrix` is provided.
+        Default is -3.
+    substitution_matrix : 2D dict, optional
+        Provides the score for each possible substitution of sequence
+        characters. This may be used for protein or nucleotide sequences. The
+        entire set of possible combinations for the relevant sequence type MUST
+        be enumerated in the dict of dicts. This will override `match_score`
+        and `mismatch_score`. Required when `protein` is True.
+        Default is None.
+    suppress_sequences : bool, optional
+        If True, the query and target sequences will not be returned for
+        convenience.
+        Default is False.
+    zero_index : bool, optional
+        If True, all indices will start at 0. If False, all indices will
+        start at 1.
+        Default is True.
+
+    Notes
+    -----
+    This is a wrapper for the SSW package [1]_.
+
+    `mask_length` has to be >= 15, otherwise the suboptimal alignment
+    information will NOT be returned.
+
+    `match_score` is a positive integer and `mismatch_score` is a negative
+    integer.
+
+    `match_score` and `mismatch_score` are only meaningful in the context of
+    nucleotide sequences.
+
+    A substitution matrix must be provided when working with protein sequences.
+
+    References
+    ----------
+    .. [1] Zhao, Mengyao, Wan-Ping Lee, Erik P. Garrison, & Gabor T.
+       Marth. "SSW Library: An SIMD Smith-Waterman C/C++ Library for
+       Applications". PLOS ONE (2013). Web. 11 July 2014.
+       http://www.plosone.org/article/info:doi/10.1371/journal.pone.0082138
+
+    """
+    # C pointer to the preprocessed query profile built by ssw_init;
+    # released in __dealloc__ via init_destroy.
+    cdef s_profile *profile
+    # Scalar alignment parameters, stored in C types so they can be passed
+    # straight to ssw_align without per-call conversion.
+    cdef cnp.uint8_t gap_open_penalty
+    cdef cnp.uint8_t gap_extend_penalty
+    cdef cnp.uint8_t bit_flag
+    cdef cnp.uint16_t score_filter
+    cdef cnp.int32_t distance_filter
+    cdef cnp.int32_t mask_length
+    cdef str read_sequence
+    cdef int index_starts_at
+    cdef bool is_protein
+    cdef bool suppress_sequences
+    # References held so the numpy buffers outlive the C profile that holds
+    # raw pointers into their data (see "KEEP_IT_IN_SCOPE" note in __cinit__).
+    cdef cnp.ndarray __KEEP_IT_IN_SCOPE_read
+    cdef cnp.ndarray __KEEP_IT_IN_SCOPE_matrix
+
+    def __cinit__(self, query_sequence,
+                  gap_open_penalty=5,  # BLASTN Default
+                  gap_extend_penalty=2,  # BLASTN Default
+                  score_size=2,  # BLASTN Default
+                  mask_length=15,  # Minimum length for a suboptimal alignment
+                  mask_auto=True,
+                  score_only=False,
+                  score_filter=None,
+                  distance_filter=None,
+                  override_skip_babp=False,
+                  protein=False,
+                  match_score=2,  # BLASTN Default
+                  mismatch_score=-3,  # BLASTN Default
+                  substitution_matrix=None,
+                  suppress_sequences=False,
+                  zero_index=True):
+        # initalize our values
+        self.read_sequence = query_sequence
+        if gap_open_penalty <= 0:
+            raise ValueError("`gap_open_penalty` must be > 0")
+        self.gap_open_penalty = gap_open_penalty
+        if gap_extend_penalty <= 0:
+            raise ValueError("`gap_extend_penalty` must be > 0")
+        self.gap_extend_penalty = gap_extend_penalty
+        # A filter value of 0 means "filter disabled" for the SSW library.
+        self.distance_filter = 0 if distance_filter is None else \
+            distance_filter
+        self.score_filter = 0 if score_filter is None else score_filter
+        self.suppress_sequences = suppress_sequences
+        self.is_protein = protein
+        self.bit_flag = self._get_bit_flag(override_skip_babp, score_only)
+        # http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
+        # Dijkstra knows what's up:
+        self.index_starts_at = 0 if zero_index else 1
+        # set up our matrix
+        cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
+        if substitution_matrix is None:
+            if protein:
+                raise Exception("Must provide a substitution matrix for"
+                                " protein sequences")
+            matrix = self._build_match_matrix(match_score, mismatch_score)
+        else:
+            matrix = self._convert_dict2d_to_matrix(substitution_matrix)
+        # Set up our mask_length
+        # Mask is recommended to be max(query_sequence/2, 15)
+        if mask_auto:
+            # NOTE(review): under true-division semantics this expression is a
+            # float; presumably Cython truncates it on assignment to the int32
+            # attribute -- confirm the module's language_level/division mode.
+            self.mask_length = len(query_sequence) / 2
+            if self.mask_length < mask_length:
+                self.mask_length = mask_length
+        else:
+            self.mask_length = mask_length
+
+        # Encode the query into the int8 codes the SSW C library consumes.
+        cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] read_seq
+        read_seq = self._seq_converter(query_sequence)
+
+        cdef cnp.int32_t read_length
+        read_length = len(query_sequence)
+
+        cdef cnp.int8_t s_size
+        s_size = score_size
+
+        # Width of the (square) substitution matrix: 24 residue codes for
+        # protein, 5 (ACGTN) for nucleotide.
+        cdef cnp.int32_t m_width
+        m_width = 24 if self.is_protein else 5
+
+        cdef s_profile* p
+        self.profile = ssw_init(<cnp.int8_t*> read_seq.data,
+                                read_length,
+                                <cnp.int8_t*> matrix.data,
+                                m_width,
+                                s_size)
+
+        # A hack to keep the python GC from eating our data
+        self.__KEEP_IT_IN_SCOPE_read = read_seq
+        self.__KEEP_IT_IN_SCOPE_matrix = matrix
+
+    def __call__(self, target_sequence):
+        """Align `target_sequence` to `query_sequence`
+
+        Parameters
+        ----------
+        target_sequence : str
+
+        Returns
+        -------
+        skbio.alignment.AlignmentStructure
+            The resulting alignment.
+
+        """
+        reference_sequence = target_sequence
+        cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] reference
+        reference = self._seq_converter(reference_sequence)
+
+        cdef cnp.int32_t ref_length
+        ref_length = len(reference_sequence)
+
+        # Run the alignment against the cached query profile.
+        cdef s_align *align
+        align = ssw_align(self.profile, <cnp.int8_t*> reference.data,
+                          ref_length, self.gap_open_penalty,
+                          self.gap_extend_penalty, self.bit_flag,
+                          self.score_filter, self.distance_filter,
+                          self.mask_length)
+
+        # Cython won't let me do this correctly, so duplicate code ahoy:
+        if self.suppress_sequences:
+            alignment = AlignmentStructure("", "", self.index_starts_at)
+        else:
+            alignment = AlignmentStructure(self.read_sequence,
+                                           reference_sequence,
+                                           self.index_starts_at)
+        alignment.__constructor(align)  # Hack to get a pointer through
+        return alignment
+
+    def __dealloc__(self):
+        # Free the C-allocated query profile when this object is collected.
+        if self.profile is not NULL:
+            init_destroy(self.profile)
+
+    def _get_bit_flag(self, override_skip_babp, score_only):
+        # Build the flag byte handed to ssw_align. Bits set here:
+        #   0x8 when override_skip_babp is requested,
+        #   0x4 when a distance filter is active,
+        #   0x2 when a score filter is active,
+        #   0x1 when no filter bits were set (flag is 0 or exactly 0x8).
+        # score_only short-circuits to 0 (no BABP/cigar computation).
+        bit_flag = 0
+        if score_only:
+            return bit_flag
+        if override_skip_babp:
+            bit_flag = bit_flag | 0x8
+        if self.distance_filter != 0:
+            bit_flag = bit_flag | 0x4
+        if self.score_filter != 0:
+            bit_flag = bit_flag | 0x2
+        if bit_flag == 0 or bit_flag == 8:
+            bit_flag = bit_flag | 0x1
+        return bit_flag
+
+    cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] _seq_converter(
+            self,
+            sequence):
+        # Translate each residue character into its int8 code using the
+        # module-level lookup tables (np_aa_table / np_nt_table).
+        cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] seq
+        seq = np.empty(len(sequence), dtype=np.int8)
+        if self.is_protein:
+            for i, char in enumerate(sequence):
+                seq[i] = np_aa_table[ord(char)]
+        else:
+            for i, char in enumerate(sequence):
+                seq[i] = np_nt_table[ord(char)]
+        return seq
+
+    cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
+            _build_match_matrix(self, match_score, mismatch_score):
+        # Build the 5x5 ACGTN scoring dict: N scores 0 against everything,
+        # otherwise match_score on the diagonal and mismatch_score elsewhere.
+        sequence_order = "ACGTN"
+        dict2d = {}
+        for row in sequence_order:
+            dict2d[row] = {}
+            for column in sequence_order:
+                if column == 'N' or row == 'N':
+                    dict2d[row][column] = 0
+                else:
+                    dict2d[row][column] = match_score if row == column \
+                        else mismatch_score
+        return self._convert_dict2d_to_matrix(dict2d)
+
+    cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
+            _convert_dict2d_to_matrix(self, dict2d):
+        # Flatten the dict-of-dicts into a row-major int8 vector using the
+        # fixed residue ordering for the current sequence type. Raises
+        # KeyError if any residue pair is missing from dict2d.
+        if self.is_protein:
+            sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"
+        else:
+            sequence_order = "ACGTN"
+        cdef int i = 0
+        length = len(sequence_order)
+        cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
+            np.empty(length*length, dtype=np.int8)
+        for row in sequence_order:
+            for column in sequence_order:
+                py_list_matrix[i] = dict2d[row][column]
+                i += 1
+        return py_list_matrix
+
+
+def local_pairwise_align_ssw(sequence1, sequence2,
+                             **kwargs):
+    """Align query and target sequences with Striped Smith-Waterman.
+
+    Parameters
+    ----------
+    sequence1 : str or BiologicalSequence
+        The first unaligned sequence
+    sequence2 : str or BiologicalSequence
+        The second unaligned sequence
+
+    Returns
+    -------
+    ``skbio.alignment.Alignment``
+        The resulting alignment as an Alignment object
+
+    Notes
+    -----
+    This is a wrapper for the SSW package [1]_.
+
+    For a complete list of optional keyword-arguments that can be provided,
+    see ``skbio.alignment.StripedSmithWaterman``.
+
+    The following kwargs will not have any effect: `suppress_sequences` and
+    `zero_index`
+
+    If an alignment does not meet a provided filter, `None` will be returned.
+
+    References
+    ----------
+    .. [1] Zhao, Mengyao, Wan-Ping Lee, Erik P. Garrison, & Gabor T.
+       Marth. "SSW Library: An SIMD Smith-Waterman C/C++ Library for
+       Applications". PLOS ONE (2013). Web. 11 July 2014.
+       http://www.plosone.org/article/info:doi/10.1371/journal.pone.0082138
+
+    See Also
+    --------
+    skbio.alignment.StripedSmithWaterman
+
+    """
+    # We need the sequences for `Alignment` to make sense, so don't let the
+    # user suppress them.
+    kwargs['suppress_sequences'] = False
+    # Force 0-based coordinates so start_end_positions below are consistent.
+    kwargs['zero_index'] = True
+
+    # NOTE(review): protein mode is auto-detected from sequence1 only;
+    # presumably sequence2 is expected to be of the same type -- confirm.
+    if isinstance(sequence1, ProteinSequence):
+        kwargs['protein'] = True
+
+    query = StripedSmithWaterman(str(sequence1), **kwargs)
+    alignment = query(str(sequence2))
+
+    # If there is no cigar, then it has failed a filter. Return None.
+    if not alignment.cigar:
+        return None
+
+    # query_begin == -1 signals that begin positions were not computed
+    # (e.g. score_only); omit start/end positions in that case.
+    start_end = None
+    if alignment.query_begin != -1:
+        start_end = [
+            (alignment.query_begin, alignment.query_end),
+            (alignment.target_begin, alignment.target_end_optimal)
+        ]
+    # Wrap the aligned strings in the sequence type matching the input.
+    if kwargs.get('protein', False):
+        seqs = [
+            ProteinSequence(alignment.aligned_query_sequence, id='query'),
+            ProteinSequence(alignment.aligned_target_sequence, id='target')
+        ]
+    else:
+        seqs = [
+            NucleotideSequence(alignment.aligned_query_sequence, id='query'),
+            NucleotideSequence(alignment.aligned_target_sequence, id='target')
+        ]
+
+    return Alignment(seqs, score=alignment.optimal_alignment_score,
+                     start_end_positions=start_end)
diff --git a/skbio/alignment/tests/__init__.py b/skbio/alignment/tests/__init__.py
new file mode 100644
index 0000000..0bf0c55
--- /dev/null
+++ b/skbio/alignment/tests/__init__.py
@@ -0,0 +1,7 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/alignment/tests/test_alignment.py b/skbio/alignment/tests/test_alignment.py
new file mode 100644
index 0000000..d4b4a30
--- /dev/null
+++ b/skbio/alignment/tests/test_alignment.py
@@ -0,0 +1,1128 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from unittest import TestCase, main
+from collections import Counter, defaultdict, OrderedDict
+try:
+    from StringIO import StringIO
+except ImportError:  # python3 system
+    from io import StringIO
+import tempfile
+
+import numpy as np
+import numpy.testing as npt
+from scipy.spatial.distance import hamming
+
+from skbio import (NucleotideSequence, DNASequence, RNASequence, DNA, RNA,
+                   DistanceMatrix, Alignment, SequenceCollection)
+from skbio.alignment import (StockholmAlignment, SequenceCollectionError,
+                             StockholmParseError, AlignmentError)
+
+
+class SequenceCollectionTests(TestCase):
+    def setUp(self):
+        # Shared fixtures: DNA/RNA sequences (plus one invalid and some
+        # lowercase variants) and the SequenceCollections built from them,
+        # reused by every test in this class.
+        self.d1 = DNASequence('GATTACA', id="d1")
+        self.d2 = DNASequence('TTG', id="d2")
+        self.d3 = DNASequence('GTATACA', id="d3")
+        self.d1_lower = DNASequence('gattaca', id="d1")
+        self.d2_lower = DNASequence('ttg', id="d2")
+        self.d3_lower = DNASequence('gtataca', id="d3")
+        self.r1 = RNASequence('GAUUACA', id="r1")
+        self.r2 = RNASequence('UUG', id="r2")
+        self.r3 = RNASequence('U-----UGCC--', id="r3")
+
+        # 'X' is not a valid DNA character -> used for validation tests.
+        self.i1 = DNASequence('GATXACA', id="i1")
+
+        self.seqs1 = [self.d1, self.d2]
+        self.seqs1_lower = [self.d1_lower, self.d2_lower]
+        self.seqs2 = [self.r1, self.r2, self.r3]
+        self.seqs3 = self.seqs1 + self.seqs2
+        self.seqs4 = [self.d1, self.d3]
+
+        # (id, sequence) tuple form, used with from_fasta_records.
+        self.seqs1_t = [('d1', 'GATTACA'), ('d2', 'TTG')]
+        self.seqs2_t = [('r1', 'GAUUACA'), ('r2', 'UUG'),
+                        ('r3', 'U-----UGCC--')]
+        self.seqs3_t = self.seqs1_t + self.seqs2_t
+
+        self.s1 = SequenceCollection(self.seqs1)
+        self.s1_lower = SequenceCollection(self.seqs1_lower)
+        self.s2 = SequenceCollection(self.seqs2)
+        self.s3 = SequenceCollection(self.seqs3)
+        self.s4 = SequenceCollection(self.seqs4)
+        self.empty = SequenceCollection([])
+
+        self.invalid_s1 = SequenceCollection([self.i1])
+
+    def test_init(self):
+        # Construction succeeds for DNA, RNA, mixed, and empty inputs.
+        SequenceCollection(self.seqs1)
+        SequenceCollection(self.seqs2)
+        SequenceCollection(self.seqs3)
+        SequenceCollection([])
+
+    def test_init_fail(self):
+        # Duplicate sequence ids must be rejected at construction time.
+        # sequences with overlapping ids
+        s1 = [self.d1, self.d1]
+        self.assertRaises(SequenceCollectionError, SequenceCollection, s1)
+
+    def test_init_validate(self):
+        # validate=True passes for valid sequences and raises for invalid.
+        SequenceCollection(self.seqs1, validate=True)
+        SequenceCollection(self.seqs1, validate=True)
+        # can't validate self.seqs2 as a DNASequence
+        self.assertRaises(SequenceCollectionError, SequenceCollection,
+                          self.invalid_s1, validate=True)
+
+    def test_from_fasta_records(self):
+        # Alternate constructor from (id, sequence) records, per seq type.
+        SequenceCollection.from_fasta_records(self.seqs1_t, DNASequence)
+        SequenceCollection.from_fasta_records(self.seqs2_t, RNASequence)
+        SequenceCollection.from_fasta_records(self.seqs3_t, NucleotideSequence)
+
+    def test_contains(self):
+        # `in` tests membership by sequence id.
+        self.assertTrue('d1' in self.s1)
+        self.assertTrue('r2' in self.s2)
+        self.assertFalse('r2' in self.s1)
+
+    def test_eq(self):
+        # Equality depends on type, length, and sequence contents.
+        self.assertTrue(self.s1 == self.s1)
+        self.assertFalse(self.s1 == self.s2)
+
+        # different objects can be equal
+        self.assertTrue(self.s1 == SequenceCollection([self.d1, self.d2]))
+        self.assertTrue(SequenceCollection([self.d1, self.d2]) == self.s1)
+
+        # SequenceCollections with different number of sequences are not equal
+        self.assertFalse(self.s1 == SequenceCollection([self.d1]))
+
+        class FakeSequenceCollection(SequenceCollection):
+            pass
+        # SequenceCollections of different types are not equal
+        self.assertFalse(self.s4 == FakeSequenceCollection([self.d1, self.d3]))
+        self.assertFalse(self.s4 == Alignment([self.d1, self.d3]))
+
+        # SequenceCollections with different sequences are not equal
+        self.assertFalse(self.s1 == SequenceCollection([self.d1, self.r1]))
+
+    def test_getitem(self):
+        # Integer indexing returns sequences in order; string indexing is
+        # by id (KeyError) while ints past the end raise IndexError.
+        self.assertEqual(self.s1[0], self.d1)
+        self.assertEqual(self.s1[1], self.d2)
+        self.assertEqual(self.s2[0], self.r1)
+        self.assertEqual(self.s2[1], self.r2)
+
+        self.assertRaises(IndexError, self.empty.__getitem__, 0)
+        self.assertRaises(KeyError, self.empty.__getitem__, '0')
+
+    def test_iter(self):
+        # Iteration yields the sequences in insertion order and exhausts.
+        s1_iter = iter(self.s1)
+        count = 0
+        for actual, expected in zip(s1_iter, self.seqs1):
+            count += 1
+            self.assertEqual(actual, expected)
+        self.assertEqual(count, len(self.seqs1))
+        self.assertRaises(StopIteration, lambda: next(s1_iter))
+
+    def test_len(self):
+        # len() is the number of contained sequences.
+        self.assertEqual(len(self.s1), 2)
+        self.assertEqual(len(self.s2), 3)
+        self.assertEqual(len(self.s3), 5)
+        self.assertEqual(len(self.empty), 0)
+
+    def test_ne(self):
+        # Inequality mirrors test_eq with inverted expectations.
+        self.assertFalse(self.s1 != self.s1)
+        self.assertTrue(self.s1 != self.s2)
+
+        # SequenceCollections with different number of sequences are not equal
+        self.assertTrue(self.s1 != SequenceCollection([self.d1]))
+
+        class FakeSequenceCollection(SequenceCollection):
+            pass
+        # SequenceCollections of different types are not equal
+        self.assertTrue(self.s4 != FakeSequenceCollection([self.d1, self.d3]))
+        self.assertTrue(self.s4 != Alignment([self.d1, self.d3]))
+
+        # SequenceCollections with different sequences are not equal
+        self.assertTrue(self.s1 !=
+                        SequenceCollection([self.d1, self.r1]))
+
+    def test_repr(self):
+        # repr() shows the count and mean +/- std of sequence lengths.
+        self.assertEqual(repr(self.s1),
+                         "<SequenceCollection: n=2; "
+                         "mean +/- std length=5.00 +/- 2.00>")
+        self.assertEqual(repr(self.s2),
+                         "<SequenceCollection: n=3; "
+                         "mean +/- std length=7.33 +/- 3.68>")
+        self.assertEqual(repr(self.s3),
+                         "<SequenceCollection: n=5; "
+                         "mean +/- std length=6.40 +/- 3.32>")
+        self.assertEqual(repr(self.empty),
+                         "<SequenceCollection: n=0; "
+                         "mean +/- std length=0.00 +/- 0.00>")
+
+    def test_reversed(self):
+        # reversed() yields sequences in reverse insertion order.
+        s1_iter = reversed(self.s1)
+        count = 0
+        for actual, expected in zip(s1_iter, self.seqs1[::-1]):
+            count += 1
+            self.assertEqual(actual, expected)
+        self.assertEqual(count, len(self.seqs1))
+        self.assertRaises(StopIteration, lambda: next(s1_iter))
+
+    def test_k_word_frequencies(self):
+        # Per-sequence k-word frequency dicts, overlapping and not.
+        expected1 = defaultdict(float)
+        expected1['A'] = 3 / 7.
+        expected1['C'] = 1 / 7.
+        expected1['G'] = 1 / 7.
+        expected1['T'] = 2 / 7.
+        expected2 = defaultdict(float)
+        expected2['G'] = 1 / 3.
+        expected2['T'] = 2 / 3.
+        self.assertEqual(self.s1.k_word_frequencies(k=1),
+                         [expected1, expected2])
+
+        expected1 = defaultdict(float)
+        expected1['GAT'] = 1 / 2.
+        expected1['TAC'] = 1 / 2.
+        expected2 = defaultdict(float)
+        expected2['TTG'] = 1 / 1.
+        self.assertEqual(self.s1.k_word_frequencies(k=3, overlapping=False),
+                         [expected1, expected2])
+
+        self.assertEqual(self.empty.k_word_frequencies(k=1), [])
+
+        # Test to ensure floating point precision bug isn't present. See the
+        # tests for BiologicalSequence.k_word_frequencies for more details.
+        sc = SequenceCollection([RNA('C' * 10, id='s1'),
+                                 RNA('G' * 10, id='s2')])
+        self.assertEqual(sc.k_word_frequencies(1),
+                         [defaultdict(float, {'C': 1.0}),
+                          defaultdict(float, {'G': 1.0})])
+
+    def test_str(self):
+        # str() renders the collection as FASTA-formatted text.
+        exp1 = ">d1\nGATTACA\n>d2\nTTG\n"
+        self.assertEqual(str(self.s1), exp1)
+        exp2 = ">r1\nGAUUACA\n>r2\nUUG\n>r3\nU-----UGCC--\n"
+        self.assertEqual(str(self.s2), exp2)
+        exp4 = ""
+        self.assertEqual(str(self.empty), exp4)
+
+    def test_distances(self):
+        # distances() builds a DistanceMatrix from a pairwise metric,
+        # both for scipy's hamming and an arbitrary user callable.
+        s1 = SequenceCollection([DNA("ACGT", "d1"), DNA("ACGG", "d2")])
+        expected = [[0, 0.25],
+                    [0.25, 0]]
+        expected = DistanceMatrix(expected, ['d1', 'd2'])
+        actual = s1.distances(hamming)
+        self.assertEqual(actual, expected)
+
+        # alt distance function provided
+        def dumb_distance(s1, s2):
+            return 42.
+        expected = [[0, 42.],
+                    [42., 0]]
+        expected = DistanceMatrix(expected, ['d1', 'd2'])
+        actual = s1.distances(dumb_distance)
+        self.assertEqual(actual, expected)
+
+    def test_distribution_stats(self):
+        # distribution_stats() returns (count, mean length, std length).
+        actual1 = self.s1.distribution_stats()
+        self.assertEqual(actual1[0], 2)
+        self.assertAlmostEqual(actual1[1], 5.0, 3)
+        self.assertAlmostEqual(actual1[2], 2.0, 3)
+
+        actual2 = self.s2.distribution_stats()
+        self.assertEqual(actual2[0], 3)
+        self.assertAlmostEqual(actual2[1], 7.333, 3)
+        self.assertAlmostEqual(actual2[2], 3.682, 3)
+
+        actual3 = self.s3.distribution_stats()
+        self.assertEqual(actual3[0], 5)
+        self.assertAlmostEqual(actual3[1], 6.400, 3)
+        self.assertAlmostEqual(actual3[2], 3.323, 3)
+
+        # Empty collection yields zeros rather than raising.
+        actual4 = self.empty.distribution_stats()
+        self.assertEqual(actual4[0], 0)
+        self.assertEqual(actual4[1], 0.0)
+        self.assertEqual(actual4[2], 0.0)
+
+    def test_degap(self):
+        # degap() strips both '.' and '-' gap characters from all sequences.
+        expected = [(id_, seq.replace('.', '').replace('-', ''))
+                    for id_, seq in self.seqs2_t]
+        expected = SequenceCollection.from_fasta_records(expected, RNASequence)
+        actual = self.s2.degap()
+        self.assertEqual(actual, expected)
+
+    def test_get_seq(self):
+        # get_seq() looks a sequence up by its id.
+        self.assertEqual(self.s1.get_seq('d1'), self.d1)
+        self.assertEqual(self.s1.get_seq('d2'), self.d2)
+
+    def test_ids(self):
+        # ids() returns the list of sequence ids in order.
+        self.assertEqual(self.s1.ids(), ['d1', 'd2'])
+        self.assertEqual(self.s2.ids(), ['r1', 'r2', 'r3'])
+        self.assertEqual(self.s3.ids(),
+                         ['d1', 'd2', 'r1', 'r2', 'r3'])
+        self.assertEqual(self.empty.ids(), [])
+
+    def _assert_sequence_collections_equal(self, observed, expected):
+        """Compare SequenceCollections strictly."""
+        # TODO remove this custom equality testing code when SequenceCollection
+        # has an equals method (part of #656). We need this method to include
+        # IDs in the comparison (not part of SequenceCollection.__eq__).
+        self.assertEqual(observed, expected)
+        # Pairwise strict comparison (includes id/description/quality).
+        for obs_seq, exp_seq in zip(observed, expected):
+            self.assertTrue(obs_seq.equals(exp_seq))
+
+    def test_update_ids_default_behavior(self):
+        # With no arguments, ids are replaced by 1-based integer strings
+        # and a new-id -> old-id map is returned.
+        # 3 seqs
+        exp_sc = SequenceCollection([
+            RNA('GAUUACA', id="1"),
+            RNA('UUG', id="2"),
+            RNA('U-----UGCC--', id="3")
+        ])
+        exp_id_map = {'1': 'r1', '2': 'r2', '3': 'r3'}
+        obs_sc, obs_id_map = self.s2.update_ids()
+        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_id_map, exp_id_map)
+
+        # empty
+        obs_sc, obs_id_map = self.empty.update_ids()
+        self._assert_sequence_collections_equal(obs_sc, self.empty)
+        self.assertEqual(obs_id_map, {})
+
+    def test_update_ids_prefix(self):
+        # prefix= prepends the given string to the generated integer ids.
+        # 3 seqs
+        exp_sc = SequenceCollection([
+            RNA('GAUUACA', id="abc1"),
+            RNA('UUG', id="abc2"),
+            RNA('U-----UGCC--', id="abc3")
+        ])
+        exp_id_map = {'abc1': 'r1', 'abc2': 'r2', 'abc3': 'r3'}
+        obs_sc, obs_id_map = self.s2.update_ids(prefix='abc')
+        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_id_map, exp_id_map)
+
+        # empty
+        obs_sc, obs_id_map = self.empty.update_ids(prefix='abc')
+        self._assert_sequence_collections_equal(obs_sc, self.empty)
+        self.assertEqual(obs_id_map, {})
+
+    def test_update_ids_fn_parameter(self):
+        # fn= maps the full list of current ids to the new ids.
+        def append_42(ids):
+            return [id_ + '-42' for id_ in ids]
+
+        # 3 seqs
+        exp_sc = SequenceCollection([
+            RNA('GAUUACA', id="r1-42"),
+            RNA('UUG', id="r2-42"),
+            RNA('U-----UGCC--', id="r3-42")
+        ])
+        exp_id_map = {'r1-42': 'r1', 'r2-42': 'r2', 'r3-42': 'r3'}
+        obs_sc, obs_id_map = self.s2.update_ids(fn=append_42)
+        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_id_map, exp_id_map)
+
+        # empty
+        obs_sc, obs_id_map = self.empty.update_ids(fn=append_42)
+        self._assert_sequence_collections_equal(obs_sc, self.empty)
+        self.assertEqual(obs_id_map, {})
+
+    def test_update_ids_ids_parameter(self):
+        # ids= supplies the new ids explicitly, in order.
+        # 3 seqs
+        exp_sc = SequenceCollection([
+            RNA('GAUUACA', id="abc"),
+            RNA('UUG', id="def"),
+            RNA('U-----UGCC--', id="ghi")
+        ])
+        exp_id_map = {'abc': 'r1', 'def': 'r2', 'ghi': 'r3'}
+        obs_sc, obs_id_map = self.s2.update_ids(ids=('abc', 'def', 'ghi'))
+        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_id_map, exp_id_map)
+
+        # empty
+        obs_sc, obs_id_map = self.empty.update_ids(ids=[])
+        self._assert_sequence_collections_equal(obs_sc, self.empty)
+        self.assertEqual(obs_id_map, {})
+
+    def test_update_ids_sequence_attributes_propagated(self):
+        # Renaming must preserve description and quality on each sequence.
+        # 1 seq
+        exp_sc = Alignment([
+            DNA('ACGT', id="abc", description='desc', quality=range(4))
+        ])
+        exp_id_map = {'abc': 'seq1'}
+
+        obj = Alignment([
+            DNA('ACGT', id="seq1", description='desc', quality=range(4))
+        ])
+
+        obs_sc, obs_id_map = obj.update_ids(ids=('abc',))
+        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_id_map, exp_id_map)
+
+        # 2 seqs
+        exp_sc = Alignment([
+            DNA('ACGT', id="abc", description='desc1', quality=range(4)),
+            DNA('TGCA', id="def", description='desc2', quality=range(4)[::-1])
+        ])
+        exp_id_map = {'abc': 'seq1', 'def': 'seq2'}
+
+        obj = Alignment([
+            DNA('ACGT', id="seq1", description='desc1', quality=(0, 1, 2, 3)),
+            DNA('TGCA', id="seq2", description='desc2', quality=(3, 2, 1, 0))
+        ])
+
+        obs_sc, obs_id_map = obj.update_ids(ids=('abc', 'def'))
+        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_id_map, exp_id_map)
+
+    def test_update_ids_invalid_parameter_combos(self):
+        # ids=, fn=, and prefix= are mutually exclusive where noted.
+        with self.assertRaisesRegexp(SequenceCollectionError, 'ids and fn'):
+            self.s1.update_ids(fn=lambda e: e, ids=['foo', 'bar'])
+
+        with self.assertRaisesRegexp(SequenceCollectionError, 'prefix'):
+            self.s1.update_ids(ids=['foo', 'bar'], prefix='abc')
+
+        with self.assertRaisesRegexp(SequenceCollectionError, 'prefix'):
+            self.s1.update_ids(fn=lambda e: e, prefix='abc')
+
+    def test_update_ids_invalid_ids(self):
+        # Wrong count or duplicate new ids are rejected.
+        # incorrect number of new ids
+        with self.assertRaisesRegexp(SequenceCollectionError, '3 != 2'):
+            self.s1.update_ids(ids=['foo', 'bar', 'baz'])
+        with self.assertRaisesRegexp(SequenceCollectionError, '4 != 2'):
+            self.s1.update_ids(fn=lambda e: ['foo', 'bar', 'baz', 'abc'])
+
+        # duplicates
+        with self.assertRaisesRegexp(SequenceCollectionError, 'foo'):
+            self.s2.update_ids(ids=['foo', 'bar', 'foo'])
+        with self.assertRaisesRegexp(SequenceCollectionError, 'bar'):
+            self.s2.update_ids(fn=lambda e: ['foo', 'bar', 'bar'])
+
+    def test_int_map(self):
+        expected1 = {"1": self.d1, "2": self.d2}
+        expected2 = {"1": "d1", "2": "d2"}
+        obs = npt.assert_warns(DeprecationWarning, self.s1.int_map)
+        self.assertEqual(obs, (expected1, expected2))
+
+        expected1 = {"h-1": self.d1, "h-2": self.d2}
+        expected2 = {"h-1": "d1", "h-2": "d2"}
+        obs = npt.assert_warns(DeprecationWarning, self.s1.int_map,
+                               prefix='h-')
+        self.assertEqual(obs, (expected1, expected2))
+
+    def test_is_empty(self):
+        self.assertFalse(self.s1.is_empty())
+        self.assertFalse(self.s2.is_empty())
+        self.assertFalse(self.s3.is_empty())
+
+        self.assertTrue(self.empty.is_empty())
+
+    def test_is_valid(self):
+        self.assertTrue(self.s1.is_valid())
+        self.assertTrue(self.s2.is_valid())
+        self.assertTrue(self.s3.is_valid())
+        self.assertTrue(self.empty.is_valid())
+
+        self.assertFalse(self.invalid_s1.is_valid())
+
+    def test_iteritems(self):
+        self.assertEqual(list(self.s1.iteritems()),
+                         [(s.id, s) for s in self.s1])
+
+    def test_lower(self):
+        self.assertEqual(self.s1.lower(), self.s1_lower)
+
+    def test_sequence_count(self):
+        self.assertEqual(self.s1.sequence_count(), 2)
+        self.assertEqual(self.s2.sequence_count(), 3)
+        self.assertEqual(self.s3.sequence_count(), 5)
+        self.assertEqual(self.empty.sequence_count(), 0)
+
+    def test_sequence_lengths(self):
+        self.assertEqual(self.s1.sequence_lengths(), [7, 3])
+        self.assertEqual(self.s2.sequence_lengths(), [7, 3, 12])
+        self.assertEqual(self.s3.sequence_lengths(), [7, 3, 7, 3, 12])
+        self.assertEqual(self.empty.sequence_lengths(), [])
+
+    def test_to_fasta(self):
+        exp1 = ">d1\nGATTACA\n>d2\nTTG\n"
+        self.assertEqual(self.s1.to_fasta(), exp1)
+        exp2 = ">r1\nGAUUACA\n>r2\nUUG\n>r3\nU-----UGCC--\n"
+        self.assertEqual(self.s2.to_fasta(), exp2)
+
+    def test_toFasta(self):
+        exp = ">d1\nGATTACA\n>d2\nTTG\n"
+        obs = npt.assert_warns(DeprecationWarning, self.s1.toFasta)
+        self.assertEqual(obs, exp)
+
+    def test_upper(self):
+        self.assertEqual(self.s1_lower.upper(), self.s1)
+
+
class AlignmentTests(TestCase):
    """Unit tests for the Alignment class."""

    def setUp(self):
        """Build gapped DNA/RNA alignments shared by the tests below."""
        self.d1 = DNASequence('..ACC-GTTGG..', id="d1")
        self.d2 = DNASequence('TTACCGGT-GGCC', id="d2")
        self.d3 = DNASequence('.-ACC-GTTGC--', id="d3")

        self.r1 = RNASequence('UUAU-', id="r1")
        self.r2 = RNASequence('ACGUU', id="r2")

        self.seqs1 = [self.d1, self.d2, self.d3]
        self.seqs2 = [self.r1, self.r2]

        # (id, sequence-string) tuples mirroring seqs1/seqs2
        self.seqs1_t = [('d1', '..ACC-GTTGG..'), ('d2', 'TTACCGGT-GGCC'),
                        ('d3', '.-ACC-GTTGC--')]
        self.seqs2_t = [('r1', 'UUAU-'), ('r2', 'ACGUU')]

        self.a1 = Alignment(self.seqs1)
        self.a2 = Alignment(self.seqs2)
        self.a3 = Alignment(self.seqs2, score=42.0,
                            start_end_positions=[(0, 3), (5, 9)])
        self.a4 = Alignment(self.seqs2, score=-42.0,
                            start_end_positions=[(1, 4), (6, 10)])

        # no sequences
        self.empty = Alignment([])

        # sequences, but no positions
        self.no_positions = Alignment([RNA('', id='a'), RNA('', id='b')])

    def test_degap(self):
        """degap strips gap characters, yielding a SequenceCollection."""
        expected = [(id_, seq.replace('.', '').replace('-', ''))
                    for id_, seq in self.seqs1_t]
        expected = SequenceCollection.from_fasta_records(expected, DNASequence)
        actual = self.a1.degap()
        self.assertEqual(actual, expected)

        expected = [(id_, seq.replace('.', '').replace('-', ''))
                    for id_, seq in self.seqs2_t]
        expected = SequenceCollection.from_fasta_records(expected, RNASequence)
        actual = self.a2.degap()
        self.assertEqual(actual, expected)

    def test_distances(self):
        """distances computes pairwise distances, honoring a custom metric."""
        expected = [[0, 6. / 13, 4. / 13],
                    [6. / 13, 0, 7. / 13],
                    [4. / 13, 7. / 13, 0]]
        expected = DistanceMatrix(expected, ['d1', 'd2', 'd3'])
        actual = self.a1.distances()
        self.assertEqual(actual, expected)

        # alt distance function provided
        def dumb_distance(s1, s2):
            return 42.
        expected = [[0, 42., 42.],
                    [42., 0, 42.],
                    [42., 42., 0]]
        expected = DistanceMatrix(expected, ['d1', 'd2', 'd3'])
        actual = self.a1.distances(dumb_distance)
        self.assertEqual(actual, expected)

    def test_score(self):
        """score returns the value supplied at construction time."""
        self.assertEqual(self.a3.score(), 42.0)
        self.assertEqual(self.a4.score(), -42.0)

    def test_start_end_positions(self):
        """start_end_positions round-trips the constructor argument."""
        self.assertEqual(self.a3.start_end_positions(), [(0, 3), (5, 9)])
        self.assertEqual(self.a4.start_end_positions(), [(1, 4), (6, 10)])

    def test_subalignment(self):
        """subalignment filters by sequence and/or position, with inversion."""
        # keep seqs by ids
        actual = self.a1.subalignment(seqs_to_keep=['d1', 'd3'])
        expected = Alignment([self.d1, self.d3])
        self.assertEqual(actual, expected)

        # keep seqs by indices
        actual = self.a1.subalignment(seqs_to_keep=[0, 2])
        expected = Alignment([self.d1, self.d3])
        self.assertEqual(actual, expected)

        # keep seqs by ids (invert)
        actual = self.a1.subalignment(seqs_to_keep=['d1', 'd3'],
                                      invert_seqs_to_keep=True)
        expected = Alignment([self.d2])
        self.assertEqual(actual, expected)

        # keep seqs by indices (invert)
        actual = self.a1.subalignment(seqs_to_keep=[0, 2],
                                      invert_seqs_to_keep=True)
        expected = Alignment([self.d2])
        self.assertEqual(actual, expected)

        # keep positions
        actual = self.a1.subalignment(positions_to_keep=[0, 2, 3])
        d1 = DNASequence('.AC', id="d1")
        d2 = DNASequence('TAC', id="d2")
        d3 = DNASequence('.AC', id="d3")
        expected = Alignment([d1, d2, d3])
        self.assertEqual(actual, expected)

        # keep positions (invert)
        actual = self.a1.subalignment(positions_to_keep=[0, 2, 3],
                                      invert_positions_to_keep=True)
        d1 = DNASequence('.C-GTTGG..', id="d1")
        d2 = DNASequence('TCGGT-GGCC', id="d2")
        d3 = DNASequence('-C-GTTGC--', id="d3")
        expected = Alignment([d1, d2, d3])
        self.assertEqual(actual, expected)

        # keep seqs and positions
        actual = self.a1.subalignment(seqs_to_keep=[0, 2],
                                      positions_to_keep=[0, 2, 3])
        d1 = DNASequence('.AC', id="d1")
        d3 = DNASequence('.AC', id="d3")
        expected = Alignment([d1, d3])
        self.assertEqual(actual, expected)

        # keep seqs and positions (invert)
        actual = self.a1.subalignment(seqs_to_keep=[0, 2],
                                      positions_to_keep=[0, 2, 3],
                                      invert_seqs_to_keep=True,
                                      invert_positions_to_keep=True)
        d2 = DNASequence('TCGGT-GGCC', id="d2")
        expected = Alignment([d2])
        self.assertEqual(actual, expected)

    def test_subalignment_filter_out_everything(self):
        """Inverting a None keep-list discards all seqs/positions."""
        exp = Alignment([])

        # no sequences
        obs = self.a1.subalignment(seqs_to_keep=None, invert_seqs_to_keep=True)
        self.assertEqual(obs, exp)

        # no positions
        obs = self.a1.subalignment(positions_to_keep=None,
                                   invert_positions_to_keep=True)
        self.assertEqual(obs, exp)

    def test_init_not_equal_lengths(self):
        """Constructing with unequal-length sequences raises AlignmentError."""
        invalid_seqs = [self.d1, self.d2, self.d3,
                        DNASequence('.-ACC-GTGC--', id="i2")]
        self.assertRaises(AlignmentError, Alignment,
                          invalid_seqs)

    def test_init_equal_lengths(self):
        """Constructing with equal-length sequences must not raise."""
        seqs = [self.d1, self.d2, self.d3]
        Alignment(seqs)

    def test_init_validate(self):
        """validate=True rejects sequences with invalid characters."""
        Alignment(self.seqs1, validate=True)

        # invalid DNA character
        invalid_seqs1 = [self.d1, self.d2, self.d3,
                         DNASequence('.-ACC-GTXGC--', id="i1")]
        self.assertRaises(SequenceCollectionError, Alignment,
                          invalid_seqs1, validate=True)

    def test_iter_positions(self):
        """iter_positions yields columns as sequences or via a constructor."""
        # NOTE: two stray, effect-free reassignments of self.seqs2_t that
        # followed each `expected` below were removed (copy-paste leftovers).
        actual = list(self.a2.iter_positions())
        expected = [[RNASequence(j) for j in i] for i in
                    ['UA', 'UC', 'AG', 'UU', '-U']]
        self.assertEqual(actual, expected)

        actual = list(self.a2.iter_positions(constructor=str))
        expected = [list('UA'),
                    list('UC'),
                    list('AG'),
                    list('UU'),
                    list('-U')]
        self.assertEqual(actual, expected)

    def test_majority_consensus(self):
        """majority_consensus picks the most frequent character per column."""
        d1 = DNASequence('TTT', id="d1")
        d2 = DNASequence('TT-', id="d2")
        d3 = DNASequence('TC-', id="d3")
        a1 = Alignment([d1, d2, d3])
        self.assertTrue(a1.majority_consensus().equals(DNASequence('TT-')))

        # a tie may resolve to either character
        d1 = DNASequence('T', id="d1")
        d2 = DNASequence('A', id="d2")
        a1 = Alignment([d1, d2])
        self.assertTrue(a1.majority_consensus() in
                        [DNASequence('T'), DNASequence('A')])

        self.assertEqual(self.empty.majority_consensus(), '')

    def test_majority_consensus_constructor(self):
        """Passing constructor= to majority_consensus is deprecated."""
        d1 = DNASequence('TTT', id="d1")
        d2 = DNASequence('TT-', id="d2")
        d3 = DNASequence('TC-', id="d3")
        a1 = Alignment([d1, d2, d3])

        obs = npt.assert_warns(DeprecationWarning, a1.majority_consensus,
                               constructor=str)
        self.assertEqual(obs, 'TT-')

    def test_omit_gap_positions(self):
        """omit_gap_positions drops columns above the gap-frequency cutoff."""
        expected = self.a2
        self.assertEqual(self.a2.omit_gap_positions(1.0), expected)
        self.assertEqual(self.a2.omit_gap_positions(0.51), expected)

        r1 = RNASequence('UUAU', id="r1")
        r2 = RNASequence('ACGU', id="r2")
        expected = Alignment([r1, r2])
        self.assertEqual(self.a2.omit_gap_positions(0.49), expected)

        r1 = RNASequence('UUAU', id="r1")
        r2 = RNASequence('ACGU', id="r2")
        expected = Alignment([r1, r2])
        self.assertEqual(self.a2.omit_gap_positions(0.0), expected)

        self.assertEqual(self.empty.omit_gap_positions(0.0), self.empty)
        self.assertEqual(self.empty.omit_gap_positions(0.49), self.empty)
        self.assertEqual(self.empty.omit_gap_positions(1.0), self.empty)

        # Test to ensure floating point precision bug isn't present. See the
        # tests for Alignment.position_frequencies for more details.
        seqs = []
        for i in range(33):
            seqs.append(DNA('-.', id=str(i)))
        aln = Alignment(seqs)
        self.assertEqual(aln.omit_gap_positions(1 - np.finfo(float).eps),
                         Alignment([DNA('', id=str(i)) for i in range(33)]))

    def test_omit_gap_sequences(self):
        """omit_gap_sequences drops rows above the gap-frequency cutoff."""
        expected = self.a2
        self.assertEqual(self.a2.omit_gap_sequences(1.0), expected)
        self.assertEqual(self.a2.omit_gap_sequences(0.20), expected)

        expected = Alignment([self.r2])
        self.assertEqual(self.a2.omit_gap_sequences(0.19), expected)

        self.assertEqual(self.empty.omit_gap_sequences(0.0), self.empty)
        self.assertEqual(self.empty.omit_gap_sequences(0.2), self.empty)
        self.assertEqual(self.empty.omit_gap_sequences(1.0), self.empty)

        # Test to ensure floating point precision bug isn't present. See the
        # tests for Alignment.position_frequencies for more details.
        aln = Alignment([DNA('.' * 33, id='abc'), DNA('-' * 33, id='def')])
        self.assertEqual(aln.omit_gap_sequences(1 - np.finfo(float).eps),
                         Alignment([]))

    def test_position_counters(self):
        """position_counters returns a Counter of characters per column."""
        self.assertEqual(self.empty.position_counters(), [])

        self.assertEqual(self.no_positions.position_counters(), [])

        expected = [Counter({'U': 1, 'A': 1}),
                    Counter({'U': 1, 'C': 1}),
                    Counter({'A': 1, 'G': 1}),
                    Counter({'U': 2}),
                    Counter({'-': 1, 'U': 1})]
        self.assertEqual(self.a2.position_counters(), expected)

    def test_position_frequencies(self):
        """position_frequencies returns per-column character frequencies."""
        self.assertEqual(self.empty.position_frequencies(), [])

        self.assertEqual(self.no_positions.position_frequencies(), [])

        expected = [defaultdict(float, {'U': 0.5, 'A': 0.5}),
                    defaultdict(float, {'U': 0.5, 'C': 0.5}),
                    defaultdict(float, {'A': 0.5, 'G': 0.5}),
                    defaultdict(float, {'U': 1.0}),
                    defaultdict(float, {'-': 0.5, 'U': 0.5})]
        self.assertEqual(self.a2.position_frequencies(), expected)

    def test_position_frequencies_floating_point_precision(self):
        """A no-variation column must have a frequency of exactly 1.0."""
        # Test that a position with no variation yields a frequency of exactly
        # 1.0. Note that it is important to use self.assertEqual here instead
        # of self.assertAlmostEqual because we want to test for exactly 1.0. A
        # previous implementation of Alignment.position_frequencies added
        # (1 / sequence_count) for each occurrence of a character in a position
        # to compute the frequencies (see
        # https://github.com/biocore/scikit-bio/issues/801). In certain cases,
        # this yielded a frequency slightly less than 1.0 due to roundoff
        # error. The test case here uses an alignment of 10 sequences with no
        # variation at a position. This test case exposes the roundoff error
        # present in the previous implementation because 1/10 added 10 times
        # yields a number slightly less than 1.0. This occurs because 1/10
        # cannot be represented exactly as a floating point number.
        seqs = []
        for i in range(10):
            seqs.append(DNA('A', id=str(i)))
        aln = Alignment(seqs)
        self.assertEqual(aln.position_frequencies(),
                         [defaultdict(float, {'A': 1.0})])

    def test_position_entropies(self):
        """position_entropies computes per-column Shannon entropy."""
        # tested by calculating values as described in this post:
        #  http://stackoverflow.com/a/15476958/3424666
        expected = [0.69314, 0.69314, 0.69314, 0.0, np.nan]
        np.testing.assert_almost_equal(self.a2.position_entropies(),
                                       expected, 5)

        expected = [1.0, 1.0, 1.0, 0.0, np.nan]
        np.testing.assert_almost_equal(self.a2.position_entropies(base=2),
                                       expected, 5)

        np.testing.assert_almost_equal(self.empty.position_entropies(base=2),
                                       [])

    def test_k_word_frequencies(self):
        """k_word_frequencies returns per-sequence k-mer frequencies."""
        expected = [defaultdict(float, {'U': 3 / 5, 'A': 1 / 5, '-': 1 / 5}),
                    defaultdict(float, {'A': 1 / 5, 'C': 1 / 5, 'G': 1 / 5,
                                        'U': 2 / 5})]
        actual = self.a2.k_word_frequencies(k=1)
        for a, e in zip(actual, expected):
            self.assertEqual(sorted(a), sorted(e), 5)
            np.testing.assert_almost_equal(sorted(a.values()),
                                           sorted(e.values()), 5)

    def test_sequence_length(self):
        """sequence_length returns the (shared) aligned length."""
        self.assertEqual(self.a1.sequence_length(), 13)
        self.assertEqual(self.a2.sequence_length(), 5)
        self.assertEqual(self.empty.sequence_length(), 0)

    def test_to_phylip(self):
        """to_phylip (deprecated) serializes with original labels."""
        d1 = DNASequence('..ACC-GTTGG..', id="d1")
        d2 = DNASequence('TTACCGGT-GGCC', id="d2")
        d3 = DNASequence('.-ACC-GTTGC--', id="d3")
        a = Alignment([d1, d2, d3])

        phylip_str, id_map = npt.assert_warns(DeprecationWarning, a.to_phylip,
                                              map_labels=False)
        self.assertEqual(id_map, {'d1': 'd1',
                                  'd3': 'd3',
                                  'd2': 'd2'})
        expected = "\n".join(["3 13",
                              "d1 ..ACC-GTTGG..",
                              "d2 TTACCGGT-GGCC",
                              "d3 .-ACC-GTTGC--"])
        self.assertEqual(phylip_str, expected)

    def test_to_phylip_map_labels(self):
        """to_phylip with map_labels=True relabels sequences with a prefix."""
        d1 = DNASequence('..ACC-GTTGG..', id="d1")
        d2 = DNASequence('TTACCGGT-GGCC', id="d2")
        d3 = DNASequence('.-ACC-GTTGC--', id="d3")
        a = Alignment([d1, d2, d3])

        phylip_str, id_map = npt.assert_warns(DeprecationWarning, a.to_phylip,
                                              map_labels=True,
                                              label_prefix="s")
        self.assertEqual(id_map, {'s1': 'd1',
                                  's3': 'd3',
                                  's2': 'd2'})
        expected = "\n".join(["3 13",
                              "s1 ..ACC-GTTGG..",
                              "s2 TTACCGGT-GGCC",
                              "s3 .-ACC-GTTGC--"])
        self.assertEqual(phylip_str, expected)

    def test_to_phylip_no_sequences(self):
        """to_phylip on an empty alignment raises."""
        with self.assertRaises(SequenceCollectionError):
            npt.assert_warns(DeprecationWarning, Alignment([]).to_phylip)

    def test_to_phylip_no_positions(self):
        """to_phylip on zero-length sequences raises."""
        d1 = DNASequence('', id="d1")
        d2 = DNASequence('', id="d2")
        a = Alignment([d1, d2])

        with self.assertRaises(SequenceCollectionError):
            npt.assert_warns(DeprecationWarning, a.to_phylip)

    def test_validate_lengths(self):
        """_validate_lengths is True for equal-length (or empty) alignments."""
        self.assertTrue(self.a1._validate_lengths())
        self.assertTrue(self.a2._validate_lengths())
        self.assertTrue(self.empty._validate_lengths())

        self.assertTrue(Alignment([
            DNASequence('TTT', id="d1")])._validate_lengths())
+
+
class StockholmAlignmentTests(TestCase):
    """Unit tests for StockholmAlignment parsing and serialization."""

    def setUp(self):
        """Build a small Stockholm alignment with GF/GS/GR/GC metadata."""
        self.seqs = [DNASequence("ACC-G-GGTA", id="seq1"),
                     DNASequence("TCC-G-GGCA", id="seq2")]
        # NOTE: the original literal listed ("RN", ...) twice with the same
        # value; the duplicate silently overwrote the first entry and has
        # been removed.
        self.GF = OrderedDict([
            ("AC", "RF00360"),
            ("BM", ["cmbuild  -F CM SEED",
                    "cmsearch  -Z 274931 -E 1000000"]),
            ("SQ", "9"),
            ("RT", ["TITLE1", "TITLE2"]),
            ("RN", ["[1]", "[2]"]),
            ("RA", ["Auth1;", "Auth2;"]),
            ("RL", ["J Mol Biol", "Cell"]),
            ("RM", ["11469857", "12007400"])
        ])
        self.GS = {"AC": OrderedDict([("seq1", "111"), ("seq2", "222")])}
        self.GR = {"SS": OrderedDict([("seq1", "1110101111"),
                                      ("seq2", "0110101110")])}
        self.GC = {"SS_cons": "(((....)))"}
        self.st = StockholmAlignment(self.seqs, gc=self.GC, gf=self.GF,
                                     gs=self.GS, gr=self.GR)

    def test_retrieve_metadata(self):
        """Metadata passed to the constructor is exposed unchanged."""
        self.assertEqual(self.st.gc, self.GC)
        self.assertEqual(self.st.gf, self.GF)
        self.assertEqual(self.st.gs, self.GS)
        self.assertEqual(self.st.gr, self.GR)

    def test_from_file_alignment(self):
        # test that a basic stockholm file with interleaved alignment can be
        # parsed
        sto = StringIO("# STOCKHOLM 1.0\n"
                       "seq1      ACC-G\n"
                       "seq2      TCC-G\n\n"
                       "seq1      -GGTA\n"
                       "seq2      -GGCA\n//")
        obs_sto = next(StockholmAlignment.from_file(sto, DNA))
        exp_sto = StockholmAlignment(self.seqs)
        self.assertEqual(obs_sto, exp_sto)

    def test_from_file_GF(self):
        """GF lines parse; RN entries are auto-derived when absent."""
        # remove rn line to make sure auto-added
        self.GF.pop("RN")
        sto = StringIO("# STOCKHOLM 1.0\n#=GF RN [1]\n#=GF RM 11469857\n"
                       "#=GF RT TITLE1\n#=GF RA Auth1;\n#=GF RL J Mol Biol\n"
                       "#=GF RN [2]\n#=GF RM 12007400\n#=GF RT TITLE2\n"
                       "#=GF RA Auth2;\n#=GF RL Cell\n#=GF AC RF00360\n"
                       "#=GF BM cmbuild  -F CM SEED\n"
                       "#=GF BM cmsearch  -Z 274931 -E 1000000\n#=GF SQ 9\n"
                       "seq1         ACC-G-GGTA\nseq2         TCC-G-GGCA\n//")
        obs_sto = next(StockholmAlignment.from_file(sto, DNA))
        exp_sto = StockholmAlignment(self.seqs, self.GF, {}, {}, {})
        self.assertEqual(obs_sto, exp_sto)

    def test_from_file_GC(self):
        """GC (per-column) annotation lines are parsed."""
        sto = StringIO("# STOCKHOLM 1.0\n"
                       "seq1         ACC-G-GGTA\nseq2         TCC-G-GGCA\n"
                       "#=GC SS_cons (((....)))\n//")
        obs_sto = next(StockholmAlignment.from_file(sto, DNA))
        exp_sto = StockholmAlignment(self.seqs, {}, {}, {}, self.GC)
        self.assertEqual(obs_sto, exp_sto)

    def test_from_file_GS(self):
        """GS (per-sequence) annotation lines are parsed."""
        sto = StringIO("# STOCKHOLM 1.0\n#=GS seq2 AC 222\n#=GS seq1 AC 111\n"
                       "seq1          ACC-G-GGTA\n"
                       "seq2          TCC-G-GGCA\n//")
        obs_sto = next(StockholmAlignment.from_file(sto, DNA))
        exp_sto = StockholmAlignment(self.seqs, {}, self.GS, {}, {})
        self.assertEqual(obs_sto, exp_sto)

    def test_from_file_GR(self):
        """GR (per-residue) annotation lines are parsed, interleaved."""
        sto = StringIO("# STOCKHOLM 1.0\nseq1          ACC-G\n"
                       "#=GR seq1 SS  11101\nseq2          TCC-G\n"
                       "#=GR seq2 SS  01101\n\nseq1          -GGTA\n"
                       "#=GR seq1 SS  01111\nseq2          -GGCA\n"
                       "#=GR seq2 SS  01110\n//")
        obs_sto = next(StockholmAlignment.from_file(sto, DNA))
        exp_sto = StockholmAlignment(self.seqs, {}, {}, self.GR, {})
        self.assertEqual(obs_sto, exp_sto)

    def test_from_file_multi(self):
        """from_file yields each alignment of a multi-record file in turn."""
        sto = StringIO("# STOCKHOLM 1.0\n#=GS seq2 AC 222\n#=GS seq1 AC 111\n"
                       "seq1          ACC-G-GGTA\n"
                       "seq2          TCC-G-GGCA\n//\n"
                       "# STOCKHOLM 1.0\nseq1          ACC-G-GGTA\n"
                       "#=GR seq1 SS  1110101111\nseq2          TCC-G-GGCA\n"
                       "#=GR seq2 SS  0110101110\n//")
        obs_sto = StockholmAlignment.from_file(sto, DNA)
        count = 0
        for obs in obs_sto:
            if count == 0:
                exp_sto = StockholmAlignment(self.seqs, {}, self.GS, {}, {})
                self.assertEqual(obs, exp_sto)
            elif count == 1:
                exp_sto = StockholmAlignment(self.seqs, {}, {}, self.GR, {})
                self.assertEqual(obs, exp_sto)
            else:
                raise AssertionError("More than 2 sto alignments parsed!")
            count += 1

    def test_parse_gf_multiline_nh(self):
        """Multiline NH (tree) GF entries are joined with spaces."""
        sto = ["#=GF TN MULTILINE TREE",
               "#=GF NH THIS IS FIRST", "#=GF NH THIS IS SECOND",
               "#=GF AC 1283394"]
        exp = {'TN': 'MULTILINE TREE',
               'NH': 'THIS IS FIRST THIS IS SECOND',
               'AC': '1283394'}
        self.assertEqual(self.st._parse_gf_info(sto), exp)

    def test_parse_gf_multiline_cc(self):
        """Multiline CC (comment) GF entries are joined with spaces."""
        sto = ["#=GF CC THIS IS FIRST", "#=GF CC THIS IS SECOND"]
        exp = {'CC': 'THIS IS FIRST THIS IS SECOND'}
        self.assertEqual(self.st._parse_gf_info(sto), exp)

    def test_parse_gf_info_nongf(self):
        """_parse_gf_info rejects lines that are not GF annotations."""
        sto = ["#=GF AC BLAAAAAAAHHH", "#=GC HUH THIS SHOULD NOT BE HERE"]
        with self.assertRaises(StockholmParseError):
            self.st._parse_gf_info(sto)

    def test_parse_gf_info_malformed(self):
        # too short of a line
        sto = ["#=GF AC", "#=GF"]
        with self.assertRaises(StockholmParseError):
            self.st._parse_gf_info(sto)

    def test_parse_gc_info_nongf(self):
        """_parse_gc_info rejects lines that are not GC annotations."""
        sto = ["#=GC AC BLAAAAAAAHHH", "#=GF HUH THIS SHOULD NOT BE HERE"]
        with self.assertRaises(StockholmParseError):
            # BUG FIX: this test previously called _parse_gf_info, so the GC
            # parser's rejection of non-GC lines was never exercised.
            self.st._parse_gc_info(sto)

    def test_parse_gc_info_strict_len(self):
        """strict=True rejects GC annotation shorter than the alignment."""
        sto = ["#=GC SS_cons (((..)))"]
        with self.assertRaises(StockholmParseError):
            self.st._parse_gc_info(sto, seqlen=20, strict=True)

    def test_parse_gc_info_strict_duplicate(self):
        """strict=True rejects duplicated GC annotation labels."""
        sto = ["#=GC SS_cons (((..)))", "#=GC SS_cons (((..)))"]
        with self.assertRaises(StockholmParseError):
            self.st._parse_gc_info(sto, seqlen=8, strict=True)

    def test_parse_gc_info_malformed(self):
        # too short of a line
        sto = ["#=GC AC BLAAAAAAAHHH", "#=GC"]
        with self.assertRaises(StockholmParseError):
            self.st._parse_gc_info(sto)

    def test_parse_gs_gr_info_mixed(self):
        """GS and GR lines may not be mixed in a single annotation block."""
        sto = ["#=GS seq1 AC BLAAA", "#=GR seq2 HUH THIS SHOULD NOT BE HERE"]
        with self.assertRaises(StockholmParseError):
            self.st._parse_gs_gr_info(sto)

    def test_parse_gs_gr_info_malformed(self):
        # too short of a line
        sto = ["#=GS AC BLAAAAAAAHHH", "#=GS"]
        with self.assertRaises(StockholmParseError):
            self.st._parse_gs_gr_info(sto)

    def test_parse_gs_gr_info_strict(self):
        """strict=True rejects GR annotation shorter than the alignment."""
        sto = ["#=GR seq1 SS  10101111", "#=GR seq2 SS  01101"]
        with self.assertRaises(StockholmParseError):
            self.st._parse_gs_gr_info(sto, seqlen=20, strict=True)

    def test_str(self):
        """str() renders all metadata classes in canonical Stockholm order."""
        st = StockholmAlignment(self.seqs, gc=self.GC, gf=self.GF, gs=self.GS,
                                gr=self.GR)
        obs = str(st)
        exp = ('# STOCKHOLM 1.0\n'
               '#=GF AC RF00360\n'
               '#=GF BM cmbuild  -F CM SEED\n'
               '#=GF BM cmsearch  -Z 274931 -E 1000000\n'
               '#=GF SQ 9\n'
               '#=GF RN [1]\n'
               '#=GF RM 11469857\n'
               '#=GF RT TITLE1\n'
               '#=GF RA Auth1;\n'
               '#=GF RL J Mol Biol\n'
               '#=GF RN [2]\n'
               '#=GF RM 12007400\n'
               '#=GF RT TITLE2\n'
               '#=GF RA Auth2;\n'
               '#=GF RL Cell\n'
               '#=GS seq1 AC 111\n'
               '#=GS seq2 AC 222\n'
               'seq1          ACC-G-GGTA\n'
               '#=GR seq1 SS  1110101111\n'
               'seq2          TCC-G-GGCA\n'
               '#=GR seq2 SS  0110101110\n'
               '#=GC SS_cons  (((....)))\n//')
        self.assertEqual(obs, exp)

    def test_to_file(self):
        """to_file writes the same serialization str() produces."""
        st = StockholmAlignment(self.seqs, gc=self.GC, gf=self.GF, gs=self.GS,
                                gr=self.GR)

        with tempfile.NamedTemporaryFile('r+') as temp_file:
            st.to_file(temp_file)
            temp_file.flush()
            temp_file.seek(0)
            obs = temp_file.read()
            exp = ('# STOCKHOLM 1.0\n'
                   '#=GF AC RF00360\n'
                   '#=GF BM cmbuild  -F CM SEED\n'
                   '#=GF BM cmsearch  -Z 274931 -E 1000000\n'
                   '#=GF SQ 9\n'
                   '#=GF RN [1]\n'
                   '#=GF RM 11469857\n'
                   '#=GF RT TITLE1\n'
                   '#=GF RA Auth1;\n'
                   '#=GF RL J Mol Biol\n'
                   '#=GF RN [2]\n'
                   '#=GF RM 12007400\n'
                   '#=GF RT TITLE2\n'
                   '#=GF RA Auth2;\n'
                   '#=GF RL Cell\n'
                   '#=GS seq1 AC 111\n'
                   '#=GS seq2 AC 222\n'
                   'seq1          ACC-G-GGTA\n'
                   '#=GR seq1 SS  1110101111\n'
                   'seq2          TCC-G-GGCA\n'
                   '#=GR seq2 SS  0110101110\n'
                   '#=GC SS_cons  (((....)))\n//')
        self.assertEqual(obs, exp)

    def test_str_gc(self):
        """str() with only GC metadata."""
        st = StockholmAlignment(self.seqs, gc=self.GC, gf=None, gs=None,
                                gr=None)
        obs = str(st)
        exp = ("# STOCKHOLM 1.0\nseq1          ACC-G-GGTA\n"
               "seq2          TCC-G-GGCA\n"
               "#=GC SS_cons  (((....)))\n//")
        self.assertEqual(obs, exp)

    def test_str_gf(self):
        """str() with only GF metadata."""
        st = StockholmAlignment(self.seqs, gc=None, gf=self.GF, gs=None,
                                gr=None)
        obs = str(st)
        exp = ('# STOCKHOLM 1.0\n'
               '#=GF AC RF00360\n'
               '#=GF BM cmbuild  -F CM SEED\n'
               '#=GF BM cmsearch  -Z 274931 -E 1000000\n'
               '#=GF SQ 9\n'
               '#=GF RN [1]\n'
               '#=GF RM 11469857\n'
               '#=GF RT TITLE1\n'
               '#=GF RA Auth1;\n'
               '#=GF RL J Mol Biol\n'
               '#=GF RN [2]\n'
               '#=GF RM 12007400\n'
               '#=GF RT TITLE2\n'
               '#=GF RA Auth2;\n'
               '#=GF RL Cell\n'
               'seq1          ACC-G-GGTA\n'
               'seq2          TCC-G-GGCA\n//')
        self.assertEqual(obs, exp)

    def test_str_gs(self):
        """str() with only GS metadata."""
        st = StockholmAlignment(self.seqs, gc=None, gf=None, gs=self.GS,
                                gr=None)
        obs = str(st)
        exp = ('# STOCKHOLM 1.0\n'
               '#=GS seq1 AC 111\n'
               '#=GS seq2 AC 222\n'
               'seq1          ACC-G-GGTA\n'
               'seq2          TCC-G-GGCA\n//')
        self.assertEqual(obs, exp)

    def test_str_gr(self):
        """str() with only GR metadata."""
        st = StockholmAlignment(self.seqs, gc=None, gf=None, gs=None,
                                gr=self.GR)
        obs = str(st)
        exp = ("# STOCKHOLM 1.0\nseq1          ACC-G-GGTA\n"
               "#=GR seq1 SS  1110101111\nseq2          TCC-G-GGCA\n"
               "#=GR seq2 SS  0110101110\n//")
        self.assertEqual(obs, exp)

    def test_str_trees(self):
        """Paired TN/NH tree entries are interleaved in the output."""
        GF = OrderedDict({"NH": ["IMATREE", "IMATREETOO"],
                          "TN": ["Tree2", "Tree1"]})
        st = StockholmAlignment(self.seqs, gc=None, gf=GF, gs=None,
                                gr=None)
        obs = str(st)
        exp = ("# STOCKHOLM 1.0\n#=GF TN Tree2\n#=GF NH IMATREE\n#=GF TN Tree1"
               "\n#=GF NH IMATREETOO\nseq1          ACC-G-GGTA\n"
               "seq2          TCC-G-GGCA\n//")

        self.assertEqual(obs, exp)
+
+
# Allow running this test module directly as a script.
if __name__ == "__main__":
    main()
diff --git a/skbio/alignment/tests/test_pairwise.py b/skbio/alignment/tests/test_pairwise.py
new file mode 100644
index 0000000..eed59da
--- /dev/null
+++ b/skbio/alignment/tests/test_pairwise.py
@@ -0,0 +1,585 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from unittest import TestCase, main
+import warnings
+
+import numpy as np
+
+from skbio import Protein, DNA, BiologicalSequence, Alignment
+from skbio.alignment import (
+    global_pairwise_align_protein, local_pairwise_align_protein,
+    global_pairwise_align_nucleotide, local_pairwise_align_nucleotide,
+    make_identity_substitution_matrix)
+from skbio.alignment._pairwise import (
+    _init_matrices_sw, _init_matrices_nw,
+    _compute_score_and_traceback_matrices, _traceback, _first_largest,
+    _get_seq_id, _compute_substitution_score)
+
+
+class PairwiseAlignmentTests(TestCase):
+    """
+        Note: In the high-level tests, the expected results were derived with
+        assistance from the EMBOSS web server:
+        http://www.ebi.ac.uk/Tools/psa/emboss_needle/
+        http://www.ebi.ac.uk/Tools/psa/emboss_water/
+        In some cases, placement of non-gap characters surrounded by gap
+        characters are slighly different between scikit-bio and the EMBOSS
+        server. These differences arise from arbitrary implementation
+        differences, and always result in the same score (which tells us that
+        the alignments are equivalent). In cases where the expected results
+        included here differ from those generated by the EMBOSS server, I note
+        the EMBOSS result as a comment below the expected value.
+
+    """
+
+    def setUp(self):
+        """Ignore warnings during tests."""
+        warnings.simplefilter("ignore")
+
+    def tearDown(self):
+        """Clear the list of warning filters, so that no filters are active."""
+        warnings.resetwarnings()
+
+    def test_make_identity_substitution_matrix(self):
+        expected = {'A': {'A':  1, 'C': -2, 'G': -2, 'T': -2, 'U': -2},
+                    'C': {'A': -2, 'C':  1, 'G': -2, 'T': -2, 'U': -2},
+                    'G': {'A': -2, 'C': -2, 'G':  1, 'T': -2, 'U': -2},
+                    'T': {'A': -2, 'C': -2, 'G': -2, 'T':  1, 'U': -2},
+                    'U': {'A': -2, 'C': -2, 'G': -2, 'T': -2, 'U':  1}}
+        self.assertEqual(make_identity_substitution_matrix(1, -2), expected)
+
+        expected = {'A': {'A':  5, 'C': -4, 'G': -4, 'T': -4, 'U': -4},
+                    'C': {'A': -4, 'C':  5, 'G': -4, 'T': -4, 'U': -4},
+                    'G': {'A': -4, 'C': -4, 'G':  5, 'T': -4, 'U': -4},
+                    'T': {'A': -4, 'C': -4, 'G': -4, 'T':  5, 'U': -4},
+                    'U': {'A': -4, 'C': -4, 'G': -4, 'T': -4, 'U':  5}}
+        self.assertEqual(make_identity_substitution_matrix(5, -4), expected)
+
+    def test_global_pairwise_align_protein(self):
+        expected = ("HEAGAWGHEE-", "---PAW-HEAE", 23.0)
+        actual = global_pairwise_align_protein(
+            "HEAGAWGHEE", "PAWHEAE", gap_open_penalty=10.,
+            gap_extend_penalty=5.)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
+        self.assertEqual(actual.ids(), list('01'))
+
+        expected = ("HEAGAWGHE-E", "---PAW-HEAE", 30.0)
+        # EMBOSS result: P---AW-HEAE
+        actual = global_pairwise_align_protein(
+            "HEAGAWGHEE", "PAWHEAE", gap_open_penalty=5.,
+            gap_extend_penalty=0.5)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
+        self.assertEqual(actual.ids(), list('01'))
+
+        # Protein (rather than str) as input
+        expected = ("HEAGAWGHEE-", "---PAW-HEAE", 23.0)
+        actual = global_pairwise_align_protein(
+            Protein("HEAGAWGHEE", "s1"), Protein("PAWHEAE", "s2"),
+            gap_open_penalty=10., gap_extend_penalty=5.)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
+        self.assertEqual(actual.ids(), ["s1", "s2"])
+
+        # One Alignment and one Protein as input
+        expected = ("HEAGAWGHEE-", "---PAW-HEAE", 23.0)
+        actual = global_pairwise_align_protein(
+            Alignment([Protein("HEAGAWGHEE", "s1")]),
+            Protein("PAWHEAE", "s2"),
+            gap_open_penalty=10., gap_extend_penalty=5.)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
+        self.assertEqual(actual.ids(), ["s1", "s2"])
+
+        # One single-sequence alignment as input and one double-sequence
+        # alignment as input. Score confirmed manually.
+        expected = ("HEAGAWGHEE-", "HDAGAWGHDE-", "---PAW-HEAE", 21.0)
+        actual = global_pairwise_align_protein(
+            Alignment([Protein("HEAGAWGHEE", "s1"),
+                       Protein("HDAGAWGHDE", "s2")]),
+            Alignment([Protein("PAWHEAE", "s3")]),
+            gap_open_penalty=10., gap_extend_penalty=5.)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(str(actual[2]), expected[2])
+        self.assertEqual(actual.score(), expected[3])
+        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
+        self.assertEqual(actual.ids(), ["s1", "s2", "s3"])
+
+        # ids are provided if they're not passed in
+        actual = global_pairwise_align_protein(
+            Protein("HEAGAWGHEE"), Protein("PAWHEAE"),
+            gap_open_penalty=10., gap_extend_penalty=5.)
+        self.assertEqual(actual.ids(), list('01'))
+
+        # TypeError on invalid input
+        self.assertRaises(TypeError, global_pairwise_align_protein,
+                          42, "HEAGAWGHEE")
+        self.assertRaises(TypeError, global_pairwise_align_protein,
+                          "HEAGAWGHEE", 42)
+
+    def test_global_pairwise_align_protein_penalize_terminal_gaps(self):
+        expected = ("HEAGAWGHEE", "---PAWHEAE", 1.0)
+        actual = global_pairwise_align_protein(
+            "HEAGAWGHEE", "PAWHEAE", gap_open_penalty=10.,
+            gap_extend_penalty=5., penalize_terminal_gaps=True)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(0, 9), (0, 6)])
+        self.assertEqual(actual.ids(), list('01'))
+
+    def test_global_pairwise_align_nucleotide_penalize_terminal_gaps(self):
+        # in these tests one sequence is about 3x the length of the other.
+        # we toggle penalize_terminal_gaps to confirm that it results in
+        # different alignments and alignment scores.
+        seq1 = "ACCGTGGACCGTTAGGATTGGACCCAAGGTTG"
+        seq2 = "T"*25 + "ACCGTGGACCGTAGGATTGGACCAAGGTTA" + "A"*25
+
+        aln1 = ("-------------------------ACCGTGGACCGTTAGGA"
+                "TTGGACCCAAGGTTG-------------------------")
+        aln2 = ("TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA"
+                "TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA")
+        expected = (aln1, aln2, 131.0)
+        actual = global_pairwise_align_nucleotide(
+            seq1, seq2, gap_open_penalty=5., gap_extend_penalty=0.5,
+            match_score=5, mismatch_score=-4, penalize_terminal_gaps=False)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+
+        aln1 = ("-------------------------ACCGTGGACCGTTAGGA"
+                "TTGGACCCAAGGTT-------------------------G")
+        aln2 = ("TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA"
+                "TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA")
+        expected = (aln1, aln2, 97.0)
+        actual = global_pairwise_align_nucleotide(
+            seq1, seq2, gap_open_penalty=5., gap_extend_penalty=0.5,
+            match_score=5, mismatch_score=-4, penalize_terminal_gaps=True)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+
+    def test_local_pairwise_align_protein(self):
+        expected = ("AWGHE", "AW-HE", 26.0, 4, 1)
+        actual = local_pairwise_align_protein(
+            "HEAGAWGHEE", "PAWHEAE", gap_open_penalty=10.,
+            gap_extend_penalty=5.)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(4, 8), (1, 4)])
+        self.assertEqual(actual.ids(), list('01'))
+
+        expected = ("AWGHE-E", "AW-HEAE", 32.0, 4, 1)
+        actual = local_pairwise_align_protein(
+            "HEAGAWGHEE", "PAWHEAE", gap_open_penalty=5.,
+            gap_extend_penalty=0.5)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(4, 9), (1, 6)])
+        self.assertEqual(actual.ids(), list('01'))
+
+        expected = ("AWGHE", "AW-HE", 26.0, 4, 1)
+        # Protein (rather than str) as input
+        actual = local_pairwise_align_protein(
+            Protein("HEAGAWGHEE", "s1"), Protein("PAWHEAE", "s2"),
+            gap_open_penalty=10., gap_extend_penalty=5.)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(4, 8), (1, 4)])
+        self.assertEqual(actual.ids(), ["s1", "s2"])
+
+        # Fails when either input is passed as an Alignment
+        self.assertRaises(TypeError, local_pairwise_align_protein,
+                          Alignment([Protein("HEAGAWGHEE", "s1")]),
+                          Protein("PAWHEAE", "s2"), gap_open_penalty=10.,
+                          gap_extend_penalty=5.)
+        self.assertRaises(TypeError, local_pairwise_align_protein,
+                          Protein("HEAGAWGHEE", "s1"),
+                          Alignment([Protein("PAWHEAE", "s2")]),
+                          gap_open_penalty=10., gap_extend_penalty=5.)
+
+        # ids are provided if they're not passed in
+        actual = local_pairwise_align_protein(
+            Protein("HEAGAWGHEE"), Protein("PAWHEAE"),
+            gap_open_penalty=10., gap_extend_penalty=5.)
+        self.assertEqual(actual.ids(), list('01'))
+
+        # TypeError on invalid input
+        self.assertRaises(TypeError, local_pairwise_align_protein,
+                          42, "HEAGAWGHEE")
+        self.assertRaises(TypeError, local_pairwise_align_protein,
+                          "HEAGAWGHEE", 42)
+
+    def test_global_pairwise_align_nucleotide(self):
+        expected = ("G-ACCTTGACCAGGTACC", "GAACTTTGAC---GTAAC", 41.0, 0, 0)
+        actual = global_pairwise_align_nucleotide(
+            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=5.,
+            gap_extend_penalty=0.5, match_score=5, mismatch_score=-4)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(0, 16), (0, 14)])
+        self.assertEqual(actual.ids(), list('01'))
+
+        expected = ("-GACCTTGACCAGGTACC", "GAACTTTGAC---GTAAC", 32.0, 0, 0)
+        actual = global_pairwise_align_nucleotide(
+            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
+            gap_extend_penalty=0.5, match_score=5, mismatch_score=-4)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(0, 16), (0, 14)])
+        self.assertEqual(actual.ids(), list('01'))
+
+        # DNA (rather than str) as input
+        expected = ("-GACCTTGACCAGGTACC", "GAACTTTGAC---GTAAC", 32.0, 0, 0)
+        actual = global_pairwise_align_nucleotide(
+            DNA("GACCTTGACCAGGTACC", "s1"), DNA("GAACTTTGACGTAAC", "s2"),
+            gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
+            mismatch_score=-4)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(0, 16), (0, 14)])
+        self.assertEqual(actual.ids(), ["s1", "s2"])
+
+        # Align one DNA sequence and one Alignment, score computed manually
+        expected = ("-GACCTTGACCAGGTACC", "-GACCATGACCAGGTACC",
+                    "GAACTTTGAC---GTAAC", 27.5, 0, 0)
+        actual = global_pairwise_align_nucleotide(
+            Alignment([DNA("GACCTTGACCAGGTACC", "s1"),
+                       DNA("GACCATGACCAGGTACC", "s2")]),
+            DNA("GAACTTTGACGTAAC", "s3"),
+            gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
+            mismatch_score=-4)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(str(actual[2]), expected[2])
+        self.assertEqual(actual.score(), expected[3])
+        self.assertEqual(actual.start_end_positions(), [(0, 16), (0, 14)])
+        self.assertEqual(actual.ids(), ["s1", "s2", "s3"])
+
+        # ids are provided if they're not passed in
+        actual = global_pairwise_align_nucleotide(
+            DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+            gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
+            mismatch_score=-4)
+        self.assertEqual(actual.ids(), list('01'))
+
+        # TypeError on invalid input
+        self.assertRaises(TypeError, global_pairwise_align_nucleotide,
+                          42, "HEAGAWGHEE")
+        self.assertRaises(TypeError, global_pairwise_align_nucleotide,
+                          "HEAGAWGHEE", 42)
+
+    def test_local_pairwise_align_nucleotide(self):
+        expected = ("ACCTTGACCAGGTACC", "ACTTTGAC---GTAAC", 41.0, 1, 2)
+        actual = local_pairwise_align_nucleotide(
+            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=5.,
+            gap_extend_penalty=0.5, match_score=5, mismatch_score=-4)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(1, 16), (2, 14)])
+        self.assertEqual(actual.ids(), list('01'))
+
+        expected = ("ACCTTGAC", "ACTTTGAC", 31.0, 1, 2)
+        actual = local_pairwise_align_nucleotide(
+            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
+            gap_extend_penalty=5., match_score=5, mismatch_score=-4)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(1, 8), (2, 9)])
+        self.assertEqual(actual.ids(), list('01'))
+
+        # DNA (rather than str) as input
+        expected = ("ACCTTGAC", "ACTTTGAC", 31.0, 1, 2)
+        actual = local_pairwise_align_nucleotide(
+            DNA("GACCTTGACCAGGTACC", "s1"), DNA("GAACTTTGACGTAAC", "s2"),
+            gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
+            mismatch_score=-4)
+        self.assertEqual(str(actual[0]), expected[0])
+        self.assertEqual(str(actual[1]), expected[1])
+        self.assertEqual(actual.score(), expected[2])
+        self.assertEqual(actual.start_end_positions(), [(1, 8), (2, 9)])
+        self.assertEqual(actual.ids(), ["s1", "s2"])
+
+        # Fails when either input is passed as an Alignment
+        self.assertRaises(TypeError, local_pairwise_align_nucleotide,
+                          Alignment([DNA("GACCTTGACCAGGTACC", "s1")]),
+                          DNA("GAACTTTGACGTAAC", "s2"),
+                          gap_open_penalty=10., gap_extend_penalty=5.,
+                          match_score=5, mismatch_score=-4)
+        self.assertRaises(TypeError, local_pairwise_align_nucleotide,
+                          DNA("GACCTTGACCAGGTACC", "s1"),
+                          Alignment([DNA("GAACTTTGACGTAAC", "s2")]),
+                          gap_open_penalty=10., gap_extend_penalty=5.,
+                          match_score=5, mismatch_score=-4)
+
+        # ids are provided if they're not passed in
+        actual = local_pairwise_align_nucleotide(
+            DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+            gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
+            mismatch_score=-4)
+        self.assertEqual(actual.ids(), list('01'))
+
+        # TypeError on invalid input
+        self.assertRaises(TypeError, local_pairwise_align_nucleotide,
+                          42, "HEAGAWGHEE")
+        self.assertRaises(TypeError, local_pairwise_align_nucleotide,
+                          "HEAGAWGHEE", 42)
+
+    def test_nucleotide_aligners_use_substitution_matrices(self):
+        alt_sub = make_identity_substitution_matrix(10, -10)
+        # alternate substitution matrix yields different alignment (the
+        # aligned sequences and the scores are different) with local alignment
+        actual_no_sub = local_pairwise_align_nucleotide(
+            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
+            gap_extend_penalty=5., match_score=5, mismatch_score=-4)
+        actual_alt_sub = local_pairwise_align_nucleotide(
+            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
+            gap_extend_penalty=5., match_score=5, mismatch_score=-4,
+            substitution_matrix=alt_sub)
+        self.assertNotEqual(str(actual_no_sub[0]), str(actual_alt_sub[0]))
+        self.assertNotEqual(str(actual_no_sub[1]), str(actual_alt_sub[1]))
+        self.assertNotEqual(actual_no_sub.score(),
+                            actual_alt_sub.score())
+
+        # alternate substitution matrix yields different alignment (the
+        # aligned sequences and the scores are different) with global alignment
+        actual_no_sub = local_pairwise_align_nucleotide(
+            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
+            gap_extend_penalty=5., match_score=5, mismatch_score=-4)
+        actual_alt_sub = global_pairwise_align_nucleotide(
+            "GACCTTGACCAGGTACC", "GAACTTTGACGTAAC", gap_open_penalty=10.,
+            gap_extend_penalty=5., match_score=5, mismatch_score=-4,
+            substitution_matrix=alt_sub)
+        self.assertNotEqual(str(actual_no_sub[0]), str(actual_alt_sub[0]))
+        self.assertNotEqual(str(actual_no_sub[1]), str(actual_alt_sub[1]))
+        self.assertNotEqual(actual_no_sub.score(),
+                            actual_alt_sub.score())
+
+    def test_init_matrices_sw(self):
+        expected_score_m = np.zeros((5, 4))
+        expected_tback_m = [[0, 0, 0, 0],
+                            [0, -1, -1, -1],
+                            [0, -1, -1, -1],
+                            [0, -1, -1, -1],
+                            [0, -1, -1, -1]]
+        actual_score_m, actual_tback_m = _init_matrices_sw(
+            Alignment([DNA('AAA')]), Alignment([DNA('AAAA')]), 5, 2)
+        np.testing.assert_array_equal(actual_score_m, expected_score_m)
+        np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
+
+    def test_init_matrices_nw(self):
+        expected_score_m = [[0, -5, -7, -9],
+                            [-5, 0, 0, 0],
+                            [-7, 0, 0, 0],
+                            [-9, 0, 0, 0],
+                            [-11, 0, 0, 0]]
+        expected_tback_m = [[0, 3, 3, 3],
+                            [2, -1, -1, -1],
+                            [2, -1, -1, -1],
+                            [2, -1, -1, -1],
+                            [2, -1, -1, -1]]
+        actual_score_m, actual_tback_m = _init_matrices_nw(
+            Alignment([DNA('AAA')]), Alignment([DNA('AAAA')]), 5, 2)
+        np.testing.assert_array_equal(actual_score_m, expected_score_m)
+        np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
+
+    def test_compute_substitution_score(self):
+        # these results were computed manually
+        subs_m = make_identity_substitution_matrix(5, -4)
+        self.assertEqual(
+            _compute_substitution_score(['A'], ['A'], subs_m, 0), 5.0)
+        self.assertEqual(
+            _compute_substitution_score(['A', 'A'], ['A'], subs_m, 0), 5.0)
+        self.assertEqual(
+            _compute_substitution_score(['A', 'C'], ['A'], subs_m, 0), 0.5)
+        self.assertEqual(
+            _compute_substitution_score(['A', 'C'], ['A', 'C'], subs_m, 0),
+            0.5)
+        self.assertEqual(
+            _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0),
+            2.5)
+        self.assertEqual(
+            _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 1), 3)
+
+        # alt subs_m
+        subs_m = make_identity_substitution_matrix(1, -2)
+        self.assertEqual(
+            _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0),
+            0.5)
+
+    def test_compute_score_and_traceback_matrices(self):
+        # these results were computed manually
+        expected_score_m = [[0, -5, -7, -9],
+                            [-5, 2, -3, -5],
+                            [-7, -3, 4, -1],
+                            [-9, -5, -1, 6],
+                            [-11, -7, -3, 1]]
+        expected_tback_m = [[0, 3, 3, 3],
+                            [2, 1, 3, 3],
+                            [2, 2, 1, 3],
+                            [2, 2, 2, 1],
+                            [2, 2, 2, 2]]
+        m = make_identity_substitution_matrix(2, -1)
+        actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
+            Alignment([DNA('ACG')]),
+            Alignment([DNA('ACGT')]), 5, 2, m)
+        np.testing.assert_array_equal(actual_score_m, expected_score_m)
+        np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
+
+        # different sequences
+        # these results were computed manually
+        expected_score_m = [[0, -5, -7, -9],
+                            [-5, 2, -3, -5],
+                            [-7, -3, 4, -1],
+                            [-9, -5, -1, 3],
+                            [-11, -7, -3, -2]]
+        expected_tback_m = [[0, 3, 3, 3],
+                            [2, 1, 3, 3],
+                            [2, 2, 1, 3],
+                            [2, 2, 2, 1],
+                            [2, 2, 2, 1]]
+        m = make_identity_substitution_matrix(2, -1)
+        actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
+            Alignment([DNA('ACC')]),
+            Alignment([DNA('ACGT')]), 5, 2, m)
+        np.testing.assert_array_equal(actual_score_m, expected_score_m)
+        np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
+
+        # four sequences provided in two alignments
+        # these results were computed manually
+        expected_score_m = [[0, -5, -7, -9],
+                            [-5, 2, -3, -5],
+                            [-7, -3, 4, -1],
+                            [-9, -5, -1, 3],
+                            [-11, -7, -3, -2]]
+        expected_tback_m = [[0, 3, 3, 3],
+                            [2, 1, 3, 3],
+                            [2, 2, 1, 3],
+                            [2, 2, 2, 1],
+                            [2, 2, 2, 1]]
+        m = make_identity_substitution_matrix(2, -1)
+        actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
+            Alignment([DNA('ACC', 's1'), DNA('ACC', 's2')]),
+            Alignment([DNA('ACGT', 's3'), DNA('ACGT', 's4')]), 5, 2, m)
+        np.testing.assert_array_equal(actual_score_m, expected_score_m)
+        np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
+
+    def test_compute_score_and_traceback_matrices_invalid(self):
+        # if the sequence contains a character that is not in the
+        # substitution matrix, an informative error should be raised
+        m = make_identity_substitution_matrix(2, -1)
+        self.assertRaises(ValueError, _compute_score_and_traceback_matrices,
+                          Alignment([DNA('AWG')]),
+                          Alignment([DNA('ACGT')]), 5, 2, m)
+
+    def test_traceback(self):
+        score_m = [[0, -5, -7, -9],
+                   [-5, 2, -3, -5],
+                   [-7, -3, 4, -1],
+                   [-9, -5, -1, 6],
+                   [-11, -7, -3, 1]]
+        score_m = np.array(score_m)
+        tback_m = [[0, 3, 3, 3],
+                   [2, 1, 3, 3],
+                   [2, 2, 1, 3],
+                   [2, 2, 2, 1],
+                   [2, 2, 2, 2]]
+        tback_m = np.array(tback_m)
+        # start at bottom-right
+        expected = ([BiologicalSequence("ACG-")],
+                    [BiologicalSequence("ACGT")], 1, 0, 0)
+        actual = _traceback(tback_m, score_m, Alignment([DNA('ACG')]),
+                            Alignment([DNA('ACGT')]), 4, 3)
+        self.assertEqual(actual, expected)
+
+        # four sequences in two alignments
+        score_m = [[0, -5, -7, -9],
+                   [-5, 2, -3, -5],
+                   [-7, -3, 4, -1],
+                   [-9, -5, -1, 6],
+                   [-11, -7, -3, 1]]
+        score_m = np.array(score_m)
+        tback_m = [[0, 3, 3, 3],
+                   [2, 1, 3, 3],
+                   [2, 2, 1, 3],
+                   [2, 2, 2, 1],
+                   [2, 2, 2, 2]]
+        tback_m = np.array(tback_m)
+        # start at bottom-right
+        expected = ([BiologicalSequence("ACG-"), BiologicalSequence("ACG-")],
+                    [BiologicalSequence("ACGT"), BiologicalSequence("ACGT")],
+                    1, 0, 0)
+        actual = _traceback(tback_m, score_m,
+                            Alignment([DNA('ACG', 's1'), DNA('ACG', 's2')]),
+                            Alignment([DNA('ACGT', 's3'), DNA('ACGT', 's4')]),
+                            4, 3)
+        self.assertEqual(actual, expected)
+
+        # start at highest-score
+        expected = ([BiologicalSequence("ACG")],
+                    [BiologicalSequence("ACG")], 6, 0, 0)
+        actual = _traceback(tback_m, score_m, Alignment([DNA('ACG')]),
+                            Alignment([DNA('ACGT')]), 3, 3)
+        self.assertEqual(actual, expected)
+
+        # terminate traceback before top-right
+        tback_m = [[0, 3, 3, 3],
+                   [2, 1, 3, 3],
+                   [2, 2, 0, 3],
+                   [2, 2, 2, 1],
+                   [2, 2, 2, 2]]
+        tback_m = np.array(tback_m)
+        expected = ("G", "G", 6, 2, 2)
+        expected = ([BiologicalSequence("G")],
+                    [BiologicalSequence("G")], 6, 2, 2)
+        actual = _traceback(tback_m, score_m, Alignment([DNA('ACG')]),
+                            Alignment([DNA('ACGT')]), 3, 3)
+        self.assertEqual(actual, expected)
+
+    def test_get_seq_id(self):
+        self.assertEqual(_get_seq_id("AAA", "hello"), "hello")
+        self.assertEqual(_get_seq_id(DNA("AAA"), "hello"), "hello")
+        self.assertEqual(_get_seq_id(DNA("AAA", "s1"), "hello"), "s1")
+
+    def test_first_largest(self):
+        l = [(5, 'a'), (5, 'b'), (5, 'c')]
+        self.assertEqual(_first_largest(l), (5, 'a'))
+        l = [(5, 'c'), (5, 'b'), (5, 'a')]
+        self.assertEqual(_first_largest(l), (5, 'c'))
+        l = [(5, 'c'), (6, 'b'), (5, 'a')]
+        self.assertEqual(_first_largest(l), (6, 'b'))
+        # works for more than three entries
+        l = [(5, 'c'), (6, 'b'), (5, 'a'), (7, 'd')]
+        self.assertEqual(_first_largest(l), (7, 'd'))
+        # Note that max([(5, 'a'), (5, 'c')]) == max([(5, 'c'), (5, 'a')])
+        # but for the purposes needed here, we want the max to be the same
+        # regardless of what the second item in the tuple is.
+
+if __name__ == "__main__":
+    main()
diff --git a/skbio/alignment/tests/test_ssw.py b/skbio/alignment/tests/test_ssw.py
new file mode 100644
index 0000000..2f6a66b
--- /dev/null
+++ b/skbio/alignment/tests/test_ssw.py
@@ -0,0 +1,739 @@
+# -----------------------------------------------------------------------------
+#  Copyright (c) 2013--, scikit-bio development team.
+#
+#  Distributed under the terms of the Modified BSD License.
+#
+#  The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+# Special thanks to http://www.faculty.ucr.edu/~mmaduro/random.htm for the
+# random DNA generator.
+
+# These tests confirm that StripedSmithWaterman returns the same results as
+# SSW. We don't test for correctness of those results (i.e., we assume that
+# ssw.c and ssw.h are correct) as that testing is beyond the scope of skbio.
+# Furthermore, all expected results are created by running StripedSmithWaterman;
+# the resulting alignments are verified by hand. Creating tests from the base
+# C API is impractical at this time.
+
+from unittest import TestCase, main
+
+from skbio import local_pairwise_align_ssw
+from skbio.alignment import StripedSmithWaterman, AlignmentStructure
+from skbio.alignment._pairwise import blosum50
+
+
+class TestSSW(TestCase):
+    """Shared fixtures and assertion helpers for the SSW test cases below."""
+
+    # Every attribute an alignment result is expected to expose, both via
+    # attribute access and via __getitem__.
+    align_attributes = [
+        "optimal_alignment_score", "suboptimal_alignment_score",
+        "target_begin", "target_end_optimal", "target_end_suboptimal",
+        "query_begin", "query_end", "cigar", "query_sequence",
+        "target_sequence"
+    ]
+
+    def _check_alignment(self, alignment, expected):
+        """Assert every attribute of `alignment` matches `expected`."""
+        for attribute in self.align_attributes:
+            # The first element of this tuple is to identify
+            # the broken sequence if one should fail
+            self.assertEqual((expected['target_sequence'],
+                              expected[attribute]),
+                             (alignment['target_sequence'],
+                              alignment[attribute]))
+
+    def _check_argument_with_inequality_on_optimal_align_score(
+            self,
+            query_sequences=None,
+            target_sequences=None,
+            arg=None,
+            default=None,
+            i_range=None,
+            compare_lt=None,
+            compare_gt=None):
+        """Sweep `arg` over `i_range` and compare scores against the default.
+
+        For each query/target pair, align once with `arg=i` and once with
+        `arg=default`, then check the optimal score with `compare_lt` when
+        i < default, equality when i == default, and `compare_gt` otherwise.
+        """
+        iterable_kwarg = {}
+        default_kwarg = {}
+        default_kwarg[arg] = default
+        for query_sequence in query_sequences:
+            for target_sequence in target_sequences:
+                for i in i_range:
+                    iterable_kwarg[arg] = i
+                    query1 = StripedSmithWaterman(query_sequence,
+                                                  **iterable_kwarg)
+                    align1 = query1(target_sequence)
+
+                    query2 = StripedSmithWaterman(query_sequence,
+                                                  **default_kwarg)
+                    align2 = query2(target_sequence)
+
+                    if i == default:
+                        self.assertEqual(align1.optimal_alignment_score,
+                                         align2.optimal_alignment_score)
+                    if i < default:
+                        compare_lt(align1.optimal_alignment_score,
+                                   align2.optimal_alignment_score)
+                    if i > default:
+                        compare_gt(align1.optimal_alignment_score,
+                                   align2.optimal_alignment_score)
+
+    # NOTE(review): mutable default arguments below; safe here because both
+    # are only iterated, never mutated, but consider `None` sentinels.
+    def _check_bit_flag_sets_properties_falsy_or_negative(
+            self,
+            query_sequences=None,
+            target_sequences=None,
+            arg_settings=[],
+            properties_to_null=[]):
+        """Check that `arg_settings` null exactly `properties_to_null`.
+
+        "Nulled" means falsy for non-int attributes and negative for ints;
+        every attribute NOT listed must remain truthy/non-negative.
+        """
+        kwarg = {}
+
+        def falsy_or_negative(alignment, prop):
+            # int properties signal "unset" with a negative value rather
+            # than a falsy one (0 is a legitimate index/score).
+            if type(alignment[prop]) is int:
+                return alignment[prop] < 0
+            else:
+                return not alignment[prop]
+
+        for query_sequence in query_sequences:
+            for target_sequence in target_sequences:
+                for arg, setting in arg_settings:
+                    kwarg[arg] = setting
+                query = StripedSmithWaterman(query_sequence, **kwarg)
+                alignment = query(target_sequence)
+                for prop in properties_to_null:
+                    self.assertTrue(falsy_or_negative(alignment, prop))
+                # Every property not in our null list
+                for prop in [p for p in self.align_attributes
+                             if p not in properties_to_null]:
+                    self.assertFalse(falsy_or_negative(alignment, prop))
+
+
+class TestStripedSmithWaterman(TestSSW):
+    """Regression tests for the StripedSmithWaterman query object."""
+
+    def test_object_is_reusable(self):
+        """One query object must align many targets without state leaking."""
+        q_seq = "AGGGTAATTAGGCGTGTTCACCTA"
+        expected_alignments = [
+            {
+                'optimal_alignment_score': 10,
+                'suboptimal_alignment_score': 10,
+                'query_begin': 4,
+                'query_end': 8,
+                'target_begin': 3,
+                'target_end_optimal': 7,
+                'target_end_suboptimal': 34,
+                'cigar': '5M',
+                'query_sequence': q_seq,
+                'target_sequence': ('TTATAATTTTCTTATTATTATCAATATTTATAATTTGATTT'
+                                    'TGTTGTAAT')
+            },
+            {
+                'optimal_alignment_score': 36,
+                'suboptimal_alignment_score': 16,
+                'query_begin': 0,
+                'query_end': 23,
+                'target_begin': 6,
+                'target_end_optimal': 29,
+                'target_end_suboptimal': 13,
+                'cigar': '8M1D8M1I7M',
+                'query_sequence': q_seq,
+                'target_sequence': 'AGTCGAAGGGTAATATAGGCGTGTCACCTA'
+            },
+            {
+                'optimal_alignment_score': 16,
+                'suboptimal_alignment_score': 0,
+                'query_begin': 0,
+                'query_end': 7,
+                'target_begin': 6,
+                'target_end_optimal': 13,
+                'target_end_suboptimal': 0,
+                'cigar': '8M',
+                'query_sequence': q_seq,
+                'target_sequence': 'AGTCGAAGGGTAATA'
+            },
+            {
+                'optimal_alignment_score': 8,
+                'suboptimal_alignment_score': 8,
+                'query_begin': 0,
+                'query_end': 3,
+                'target_begin': 7,
+                'target_end_optimal': 10,
+                'target_end_suboptimal': 42,
+                'cigar': '4M',
+                'query_sequence': q_seq,
+                'target_sequence': ('CTGCCTCAGGGGGAGGAAAGCGTCAGCGCGGCTGCCGTCGG'
+                                    'CGCAGGGGC')
+            },
+            {
+                'optimal_alignment_score': 48,
+                'suboptimal_alignment_score': 16,
+                'query_begin': 0,
+                'query_end': 23,
+                'target_begin': 0,
+                'target_end_optimal': 23,
+                'target_end_suboptimal': 7,
+                'cigar': '24M',
+                'query_sequence': q_seq,
+                'target_sequence': q_seq
+            }
+        ]
+        query = StripedSmithWaterman(q_seq)
+        results = []
+        for expected in expected_alignments:
+            alignment = query(expected['target_sequence'])
+            results.append(alignment)
+
+        for result, expected in zip(results, expected_alignments):
+            self._check_alignment(result, expected)
+
+    def test_regression_on_instantiation_arguments(self):
+        """Passing every constructor kwarg explicitly matches known output."""
+        expected = {
+            'optimal_alignment_score': 23,
+            'suboptimal_alignment_score': 10,
+            'query_begin': 0,
+            'query_end': 16,
+            'target_begin': 0,
+            'target_end_optimal': 20,
+            'target_end_suboptimal': 4,
+            'cigar': '6M4D11M',
+            'query_sequence': 'AAACGATAAATCCGCGTA',
+            'target_sequence': 'AAACGACTACTAAATCCGCGTGATAGGGGA'
+        }
+        query = StripedSmithWaterman(expected['query_sequence'],
+                                     gap_open_penalty=5,
+                                     gap_extend_penalty=2,
+                                     score_size=2,
+                                     mask_length=15,
+                                     mask_auto=True,
+                                     score_only=False,
+                                     score_filter=None,
+                                     distance_filter=None,
+                                     override_skip_babp=False,
+                                     protein=False,
+                                     match_score=2,
+                                     mismatch_score=-3,
+                                     substitution_matrix=None,
+                                     suppress_sequences=False,
+                                     zero_index=True)
+        alignment = query(expected['target_sequence'])
+        self._check_alignment(alignment, expected)
+
+    def test_protein_sequence_is_usable(self):
+        """protein=True with a substitution matrix aligns protein sequences."""
+        expected = {
+            'optimal_alignment_score': 316,
+            'suboptimal_alignment_score': 95,
+            'query_begin': 0,
+            'query_end': 52,
+            'target_begin': 0,
+            'target_end_optimal': 52,
+            'target_end_suboptimal': 18,
+            'cigar': '15M1D15M1I22M',
+            'query_sequence': ('VHLTGEEKSAVAALWGKVNVDEVGGEALGRXLLVVYPWTQRFFESF'
+                               'SDLSTPDABVMSNPKVKAHGK'),
+            'target_sequence': ('VHLTPEEKSAVTALWBGKVNVDEVGGEALGRLLVVYPWTQRFFES'
+                                'FGDLSTPD*')
+        }
+        query = StripedSmithWaterman(expected['query_sequence'],
+                                     protein=True,
+                                     substitution_matrix=blosum50)
+        alignment = query(expected['target_sequence'])
+        self._check_alignment(alignment, expected)
+
+    def test_lowercase_is_valid_sequence(self):
+        """Lowercase nucleotide input aligns the same as uppercase."""
+        expected = {
+            'optimal_alignment_score': 23,
+            'suboptimal_alignment_score': 10,
+            'query_begin': 0,
+            'query_end': 16,
+            'target_begin': 0,
+            'target_end_optimal': 20,
+            'target_end_suboptimal': 4,
+            'cigar': '6M4D11M',
+            'query_sequence': 'aaacgataaatccgcgta',
+            'target_sequence': 'aaacgactactaaatccgcgtgatagggga'
+        }
+        query = StripedSmithWaterman(expected['query_sequence'])
+        alignment = query(expected['target_sequence'])
+        self._check_alignment(alignment, expected)
+
+    def test_align_with_N_in_nucleotide_sequence(self):
+        """Ambiguous 'N' bases are accepted in query and target sequences."""
+        expected = {
+            'optimal_alignment_score': 9,
+            'suboptimal_alignment_score': 0,
+            'query_begin': 0,
+            'query_end': 8,
+            'target_begin': 0,
+            'target_end_optimal': 9,
+            'target_end_suboptimal': 0,
+            'cigar': '4M1D5M',
+            'query_sequence': 'ACTCANNATCGANCTAGC',
+            'target_sequence': 'ACTCGAAAATGTNNGCA'
+        }
+        query = StripedSmithWaterman(expected['query_sequence'])
+        alignment = query(expected['target_sequence'])
+        self._check_alignment(alignment, expected)
+
+    def test_arg_match_score(self):
+        """A larger match_score must strictly increase the optimal score."""
+        query_sequences = [
+            "TTTTTTCTTATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
+            "AGTCGAAGGGTCAATATAGGCGTGTCACCTA",
+            "AGTCGAAGGGTAATA",
+            "CTGCCTCAAGGGGGAGGAAAGCGTCAGCGCGGCTGCCGTCGGCGCAGGGGC",
+            "AGGGTAATTTTAGGCGTGTTCACCTA"
+        ]
+        target_sequences = query_sequences
+        self._check_argument_with_inequality_on_optimal_align_score(
+            query_sequences=query_sequences,
+            target_sequences=target_sequences,
+            arg='match_score',
+            default=2,
+            i_range=range(0, 5),
+            compare_lt=self.assertLess,
+            compare_gt=self.assertGreater
+        )
+        # The above is a strict bound, so we don't need an expected align
+
+    def test_arg_mismatch_score(self):
+        """A harsher mismatch_score must not increase the optimal score."""
+        query_sequences = [
+            "TTATAATTAATTCTTATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
+            "AGTCGAAGGGTAAGGGGTATAGGCGTGTCACCTA",
+            "AGTCGAAGGGTAATA",
+            "CTGCCTCAGGGGCGAGGAAAGCGTCAGCGCGGCTGCCGTCGGCGCAGGGGC",
+            "AGGGTAATTAGCGCGTGTTCACCTA"
+        ]
+        target_sequences = query_sequences
+        self._check_argument_with_inequality_on_optimal_align_score(
+            query_sequences=query_sequences,
+            target_sequences=target_sequences,
+            arg='mismatch_score',
+            default=-3,
+            i_range=range(-6, 1),
+            # These are intentionally inverted
+            compare_lt=self.assertLessEqual,
+            compare_gt=self.assertGreaterEqual
+        )
+        # The above is not a strict bound, so let's use an expected align
+        # to plug the hole where every align is exactly equal to default
+        expected = {
+            'optimal_alignment_score': 8,
+            'suboptimal_alignment_score': 0,
+            'query_begin': 5,
+            'query_end': 8,
+            'target_begin': 10,
+            'target_end_optimal': 13,
+            'target_end_suboptimal': 0,
+            'cigar': '4M',
+            'query_sequence': 'AGAGGGTAATCAGCCGTGTCCACCGGAACACAACGCTATCGGGCGA',
+            'target_sequence': 'GTTCGCCCCAGTAAAGTTGCTACCAAATCCGCATG'
+        }
+        query = StripedSmithWaterman(expected['query_sequence'],
+                                     mismatch_score=-8)
+        alignment = query(expected['target_sequence'])
+        self._check_alignment(alignment, expected)
+
+    def test_arg_matrix_overrides_match_and_mismatch(self):
+        """A substitution_matrix must take precedence over match/mismatch."""
+        query_sequences = [
+            "TTATAATTAATTCTTATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
+            "AGTCGAAGGGTAAGGGGTATAGGCGTGTCACCTA",
+            "AGTCGAAGGGTAATA",
+            "CTGCCTCAGGGGCGAGGAAAGCGTCAGCGCGGCTGCCGTCGGCGCAGGGGC",
+            "AGGGTAATTAGCGCGTGTTCACCTA"
+        ]
+        target_sequences = query_sequences
+        matrix = {  # This is a biologically meaningless matrix
+            "A": {"A": 4,  "T": -1, "C": -2, "G": -3, "N": 4},
+            "T": {"A": -1, "T": 1,  "C": -1, "G": -4, "N": 1},
+            "C": {"A": -2, "T": -1, "C": 10, "G": 1,  "N": 1},
+            "G": {"A": -3, "T": -4, "C": 1,  "G": 3,  "N": 1},
+            "N": {"A": 4,  "T": 1,  "C": 1,  "G": 1,  "N": 0}
+        }
+        for query_sequence in query_sequences:
+            for target_sequence in target_sequences:
+                query1 = StripedSmithWaterman(query_sequence)
+                align1 = query1(target_sequence)
+
+                query2 = StripedSmithWaterman(query_sequence,
+                                              substitution_matrix=matrix)
+                align2 = query2(target_sequence)
+
+                # If the matrix were ignored, the scores would be equal.
+                self.assertNotEqual(align1.optimal_alignment_score,
+                                    align2.optimal_alignment_score)
+
+    def test_arg_gap_open_penalty(self):
+        """A larger gap_open_penalty must not increase the optimal score."""
+        query_sequences = [
+            "TTATAATTTTCTTAGTTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
+            "AGTCCGAAGGGTAATATAGGCGTGTCACCTA",
+            "AGTCGAAGGCGGTAATA",
+            "CTGCCTCGGCAGGGGGAGGAAAGCGTCAGCGCGGCTGCCGTCGGCGCAGGGGC",
+            "AGGGTAATTAAAGGCGTGTTCACCTA"
+        ]
+        target_sequences = query_sequences
+        self._check_argument_with_inequality_on_optimal_align_score(
+            query_sequences=query_sequences,
+            target_sequences=target_sequences,
+            arg='gap_open_penalty',
+            default=5,
+            i_range=range(1, 12),
+            # These are intentionally inverted
+            compare_lt=self.assertGreaterEqual,
+            compare_gt=self.assertLessEqual
+        )
+        # The above is not a strict bound, so let's use an expected align
+        # to plug the hole where every align is exactly equal to default
+        expected = {
+            'optimal_alignment_score': 51,
+            'suboptimal_alignment_score': 20,
+            'query_begin': 0,
+            'query_end': 37,
+            'target_begin': 0,
+            'target_end_optimal': 29,
+            'target_end_suboptimal': 9,
+            'cigar': '5M4I3M3I1M1I21M',
+            'query_sequence': 'TAGAGATTAATTGCCACATTGCCACTGCCAAAATTCTG',
+            'target_sequence': 'TAGAGATTAATTGCCACTGCCAAAATTCTG'
+        }
+        query = StripedSmithWaterman(expected['query_sequence'],
+                                     gap_open_penalty=1)
+        alignment = query(expected['target_sequence'])
+        self._check_alignment(alignment, expected)
+
+    def test_arg_gap_extend_penalty(self):
+        """A larger gap_extend_penalty must not increase the optimal score."""
+        query_sequences = [
+            "TTATAATTTTCTTATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
+            "AGTCGAAGGGTAATACTAGGCGTGTCACCTA",
+            "AGTCGAAGGGTAATA",
+            "CTGCCTCAGGGGGAGGCAAAGCGTCAGCGCGGCTGCCGTCGGCGCAGGGGC",
+            "AGGGTAATTAGGCGTGTTCACCTA"
+        ]
+        target_sequences = query_sequences
+        self._check_argument_with_inequality_on_optimal_align_score(
+            query_sequences=query_sequences,
+            target_sequences=target_sequences,
+            arg='gap_extend_penalty',
+            default=2,
+            i_range=range(1, 10),
+            # These are intentionally inverted
+            compare_lt=self.assertGreaterEqual,
+            compare_gt=self.assertLessEqual
+        )
+        # The above is not a strict bound, so let's use an expected align
+        # to plug the hole where every align is exactly equal to default
+        expected = {
+            'optimal_alignment_score': 9,
+            'suboptimal_alignment_score': 8,
+            'query_begin': 6,
+            'query_end': 12,
+            'target_begin': 7,
+            'target_end_optimal': 13,
+            'target_end_suboptimal': 38,
+            'cigar': '7M',
+            'query_sequence': 'TCTATAAGATTCCGCATGCGTTACTTATAAGATGTCTCAACGG',
+            'target_sequence': 'GCCCAGTAGCTTCCCAATATGAGAGCATCAATTGTAGATCGGGCC'
+        }
+        query = StripedSmithWaterman(expected['query_sequence'],
+                                     gap_extend_penalty=10)
+        alignment = query(expected['target_sequence'])
+        self._check_alignment(alignment, expected)
+
+    def test_arg_score_only(self):
+        """score_only=True suppresses begin positions and the cigar."""
+        query_sequences = [
+            "TTATCGTGATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
+            "AGTCGAAGGGTAATACTATAAGGCGTGTCACCTA",
+            "AGTCGAAGGGTAATA",
+            "AGGGTAATTAGGCGTGCGTGCGTGTTCACCTA",
+            "AGGGTATTAGGCGTGTTCACCTA"
+        ]
+        target_sequences = query_sequences
+        self._check_bit_flag_sets_properties_falsy_or_negative(
+            query_sequences=query_sequences,
+            target_sequences=target_sequences,
+            arg_settings=[('score_only', True)],
+            properties_to_null=['query_begin', 'target_begin', 'cigar']
+        )
+
+    def test_arg_score_filter_is_used(self):
+        """An unreachably high score_filter nulls BABP fields and the cigar."""
+        query_sequences = [
+            "TTATCGTGATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
+            "AGTCGAAGGGTAATACTATAAGGCGTGTCACCTA",
+            "AGTCGAAGGGTAATA",
+            "AGGGTAATTAGGCGTGCGTGCGTGTTCACCTA",
+            "AGGGTATTAGGCGTGTTCACCTA"
+        ]
+        target_sequences = query_sequences
+        self._check_bit_flag_sets_properties_falsy_or_negative(
+            query_sequences=query_sequences,
+            target_sequences=target_sequences,
+            # score_filter will force a BABP and cigar to be falsy
+            arg_settings=[('score_filter', 9001)],
+            properties_to_null=['query_begin', 'target_begin', 'cigar']
+        )
+
+    def test_arg_distance_filter_is_used(self):
+        """A tiny distance_filter nulls only the cigar."""
+        query_sequences = [
+            "TTATCGTGATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
+            "AGTCGAAGGGTAATACTATAAGGCGTGTCACCTA",
+            "AGTCGAAGGGTAATA",
+            "AGGGTAATTAGGCGTGCGTGCGTGTTCACCTA",
+            "AGGGTATTAGGCGTGTTCACCTA"
+        ]
+        target_sequences = query_sequences
+        self._check_bit_flag_sets_properties_falsy_or_negative(
+            query_sequences=query_sequences,
+            target_sequences=target_sequences,
+            # distance_filter will force cigar to be falsy only
+            arg_settings=[('distance_filter', 1)],
+            properties_to_null=['cigar']
+        )
+
+    def test_arg_override_skip_babp(self):
+        """override_skip_babp keeps begin positions despite a score_filter."""
+        query_sequences = [
+            "TTATCGTGATTATTATCAATATTTATAATTTGATTTTGTTGTAAT",
+            "AGTCGAAGGGTAATACTATAAGGCGTGTCACCTA",
+            "AGTCGAAGGGTAATA",
+            "AGGGTAATTAGGCGTGCGTGCGTGTTCACCTA",
+            "AGGGTATTAGGCGTGTTCACCTA"
+        ]
+        target_sequences = query_sequences
+        self._check_bit_flag_sets_properties_falsy_or_negative(
+            query_sequences=query_sequences,
+            target_sequences=target_sequences,
+            # score_filter will force a BABP and cigar to be falsy if not for
+            # override_skip_babp preventing this for all but the cigar
+            arg_settings=[('override_skip_babp', True),
+                          ('score_filter', 9001)],
+            properties_to_null=['cigar']
+        )
+
+    def test_arg_zero_index_changes_base_of_index_to_0_or_1(self):
+        """zero_index toggles between 0-based and 1-based result positions."""
+        # Same alignment twice; the second expects every index shifted by 1.
+        expected_alignments = [
+            ({
+                'optimal_alignment_score': 100,
+                'suboptimal_alignment_score': 44,
+                'query_begin': 5,
+                'query_end': 54,
+                'target_begin': 0,
+                'target_end_optimal': 49,
+                'target_end_suboptimal': 21,
+                'cigar': '50M',
+                'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
+                                   'CCCCGGGCGGGGC'),
+                'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
+                                    'GGGCGGGGC')
+            }, True),
+            ({
+                'optimal_alignment_score': 100,
+                'suboptimal_alignment_score': 44,
+                'query_begin': 6,
+                'query_end': 55,
+                'target_begin': 1,
+                'target_end_optimal': 50,
+                'target_end_suboptimal': 22,
+                'cigar': '50M',
+                'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
+                                   'CCCCGGGCGGGGC'),
+                'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
+                                    'GGGCGGGGC')
+            }, False)
+        ]
+        for expected, z in expected_alignments:
+            query = StripedSmithWaterman(expected['query_sequence'],
+                                         zero_index=z)
+            alignment = query(expected['target_sequence'])
+            self._check_alignment(alignment, expected)
+
+    def test_arg_suppress_sequences(self):
+        """suppress_sequences=True blanks the stored query/target strings."""
+        expected = {
+            'optimal_alignment_score': 100,
+            'suboptimal_alignment_score': 44,
+            'query_begin': 5,
+            'query_end': 54,
+            'target_begin': 0,
+            'target_end_optimal': 49,
+            'target_end_suboptimal': 21,
+            'cigar': '50M',
+            'query_sequence': '',
+            'target_sequence': ''
+        }
+        query = StripedSmithWaterman(
+            "AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCCGGGCGGGGC",
+            suppress_sequences=True)
+        alignment = query("CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCCGGGCGGGGC")
+        self._check_alignment(alignment, expected)
+
+
+class TestAlignStripedSmithWaterman(TestSSW):
+    """Tests that local_pairwise_align_ssw agrees with StripedSmithWaterman."""
+
+    def _check_Alignment_to_AlignmentStructure(self, alignment, structure):
+        """Assert an Alignment matches the equivalent AlignmentStructure."""
+        self.assertEqual(alignment.score(), structure.optimal_alignment_score)
+        self.assertEqual(str(alignment[0]), structure.aligned_query_sequence)
+        self.assertEqual(str(alignment[1]), structure.aligned_target_sequence)
+        # query_begin == -1 signals that begin positions were suppressed.
+        if structure.query_begin == -1:
+            self.assertEqual(alignment.start_end_positions(), None)
+        else:
+            for (start, end), (expected_start, expected_end) in \
+                zip(alignment.start_end_positions(),
+                    [(structure.query_begin,
+                      structure.query_end),
+                     (structure.target_begin,
+                      structure.target_end_optimal)]):
+                self.assertEqual(start, expected_start)
+                self.assertEqual(end, expected_end)
+
+    def test_same_as_using_StripedSmithWaterman_object(self):
+        """Default-arg convenience wrapper matches the query object."""
+        query_sequence = 'ATGGAAGCTATAAGCGCGGGTGAG'
+        target_sequence = 'AACTTATATAATAAAAATTATATATTCGTTGGGTTCTTTTGATATAAATC'
+        query = StripedSmithWaterman(query_sequence)
+        align1 = query(target_sequence)
+        align2 = local_pairwise_align_ssw(query_sequence,
+                                          target_sequence)
+        self._check_Alignment_to_AlignmentStructure(align2, align1)
+
+    def test_kwargs_are_usable(self):
+        """kwargs given to the wrapper are forwarded to the query object."""
+        kwargs = {}
+        kwargs['mismatch_score'] = -2
+        kwargs['match_score'] = 5
+        query_sequence = 'AGGGTAATTAGGCGTGTTCACCTA'
+        target_sequence = 'TACTTATAAGATGTCTCAACGGCATGCGCAACTTGTGAAGTG'
+        query = StripedSmithWaterman(query_sequence, **kwargs)
+        align1 = query(target_sequence)
+        align2 = local_pairwise_align_ssw(query_sequence,
+                                          target_sequence, **kwargs)
+        self._check_Alignment_to_AlignmentStructure(align2, align1)
+
+
+class TestAlignmentStructure(TestSSW):
+    """Tests for the AlignmentStructure result wrapper itself."""
+
+    def mock_object_factory(self, dictionary):
+        """Build an AlignmentStructure stand-in with attributes from a dict.
+
+        Bypasses the real constructor so internal helpers can be exercised
+        without running an actual alignment.
+        """
+        class MockAlignmentStructure(AlignmentStructure):
+            def __init__(self, _a, _b, _c):
+                for key in dictionary:
+                    setattr(self, key, dictionary[key])
+        return MockAlignmentStructure(None, None, 0)
+
+    def test_works_for_dot_and_square_bracket_access(self):
+        """alignment.attr and alignment['attr'] return the same values."""
+        q_seq = "AGGGTAATTAGGCGTGTTCACCTA"
+        query = StripedSmithWaterman(q_seq)
+        alignment = query("TACTTATAAGATGTCTCAACGGCATGCGCAACTTGTGAAGTG")
+        for accessible in self.align_attributes:
+            self.assertEqual(getattr(alignment, accessible),
+                             alignment[accessible])
+
+    def test_is_zero_based_returns_true_if_index_base_is_zero(self):
+        """is_zero_based() reflects the zero_index constructor flag."""
+        expected_alignments = [
+            ({
+                'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
+                                   'CCCCGGGCGGGGC'),
+                'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
+                                    'GGGCGGGGC')
+            }, True),
+            ({
+                'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
+                                   'CCCCGGGCGGGGC'),
+                'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
+                                    'GGGCGGGGC')
+            }, False)
+        ]
+        for expected, z in expected_alignments:
+            query = StripedSmithWaterman(expected['query_sequence'],
+                                         zero_index=z)
+            alignment = query(expected['target_sequence'])
+            self.assertEqual(z, alignment.is_zero_based())
+
+    def test_set_zero_based_changes_the_index_base(self):
+        """set_zero_based() flips the reported index base after the fact."""
+        expected_alignments = [
+            ({
+                'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
+                                   'CCCCGGGCGGGGC'),
+                'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
+                                    'GGGCGGGGC')
+            }, True),
+            ({
+                'query_sequence': ('AGTCACGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCG'
+                                   'CCCCGGGCGGGGC'),
+                'target_sequence': ('CGCGCGCCGCCGGGGGGCCGGCCGGCGCCGGGGGGCGCCCC'
+                                    'GGGCGGGGC')
+            }, False)
+        ]
+        for expected, z in expected_alignments:
+            query = StripedSmithWaterman(expected['query_sequence'],
+                                         zero_index=z)
+            alignment = query(expected['target_sequence'])
+            alignment.set_zero_based(not z)
+            self.assertEqual(not z, alignment.is_zero_based())
+
+    def test__get_aligned_sequences(self):
+        """_get_aligned_sequence inserts gaps per the cigar tuples."""
+        generic_sequence = "123456789abcdefghijklmnopqrstuvwxyz"
+        tests = [  # `end_after_cigar` is how far end extends beyond the cigar.
+                   #  Negative values on this should not be possible with SSW
+            {
+                'cigar_tuples': [
+                    (4, 'M'), (3, 'I'), (1, 'D'), (15, 'M')
+                ],
+                'begin': 4,
+                'end_after_cigar': 2,
+                'gap_type': 'I',
+                'expected': "5678---9abcdefghijklmnop"
+            },
+            {
+                'cigar_tuples': [
+                    (12, 'M')
+                ],
+                'begin': 10,
+                'end_after_cigar': 0,
+                'gap_type': 'D',
+                'expected': "bcdefghijklm"
+            },
+            {
+                'cigar_tuples': [
+                    (10, 'D'), (1, 'M'), (3, 'I'), (2, 'M')
+                ],
+                'begin': 0,
+                'end_after_cigar': 5,
+                'gap_type': 'I',
+                'expected': "1---2345678"
+            },
+            {
+                'cigar_tuples': [
+                    (10, 'D'), (1, 'M'), (3, 'I'), (2, 'M')
+                ],
+                'begin': 3,
+                'end_after_cigar': 0,
+                'gap_type': 'D',
+                'expected': "----------456"
+            },
+            {
+                'cigar_tuples': [
+                    (1, 'I'), (4, 'M'), (3, 'I'), (1, 'D'), (8, 'M'), (8, 'D'),
+                    (2, 'I'), (6, 'M'), (1, 'I')
+                ],
+                'begin': 4,
+                'end_after_cigar': 3,
+                'gap_type': 'I',
+                'expected': "-5678---9abcdefg--hijklm-nop"
+            }
+        ]
+        for test in tests:
+            mock_object = self.mock_object_factory({})
+            # Because SSW's output is [a, b] and Python's list ranges use
+            # [a, b) a 1 is added in the calculation of aligned sequences.
+            # We just have to subtract 1 while we are testing with the easy to
+            # verify interface of `end_after_cigar` to cancel this range effect
+            # out.
+            end = test['end_after_cigar'] - 1 + test['begin'] + \
+                sum([le if t == 'M' else 0 for le, t in test['cigar_tuples']])
+            self.assertEqual(test['expected'],
+                             AlignmentStructure._get_aligned_sequence(
+                                 mock_object, generic_sequence,
+                                 test['cigar_tuples'], test['begin'],
+                                 end, test['gap_type']))
+
+    def test_aligned_query_target_sequence(self):
+        """aligned_* properties return gapped query/target strings."""
+        query = StripedSmithWaterman("AGGGTAATTAGGCGTGTTCACCTA")
+        alignment = query("AGTCGAAGGGTAATATAGGCGTGTCACCTA")
+        self.assertEqual("AGGGTAATATAGGCGT-GTCACCTA",
+                         alignment.aligned_target_sequence)
+        self.assertEqual("AGGGTAAT-TAGGCGTGTTCACCTA",
+                         alignment.aligned_query_sequence)
+
+    def test_aligned_query_target_sequence_with_suppressed_sequences(self):
+        """aligned_* properties are None when sequences were suppressed."""
+        query = StripedSmithWaterman("AGGGTAATTAGGCGTGTTCACCTA",
+                                     suppress_sequences=True)
+        alignment = query("AGTCGAAGGGTAATATAGGCGTGTCACCTA")
+        self.assertEqual(None, alignment.aligned_target_sequence)
+        self.assertEqual(None, alignment.aligned_query_sequence)
+
+# Allow running this test module directly as a script.
+if __name__ == '__main__':
+    main()
diff --git a/skbio/diversity/__init__.py b/skbio/diversity/__init__.py
new file mode 100644
index 0000000..5015b62
--- /dev/null
+++ b/skbio/diversity/__init__.py
@@ -0,0 +1,30 @@
+"""
+Diversity calculations (:mod:`skbio.diversity`)
+===============================================
+
+.. currentmodule:: skbio.diversity
+
+This package provides functionality for calculating community diversity,
+including various alpha- and beta-diversity measures.
+
+Subpackages
+-----------
+
+.. autosummary::
+   :toctree: generated/
+
+   alpha
+   beta
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+test = Tester().test
diff --git a/skbio/diversity/alpha/__init__.py b/skbio/diversity/alpha/__init__.py
new file mode 100644
index 0000000..b63122f
--- /dev/null
+++ b/skbio/diversity/alpha/__init__.py
@@ -0,0 +1,161 @@
+"""
+Alpha diversity measures (:mod:`skbio.diversity.alpha`)
+=======================================================
+
+.. currentmodule:: skbio.diversity.alpha
+
+This package provides implementations of various alpha diversity measures,
+including measures of richness, dominance, and evenness. Some functions
+generate confidence intervals (CIs). These functions have the suffix ``_ci``.
+
+All alpha diversity measures accept a vector of counts within a single sample,
+where each count is, for example, the number of observations of a particular
+Operational Taxonomic Unit, or OTU. We use the term "OTU" here very loosely, as
+these could be counts of any type of feature/observation (e.g., bacterial
+species). We'll refer to this vector as the *counts vector* or simply *counts*
+throughout the documentation.
+
+The counts vector must be one-dimensional and contain integers representing the
+number of individuals seen (or *counted*) for a particular OTU. Negative values
+are not allowed; the counts vector may only contain integers greater than or
+equal to zero.
+
+The counts vector is `array_like`: anything that can be converted into a 1-D
+numpy array is acceptable input. For example, you can provide a numpy array or
+a native Python list and the results should be identical.
+
+If the input to an alpha diversity measure does not meet the above
+requirements, the function will raise either a ``ValueError`` or a
+``TypeError``, depending on the condition that is violated.
+
+.. note:: There are different ways that samples are represented in the
+   ecological literature and in related software. The alpha diversity measures
+   provided here *always* assume that the input contains abundance data: each
+   count represents the number of individuals seen for a particular OTU in the
+   sample. For example, if you have two OTUs, where 3 individuals were observed
+   from one of the OTUs and only a single individual was observed from the
+   other, you could represent this data in the following forms (among others):
+
+   As a vector of counts. This is the expected type of input for the alpha
+   diversity measures in this module. There are 3 individuals from the OTU at
+   index 0, and 1 individual from the OTU at index 1:
+
+   >>> counts = [3, 1]
+
+   As a vector of indices. The OTU at index 0 is observed 3 times, while the
+   OTU at index 1 is observed 1 time:
+
+   >>> indices = [0, 0, 0, 1]
+
+   As a vector of frequencies. We have 1 OTU that is a singleton and 1 OTU that
+   is a tripleton. We do not have any 0-tons or doubletons:
+
+   >>> frequencies = [0, 1, 0, 1]
+
+   Always use the first representation (a counts vector) with this module.
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   ace
+   berger_parker_d
+   brillouin_d
+   chao1
+   chao1_ci
+   dominance
+   doubles
+   enspie
+   equitability
+   esty_ci
+   fisher_alpha
+   gini_index
+   goods_coverage
+   heip_e
+   kempton_taylor_q
+   lladser_ci
+   lladser_pe
+   margalef
+   mcintosh_d
+   mcintosh_e
+   menhinick
+   michaelis_menten_fit
+   observed_otus
+   osd
+   robbins
+   shannon
+   simpson
+   simpson_e
+   singles
+   strong
+
+Examples
+--------
+
+>>> import numpy as np
+
+Assume we have the following abundance data for a sample, represented as a
+counts vector:
+
+>>> counts = [1, 0, 0, 4, 1, 2, 3, 0]
+
+We can count the number of OTUs:
+
+>>> observed_otus(counts)
+5
+
+Note that OTUs with counts of zero are ignored.
+
+In the previous example, we provided a Python list as input. We can also
+provide other types of input that are `array_like`:
+
+>>> observed_otus((1, 0, 0, 4, 1, 2, 3, 0)) # tuple
+5
+>>> observed_otus(np.array([1, 0, 0, 4, 1, 2, 3, 0])) # numpy array
+5
+
+All of the alpha diversity measures work in this manner.
+
+Other metrics include ``singles``, which tells us how many OTUs are observed
+exactly one time (i.e., are *singleton* OTUs), and ``doubles``, which tells us
+how many OTUs are observed exactly two times (i.e., are *doubleton* OTUs).
+Let's see how many singletons and doubletons there are in the sample:
+
+>>> singles(counts)
+2
+>>> doubles(counts)
+1
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from ._ace import ace
+from ._chao1 import chao1, chao1_ci
+from ._base import (
+    berger_parker_d, brillouin_d, dominance, doubles, enspie, equitability,
+    esty_ci, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q, margalef,
+    mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit, observed_otus,
+    osd, robbins, shannon, simpson, simpson_e, singles, strong)
+from ._gini import gini_index
+from ._lladser import lladser_pe, lladser_ci
+
+__all__ = ['ace', 'chao1', 'chao1_ci', 'berger_parker_d', 'brillouin_d',
+           'dominance', 'doubles', 'enspie', 'equitability', 'esty_ci',
+           'fisher_alpha', 'goods_coverage', 'heip_e', 'kempton_taylor_q',
+           'margalef', 'mcintosh_d', 'mcintosh_e', 'menhinick',
+           'michaelis_menten_fit', 'observed_otus', 'osd', 'robbins',
+           'shannon', 'simpson', 'simpson_e', 'singles', 'strong',
+           'gini_index', 'lladser_pe', 'lladser_ci']
+
+test = Tester().test
diff --git a/skbio/diversity/alpha/_ace.py b/skbio/diversity/alpha/_ace.py
new file mode 100644
index 0000000..5da3ad3
--- /dev/null
+++ b/skbio/diversity/alpha/_ace.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import numpy as np
+
+from ._base import _validate
+
+
def ace(counts, rare_threshold=10):
    """Calculate the ACE metric (Abundance-based Coverage Estimator).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    rare_threshold : int, optional
        OTUs with this many or fewer individuals are treated as rare.

    Returns
    -------
    double
        Computed ACE metric.

    Raises
    ------
    ValueError
        If every rare OTU is a singleton.

    Notes
    -----
    ACE was first introduced in [1]_ and [2]_; this implementation follows
    the description in the EstimateS manual [3]_. If no rare OTUs exist, the
    number of abundant OTUs is returned. The default `rare_threshold` of 10
    is based on [4]_. Zero counts (OTUs absent from the sample) are ignored
    when counting rare OTUs.

    References
    ----------
    .. [1] Chao, A. & S.-M Lee. 1992. Journal of the American Statistical
       Association 87, 210-217.
    .. [2] Chao, A., M.-C. Ma, & M. C. K. Yang. 1993. Biometrika 80, 193-201.
    .. [3] http://viceroy.eeb.uconn.edu/estimates/
    .. [4] Chao, A., W.-H. Hwang, Y.-C. Chen, and C.-Y. Kuo. 2000.
       Statistica Sinica 10:227-246.

    """
    counts = _validate(counts)
    # freq_of_freqs[i] == number of OTUs observed exactly i times.
    # NOTE(review): a vector containing only zeros yields a length-1
    # bincount, so the [1] index below would raise IndexError — presumably
    # callers always supply at least one nonzero count; confirm upstream.
    freq_of_freqs = np.bincount(counts)
    num_rare = _otus_rare(freq_of_freqs, rare_threshold)
    num_singletons = freq_of_freqs[1]

    if num_singletons > 0 and num_singletons == num_rare:
        raise ValueError("The only rare OTUs are singletons, so the ACE "
                         "metric is undefined. EstimateS suggests using "
                         "bias-corrected Chao1 instead.")

    num_abundant = _otus_abundant(freq_of_freqs, rare_threshold)
    if num_rare == 0:
        return num_abundant

    n_rare = _number_rare(freq_of_freqs, rare_threshold)
    # Sample coverage estimate for the rare group.
    sample_coverage = 1 - num_singletons / n_rare

    # Coefficient of variation (gamma), clamped at zero.
    gamma_top = num_rare * _number_rare(freq_of_freqs, rare_threshold,
                                        gamma=True)
    gamma_bottom = sample_coverage * n_rare * (n_rare - 1)
    gamma_ace = max((gamma_top / gamma_bottom) - 1, 0)

    return (num_abundant + (num_rare / sample_coverage) +
            (num_singletons / sample_coverage) * gamma_ace)
+
+
+def _otus_rare(freq_counts, rare_threshold):
+    """Count number of rare OTUs."""
+    return freq_counts[1:rare_threshold + 1].sum()
+
+
+def _otus_abundant(freq_counts, rare_threshold):
+    """Count number of abundant OTUs."""
+    return freq_counts[rare_threshold + 1:].sum()
+
+
+def _number_rare(freq_counts, rare_threshold, gamma=False):
+    """Return number of individuals in rare OTUs.
+
+    ``gamma=True`` generates the ``n_rare`` used for the variation coefficient.
+
+    """
+    n_rare = 0
+
+    if gamma:
+        for i, j in enumerate(freq_counts[:rare_threshold + 1]):
+            n_rare = n_rare + (i * j) * (i - 1)
+    else:
+        for i, j in enumerate(freq_counts[:rare_threshold + 1]):
+            n_rare = n_rare + (i * j)
+
+    return n_rare
diff --git a/skbio/diversity/alpha/_base.py b/skbio/diversity/alpha/_base.py
new file mode 100644
index 0000000..127d95b
--- /dev/null
+++ b/skbio/diversity/alpha/_base.py
@@ -0,0 +1,913 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+from scipy.special import gammaln
+from scipy.optimize import fmin_powell, minimize_scalar
+
+from skbio.stats import subsample_counts
+
+
+def _validate(counts, suppress_cast=False):
+    """Validate and convert input to an acceptable counts vector type.
+
+    Note: may not always return a copy of `counts`!
+
+    """
+    counts = np.asarray(counts)
+
+    if not suppress_cast:
+        counts = counts.astype(int, casting='safe', copy=False)
+
+    if counts.ndim != 1:
+        raise ValueError("Only 1-D vectors are supported.")
+    elif (counts < 0).any():
+        raise ValueError("Counts vector cannot contain negative values.")
+
+    return counts
+
+
def berger_parker_d(counts):
    """Calculate Berger-Parker dominance.

    The fraction of the whole sample contributed by its single most
    abundant OTU:

    .. math::

       d = \\frac{N_{max}}{N}

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Berger-Parker dominance.

    Notes
    -----
    Defined in [1]_; the implementation follows the SDR-IV online
    manual [2]_.

    References
    ----------
    .. [1] Berger & Parker (1970). SDR-IV online help.
    .. [2] http://www.pisces-conservation.com/sdrhelp/index.html

    """
    counts = _validate(counts)
    most_abundant = counts.max()
    total = counts.sum()
    return most_abundant / total
+
+
def brillouin_d(counts):
    """Calculate Brillouin index of alpha diversity.

    Defined as:

    .. math::

       HB = \\frac{\\ln N!-\\sum^5_{i=1}{\\ln n_i!}}{N}

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Brillouin index.

    Notes
    -----
    Implementation follows the SDR-IV online manual [1]_. Factorials are
    evaluated through ``gammaln`` to avoid overflow.

    References
    ----------
    .. [1] http://www.pisces-conservation.com/sdrhelp/index.html

    """
    counts = _validate(counts)
    present = counts[counts.nonzero()]
    total = present.sum()
    # ln(N!) == gammaln(N + 1); same identity applies per-OTU.
    return (gammaln(total + 1) - gammaln(present + 1).sum()) / total
+
+
def dominance(counts):
    """Calculate dominance.

    Dominance is defined as

    .. math::

       \\sum{p_i^2}

    where :math:`p_i` is the proportion of the entire community that OTU
    :math:`i` represents. It equals 1 - Simpson's index and ranges between
    0 and 1.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Dominance.

    See Also
    --------
    simpson

    Notes
    -----
    Implementation follows the description given in [1]_.

    References
    ----------
    .. [1] http://folk.uio.no/ohammer/past/diversity.html

    """
    counts = _validate(counts)
    total = counts.sum()
    proportions = counts / total
    return np.square(proportions).sum()
+
+
def doubles(counts):
    """Calculate the number of doubleton OTUs (observed exactly twice).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    int
        Doubleton count.

    """
    counts = _validate(counts)
    doubleton_mask = counts == 2
    return doubleton_mask.sum()
+
+
def enspie(counts):
    """Calculate ENS_pie alpha diversity measure.

    ENS_pie is equivalent to ``1 / dominance``.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        ENS_pie alpha diversity measure.

    See Also
    --------
    dominance

    Notes
    -----
    ENS_pie is defined in [1]_.

    References
    ----------
    .. [1] Chase and Knight (2013). Ecology Letters, Volume 16, Issue
       Supplement s1, pgs 17-26.

    """
    counts = _validate(counts)
    d = dominance(counts)
    return 1 / d
+
+
def equitability(counts, base=2):
    """Calculate equitability (Shannon index corrected for number of OTUs).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    base : scalar, optional
        Logarithm base to use in the calculations.

    Returns
    -------
    double
        Measure of equitability.

    See Also
    --------
    shannon

    Notes
    -----
    Implementation follows the SDR-IV online manual [1]_.

    References
    ----------
    .. [1] http://www.pisces-conservation.com/sdrhelp/index.html

    """
    counts = _validate(counts)
    observed_entropy = shannon(counts, base)
    # Maximum possible entropy: log_base of the number of observed OTUs.
    max_entropy = np.log(observed_otus(counts)) / np.log(base)
    return observed_entropy / max_entropy
+
+
def esty_ci(counts):
    """Calculate Esty's 95% confidence interval for sample coverage.

    Esty's CI is defined as

    .. math::

       F_1/N \\pm z\\sqrt{W}

    where :math:`F_1` is the number of singleton OTUs, :math:`N` the total
    number of individuals, :math:`z` the normal deviate for the targeted
    confidence, and

    .. math::

       W = \\frac{F_1(N-F_1)+2NF_2}{N^3}

    with :math:`F_2` the number of doubleton OTUs.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    tuple
        Esty's confidence interval as ``(lower_bound, upper_bound)``.

    Notes
    -----
    Esty's CI is defined in [1]_. :math:`z` is hardcoded for a 95%
    confidence interval.

    References
    ----------
    .. [1] Esty, W. W. (1983). Ann Statist 11: 905-912.

    """
    counts = _validate(counts)

    singleton_count = singles(counts)
    doubleton_count = doubles(counts)
    n = counts.sum()
    z = 1.959963985  # two-sided 95% normal deviate
    w = (singleton_count * (n - singleton_count) +
         2 * n * doubleton_count) / (n ** 3)

    center = singleton_count / n
    margin = z * np.sqrt(w)
    return center - margin, center + margin
+
+
def fisher_alpha(counts):
    """Calculate Fisher's alpha.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Fisher's alpha.

    Raises
    ------
    RuntimeError
        If the optimizer fails to converge (error > 1.0).

    Notes
    -----
    The implementation here is based on the description given in the SDR-IV
    online manual [1]_. Uses ``scipy.optimize.minimize_scalar`` to find
    Fisher's alpha.

    References
    ----------
    .. [1] http://www.pisces-conservation.com/sdrhelp/index.html

    """
    counts = _validate(counts)
    n = counts.sum()
    s = observed_otus(counts)

    def f(alpha):
        # Squared residual of the Fisher log-series relationship
        # S = alpha * ln(1 + N / alpha), minimized over alpha.
        return (alpha * np.log(1 + (n / alpha)) - s) ** 2

    # Temporarily silence RuntimeWarnings (invalid and division by zero)
    # while the optimizer probes degenerate values (e.g. alpha=0).
    # np.errstate restores the previous error settings on exit, even if
    # minimize_scalar raises, replacing the manual seterr save/restore.
    with np.errstate(divide='ignore', invalid='ignore'):
        alpha = minimize_scalar(f).x

    if f(alpha) > 1.0:
        raise RuntimeError("Optimizer failed to converge (error > 1.0), so "
                           "could not compute Fisher's alpha.")
    return alpha
+
+
def goods_coverage(counts):
    """Calculate Good's coverage of counts.

    Good's coverage estimator is defined as

    .. math::

       1-\\frac{F_1}{N}

    where :math:`F_1` is the number of singleton OTUs and :math:`N` is the
    total number of individuals (sum of abundances for all OTUs).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Good's coverage estimator.

    """
    counts = _validate(counts)
    singleton_count = singles(counts)
    total = counts.sum()
    return 1 - (singleton_count / total)
+
+
def heip_e(counts):
    """Calculate Heip's evenness measure.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Heip's evenness measure.

    Notes
    -----
    Implementation follows the description in [1]_.

    References
    ----------
    .. [1] Heip, C. 1974. A new index measuring evenness. J. Mar. Biol.
       Ass. UK., 54, 555-557.

    """
    counts = _validate(counts)
    # Shannon entropy in nats, exponentiated to an effective OTU number.
    effective = np.exp(shannon(counts, base=np.e))
    richness = observed_otus(counts)
    return (effective - 1) / (richness - 1)
+
+
def kempton_taylor_q(counts, lower_quantile=0.25, upper_quantile=0.75):
    """Calculate Kempton-Taylor Q index of alpha diversity.

    Estimates the slope of the cumulative abundance curve in the
    interquantile range. By default, uses lower and upper quartiles,
    rounding inwards.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    lower_quantile : float, optional
        Lower bound of the interquantile range. Defaults to lower quartile.
    upper_quantile : float, optional
        Upper bound of the interquantile range. Defaults to upper quartile.

    Returns
    -------
    double
        Kempton-Taylor Q index of alpha diversity.

    Notes
    -----
    The index is defined in [1]_; the implementation follows the SDR-IV
    online manual [2]_. It differs slightly from the results in Magurran
    1998 (14 rather than 15 in the numerator): Magurran counts half of the
    OTUs sharing the count where each quantile falls, but the justification
    for that is unclear, so the straightforward calculation is kept here.

    References
    ----------
    .. [1] Kempton, R. A. and Taylor, L. R. (1976) Nature, 262, 818-820.
    .. [2] http://www.pisces-conservation.com/sdrhelp/index.html

    """
    counts = _validate(counts)
    num_otus = len(counts)
    # Round the quantile indices inwards: ceil for the lower bound,
    # truncation for the upper bound.
    lower_idx = int(np.ceil(num_otus * lower_quantile))
    upper_idx = int(num_otus * upper_quantile)
    ranked = np.sort(counts)
    return ((upper_idx - lower_idx) /
            np.log(ranked[upper_idx] / ranked[lower_idx]))
+
+
def margalef(counts):
    """Calculate Margalef's richness index.

    Defined as:

    .. math::

       D = \\frac{(S - 1)}{\\ln N}

    where :math:`S` is the number of OTUs and :math:`N` the total number of
    individuals. Assumes log accumulation.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Margalef's richness index.

    Notes
    -----
    Based on the description in [1]_.

    References
    ----------
    .. [1] Magurran, A E 2004. Measuring biological diversity. Blackwell.
       pp. 76-77.

    """
    counts = _validate(counts)
    richness = observed_otus(counts)
    total = counts.sum()
    return (richness - 1) / np.log(total)
+
+
def mcintosh_d(counts):
    """Calculate McIntosh dominance index D.

    Defined as:

    .. math::

       D = \\frac{N - U}{N - \\sqrt{N}}

    where :math:`N` is the total number of individuals and

    .. math::

        U = \\sqrt{\\sum{{n_i}^2}}

    with :math:`n_i` the abundance of the :math:`i_{th}` OTU.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        McIntosh dominance index D.

    See Also
    --------
    mcintosh_e

    Notes
    -----
    The index was proposed in [1]_; the implementation follows the SDR-IV
    online manual [2]_.

    References
    ----------
    .. [1] McIntosh, R. P. 1967. Ecology 48, 1115-1126.
    .. [2] http://www.pisces-conservation.com/sdrhelp/index.html

    """
    counts = _validate(counts)
    total = counts.sum()
    # Euclidean norm of the abundance vector.
    u_norm = np.sqrt((counts * counts).sum())
    return (total - u_norm) / (total - np.sqrt(total))
+
+
def mcintosh_e(counts):
    """Calculate McIntosh's evenness measure E.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        McIntosh evenness measure E.

    See Also
    --------
    mcintosh_d

    Notes
    -----
    Implementation follows [1]_, *NOT* the SDR-IV online manual, which is
    wrong.

    References
    ----------
    .. [1] Heip & Engels 1974 p 560.

    """
    counts = _validate(counts)
    total = counts.sum()
    richness = observed_otus(counts)
    u_norm = np.sqrt((counts * counts).sum())
    min_norm = np.sqrt((total - richness + 1) ** 2 + richness - 1)
    return u_norm / min_norm
+
+
def menhinick(counts):
    """Calculate Menhinick's richness index.

    Assumes square-root accumulation.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Menhinick's richness index.

    Notes
    -----
    Based on the description in [1]_.

    References
    ----------
    .. [1] Magurran, A E 2004. Measuring biological diversity. Blackwell.
       pp. 76-77.

    """
    counts = _validate(counts)
    total = counts.sum()
    return observed_otus(counts) / np.sqrt(total)
+
+
def michaelis_menten_fit(counts, num_repeats=1, params_guess=None):
    """Calculate Michaelis-Menten fit to rarefaction curve of observed OTUs.

    The Michaelis-Menten equation is defined as

    .. math::

       S=\\frac{nS_{max}}{n+B}

    where :math:`n` is the number of individuals and :math:`S` the number of
    OTUs; this function estimates :math:`S_{max}`. Datapoints are generated
    for :math:`n=1,2,...,N`, where :math:`N` is the total number of
    individuals and :math:`S` is the number of OTUs in a random subsample of
    :math:`n` individuals.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    num_repeats : int, optional
        Number of rarefactions (subsampling without replacement) performed
        at each value of :math:`n`.
    params_guess : tuple, optional
        Initial guess of :math:`S_{max}` and :math:`B`. If ``None``,
        defaults to :math:`S` (since :math:`S_{max} >= S`) and
        ``round(N / 2)`` respectively.

    Returns
    -------
    S_max : double
        Estimate of the :math:`S_{max}` parameter.

    See Also
    --------
    skbio.stats.subsample_counts

    Notes
    -----
    There is some controversy about how to do the fitting. The ML model in
    [1]_ assumes error roughly proportional to magnitude of observation,
    reasonable for enzyme kinetics but not for rarefaction data; here a
    plain nonlinear least-squares fit is used instead.

    References
    ----------
    .. [1] Raaijmakers, J. G. W. 1987 Statistical analysis of the
       Michaelis-Menten equation. Biometrics 43, 793-803.

    """
    counts = _validate(counts)

    total_indiv = counts.sum()
    if params_guess is None:
        params_guess = (observed_otus(counts), int(round(total_indiv / 2)))

    # Rarefaction curve: observed OTUs (y) vs subsample size (x),
    # averaged over num_repeats draws per size.
    sample_sizes = np.arange(1, total_indiv + 1)
    repeat_rows = np.empty((num_repeats, len(sample_sizes)), dtype=int)
    for rep in range(num_repeats):
        repeat_rows[rep] = np.asarray(
            [observed_otus(subsample_counts(counts, size))
             for size in sample_sizes], dtype=int)
    mean_otus = repeat_rows.mean(0)

    def errfn(params, n, y):
        # Sum of squared residuals of the Michaelis-Menten prediction.
        return (((params[0] * n / (params[1] + n)) - y) ** 2).sum()

    # Optimize and return the fitted S_max.
    return fmin_powell(errfn, params_guess, ftol=1e-5,
                       args=(sample_sizes, mean_otus), disp=False)[0]
+
+
def observed_otus(counts):
    """Calculate the number of distinct OTUs (those with nonzero counts).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    int
        Distinct OTU count.

    """
    counts = _validate(counts)
    present_mask = counts != 0
    return present_mask.sum()
+
+
def osd(counts):
    """Calculate observed OTUs, singles, and doubles.

    A convenience wrapper used by several other measures that need all
    three quantities.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    osd : tuple
        Observed OTUs, singles, and doubles.

    See Also
    --------
    observed_otus
    singles
    doubles

    """
    counts = _validate(counts)
    return (observed_otus(counts), singles(counts), doubles(counts))
+
+
def robbins(counts):
    """Calculate Robbins' estimator for probability of unobserved outcomes.

    Robbins' estimator is defined as

    .. math::

       \\frac{F_1}{n+1}

    where :math:`F_1` is the number of singleton OTUs.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Robbins' estimate.

    Notes
    -----
    Defined in [1]_. The estimate computed here is for :math:`n-1` counts,
    i.e. the x-axis is off by 1.

    References
    ----------
    .. [1] Robbins, H. E (1968). Ann. of Stats. Vol 36, pp. 256-257.

    """
    counts = _validate(counts)
    total = counts.sum()
    return singles(counts) / total
+
+
def shannon(counts, base=2):
    """Calculate Shannon entropy of counts (H), default in bits.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    base : scalar, optional
        Logarithm base to use in the calculations.

    Returns
    -------
    double
        Shannon diversity index H.

    Notes
    -----
    Implementation follows the SDR-IV online manual [1]_, except that the
    default logarithm base here is 2 instead of :math:`e`.

    References
    ----------
    .. [1] http://www.pisces-conservation.com/sdrhelp/index.html

    """
    counts = _validate(counts)
    props = counts / counts.sum()
    # Zero proportions contribute nothing and would produce log(0).
    props = props[props.nonzero()]
    entropy_nats = -(props * np.log(props)).sum()
    return entropy_nats / np.log(base)
+
+
def simpson(counts):
    """Calculate Simpson's index, defined as ``1 - dominance``.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Simpson's index.

    See Also
    --------
    dominance

    Notes
    -----
    Follows ``1 - dominance`` as described in [1]_; other references (such
    as [2]_) define Simpson's index as ``1 / dominance``.

    References
    ----------
    .. [1] http://folk.uio.no/ohammer/past/diversity.html
    .. [2] http://www.pisces-conservation.com/sdrhelp/index.html

    """
    counts = _validate(counts)
    d = dominance(counts)
    return 1 - d
+
+
def simpson_e(counts):
    """Calculate Simpson's evenness measure E.

    Simpson's E is defined as

    .. math::

       E=\\frac{1 / D}{S_{obs}}

    where :math:`D` is dominance and :math:`S_{obs}` is the number of
    observed OTUs.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Simpson's evenness measure E.

    See Also
    --------
    dominance
    enspie
    simpson

    Notes
    -----
    Implementation follows the description given in [1]_.

    References
    ----------
    .. [1] http://www.tiem.utk.edu/~gross/bioed/bealsmodules/simpsonDI.html

    """
    counts = _validate(counts)
    inverse_dominance = enspie(counts)
    return inverse_dominance / observed_otus(counts)
+
+
def singles(counts):
    """Calculate the number of singleton OTUs (observed exactly once).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    int
        Singleton count.

    """
    counts = _validate(counts)
    singleton_mask = counts == 1
    return singleton_mask.sum()
+
+
def strong(counts):
    """Calculate Strong's dominance index (Dw).

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.

    Returns
    -------
    double
        Strong's dominance index (Dw).

    Notes
    -----
    Strong's dominance index is defined in [1]_; the implementation follows
    the SDR-IV online manual [2]_.

    References
    ----------
    .. [1] Strong, W. L., 2002. Assessing species abundance unevenness
       within and between plant communities. Community Ecology, 3, 237-246.
    .. [2] http://www.pisces-conservation.com/sdrhelp/index.html

    """
    counts = _validate(counts)
    total = counts.sum()
    richness = observed_otus(counts)
    ranks = np.arange(1, len(counts) + 1)
    # Cumulative abundance with OTUs ranked from most to least abundant.
    cumulative = np.sort(counts)[::-1].cumsum()
    # Dw is the maximum departure of the cumulative curve from the
    # uniform (perfectly even) expectation.
    return (cumulative / total - (ranks / richness)).max()
diff --git a/skbio/diversity/alpha/_chao1.py b/skbio/diversity/alpha/_chao1.py
new file mode 100644
index 0000000..7e883bf
--- /dev/null
+++ b/skbio/diversity/alpha/_chao1.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import numpy as np
+
+from ._base import _validate, osd
+
+
def chao1(counts, bias_corrected=True):
    """Calculate chao1 richness estimator.

    Uses the bias-corrected version unless `bias_corrected` is ``False`` *and*
    there are both singletons and doubletons.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    bias_corrected : bool, optional
        Indicates whether or not to use the bias-corrected version of the
        equation. The uncorrected version is used only if this is ``False``
        *and* there are both singletons and doubletons; the bias-corrected
        version is used otherwise.

    Returns
    -------
    double
        Computed chao1 richness estimator.

    See Also
    --------
    chao1_ci

    Notes
    -----
    The uncorrected version is based on Equation 6 in [1]_:

    .. math::

       chao1=S_{obs}+\\frac{F_1^2}{2F_2}

    where :math:`F_1` and :math:`F_2` are the count of singletons and
    doubletons, respectively.

    The bias-corrected version is defined as

    .. math::

       chao1=S_{obs}+\\frac{F_1(F_1-1)}{2(F_2+1)}

    References
    ----------
    .. [1] Chao, A. 1984. Non-parametric estimation of the number of classes in
       a population. Scandinavian Journal of Statistics 11, 265-270.

    """
    counts = _validate(counts)
    observed, f1, f2 = osd(counts)

    if bias_corrected or not (f1 and f2):
        # Bias-corrected form; also used whenever singletons or doubletons
        # are absent, where the uncorrected form would divide by zero.
        return observed + f1 * (f1 - 1) / (2 * (f2 + 1))
    return observed + f1 ** 2 / (f2 * 2)
+
+
def chao1_ci(counts, bias_corrected=True, zscore=1.96):
    """Calculate chao1 confidence interval.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    bias_corrected : bool, optional
        Indicates whether or not to use the bias-corrected version of the
        equation. If ``False`` *and* there are both singletons and doubletons,
        the uncorrected version will be used. The biased-corrected version will
        be used otherwise.
    zscore : scalar, optional
        Score to use for confidence. Default of 1.96 is for a 95% confidence
        interval.

    Returns
    -------
    tuple
        chao1 confidence interval as ``(lower_bound, upper_bound)``.

    See Also
    --------
    chao1

    Notes
    -----
    The implementation here is based on the equations in the EstimateS manual
    [1]_. Different equations are employed to calculate the chao1 variance and
    confidence interval depending on `bias_corrected` and the presence/absence
    of singletons and/or doubletons.

    Specifically, the following EstimateS equations are used:

    1. No singletons, Equation 14.
    2. Singletons but no doubletons, Equations 7, 13.
    3. Singletons and doubletons, ``bias_corrected=True``, Equations 6, 13.
    4. Singletons and doubletons, ``bias_corrected=False``, Equations 5, 13.

    References
    ----------
    .. [1] http://viceroy.eeb.uconn.edu/estimates/

    """
    counts = _validate(counts)
    observed, f1, _ = osd(counts)

    # Without singletons the CI has a closed form that does not need the
    # chao1 point estimate or its variance.
    if not f1:
        return _chao_confidence_no_singletons(counts.sum(), observed, zscore)

    chao_est = chao1(counts, bias_corrected)
    chao_var = _chao1_var(counts, bias_corrected)
    return _chao_confidence_with_singletons(chao_est, observed, chao_var,
                                            zscore)
+
+
def _chao1_var(counts, bias_corrected=True):
    """Compute chao1 variance using the decision rules from EstimateS."""
    o, s, d = osd(counts)
    if not d:
        # No doubletons: variance depends on the chao1 point estimate.
        return _chao1_var_no_doubletons(s, chao1(counts, bias_corrected))
    if not s:
        # No singletons (but doubletons present).
        return _chao1_var_no_singletons(counts.sum(), o)
    # Both singletons and doubletons present.
    if bias_corrected:
        return _chao1_var_bias_corrected(s, d)
    return _chao1_var_uncorrected(s, d)
+
+
+def _chao1_var_uncorrected(singles, doubles):
+    """Calculates chao1, uncorrected.
+
+    From EstimateS manual, equation 5.
+
+    """
+    r = singles / doubles
+    return doubles * (.5 * r ** 2 + r ** 3 + .24 * r ** 4)
+
+
+def _chao1_var_bias_corrected(s, d):
+    """Calculates chao1 variance, bias-corrected.
+
+    `s` is the number of singletons and `d` is the number of doubletons.
+
+    From EstimateS manual, equation 6.
+
+    """
+    return (s * (s - 1) / (2 * (d + 1)) + (s * (2 * s - 1) ** 2) /
+            (4 * (d + 1) ** 2) + (s ** 2 * d * (s - 1) ** 2) /
+            (4 * (d + 1) ** 4))
+
+
+def _chao1_var_no_doubletons(s, chao1):
+    """Calculates chao1 variance in absence of doubletons.
+
+    From EstimateS manual, equation 7.
+
+    `s` is the number of singletons, and `chao1` is the estimate of the mean of
+    Chao1 from the same dataset.
+
+    """
+    return s * (s - 1) / 2 + s * (2 * s - 1) ** 2 / 4 - s ** 4 / (4 * chao1)
+
+
+def _chao1_var_no_singletons(n, o):
+    """Calculates chao1 variance in absence of singletons.
+
+    `n` is the number of individuals and `o` is the number of observed OTUs.
+
+    From EstimateS manual, equation 8.
+
+    """
+    return o * np.exp(-n / o) * (1 - np.exp(-n / o))
+
+
+def _chao_confidence_with_singletons(chao, observed, var_chao, zscore=1.96):
+    """Calculates confidence bounds for chao1 or chao2.
+
+    Uses Eq. 13 of EstimateS manual.
+
+    `zscore` is the score to use for confidence. The default of 1.96 is for 95%
+    confidence.
+
+    """
+    T = chao - observed
+    # if no diff betweeh chao and observed, CI is just point estimate of
+    # observed
+    if T == 0:
+        return observed, observed
+    K = np.exp(abs(zscore) * np.sqrt(np.log(1 + (var_chao / T ** 2))))
+    return observed + T / K, observed + T * K
+
+
+def _chao_confidence_no_singletons(n, s, zscore=1.96):
+    """Calculates confidence bounds for chao1/chao2 in absence of singletons.
+
+    Uses Eq. 14 of EstimateS manual.
+
+    `n` is the number of individuals and `s` is the number of OTUs.
+
+    """
+    P = np.exp(-n / s)
+    return (max(s, s / (1 - P) - zscore * np.sqrt((s * P / (1 - P)))),
+            s / (1 - P) + zscore * np.sqrt(s * P / (1 - P)))
diff --git a/skbio/diversity/alpha/_gini.py b/skbio/diversity/alpha/_gini.py
new file mode 100644
index 0000000..bdf74f1
--- /dev/null
+++ b/skbio/diversity/alpha/_gini.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import numpy as np
+
+from ._base import _validate
+
+
def gini_index(data, method='rectangles'):
    """Calculate the Gini index.

    The Gini index is defined as

    .. math::

       G=\\frac{A}{A+B}

    where :math:`A` is the area between :math:`y=x` and the Lorenz curve and
    :math:`B` is the area under the Lorenz curve. Simplifies to :math:`1-2B`
    since :math:`A+B=0.5`.

    Parameters
    ----------
    data : 1-D array_like
        Vector of counts, abundances, proportions, etc. All entries must be
        non-negative.
    method : {'rectangles', 'trapezoids'}
        Method for calculating the area under the Lorenz curve. If
        ``'rectangles'``, connects the Lorenz curve points by lines parallel to
        the x axis. This is the correct method (in our opinion) though
        ``'trapezoids'`` might be desirable in some circumstances. If
        ``'trapezoids'``, connects the Lorenz curve points by linear segments
        between them. Basically assumes that the given sampling is accurate and
        that more features of given data would fall on linear gradients between
        the values of this data.

    Returns
    -------
    double
        Gini index.

    Raises
    ------
    ValueError
        If `method` isn't one of the supported methods for calculating the area
        under the curve.

    Notes
    -----
    The Gini index was introduced in [1]_. The formula for
    ``method='rectangles'`` is

    .. math::

       dx\\sum_{i=1}^n h_i

    The formula for ``method='trapezoids'`` is

    .. math::

       dx(\\frac{h_0+h_n}{2}+\\sum_{i=1}^{n-1} h_i)

    References
    ----------
    .. [1] Gini, C. (1912). "Variability and Mutability", C. Cuppini, Bologna,
       156 pages. Reprinted in Memorie di metodologica statistica (Ed. Pizetti
       E, Salvemini, T). Rome: Libreria Eredi Virgilio Veschi (1955).

    """
    # Suppress cast to int because this method supports ints and floats.
    data = _validate(data, suppress_cast=True)
    lorenz_points = _lorenz_curve(data)
    B = _lorenz_curve_integrator(lorenz_points, method)
    return 1 - 2 * B
+
+
+def _lorenz_curve(data):
+    """Calculate the Lorenz curve for input data.
+
+    Notes
+    -----
+    Formula available on wikipedia.
+
+    """
+    sorted_data = np.sort(data)
+    Sn = sorted_data.sum()
+    n = sorted_data.shape[0]
+    return np.arange(1, n + 1) / n, sorted_data.cumsum() / Sn
+
+
+def _lorenz_curve_integrator(lc_pts, method):
+    """Calculates the area under a Lorenz curve.
+
+    Notes
+    -----
+    Could be utilized for integrating other simple, non-pathological
+    "functions" where width of the trapezoids is constant.
+
+    """
+    x, y = lc_pts
+
+    # each point differs by 1/n
+    dx = 1 / x.shape[0]
+
+    if method == 'trapezoids':
+        # 0 percent of the population has zero percent of the goods
+        h_0 = 0.0
+        h_n = y[-1]
+        # the 0th entry is at x=1/n
+        sum_hs = y[:-1].sum()
+        return dx * ((h_0 + h_n) / 2 + sum_hs)
+    elif method == 'rectangles':
+        return dx * y.sum()
+    else:
+        raise ValueError("Method '%s' not implemented. Available methods: "
+                         "'rectangles', 'trapezoids'." % method)
diff --git a/skbio/diversity/alpha/_lladser.py b/skbio/diversity/alpha/_lladser.py
new file mode 100644
index 0000000..79b6df6
--- /dev/null
+++ b/skbio/diversity/alpha/_lladser.py
@@ -0,0 +1,605 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import numpy as np
+
+from ._base import _validate
+
+
def lladser_pe(counts, r=10):
    """Calculate single point estimate of conditional uncovered probability.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    r : int, optional
        Number of new colors that are required for the next prediction.

    Returns
    -------
    double
        Single point estimate of the conditional uncovered probability. May be
        ``np.nan`` if a point estimate could not be computed.

    See Also
    --------
    lladser_ci

    Notes
    -----
    This function is just a wrapper around the full point estimator described
    in Theorem 2 (i) in [1]_, intended to be called for a single best estimate
    on a complete sample.

    References
    ----------
    .. [1] Lladser, Gouet, and Reeder, "Extrapolation of Urn Models via
       Poissonization: Accurate Measurements of the Microbial Unknown" PLoS
       2011.

    """
    counts = _validate(counts)
    sample = _expand_counts(counts)
    np.random.shuffle(sample)

    # Keep only the final (best) estimate; the series may be empty if the
    # sample never yields r new OTUs.
    estimates = list(_lladser_point_estimates(sample, r))
    if not estimates:
        return np.nan
    return estimates[-1][0]
+
+
def lladser_ci(counts, r, alpha=0.95, f=10, ci_type='ULCL'):
    """Calculate single CI of the conditional uncovered probability.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    r : int
        Number of new colors that are required for the next prediction.
    alpha : float, optional
        Desired confidence level.
    f : float, optional
        Ratio between upper and lower bound.
    ci_type : {'ULCL', 'ULCU', 'U', 'L'}
        Type of confidence interval. If ``'ULCL'``, upper and lower bounds with
        conservative lower bound. If ``'ULCU'``, upper and lower bounds with
        conservative upper bound. If ``'U'``, upper bound only, lower bound
        fixed to 0.0. If ``'L'``, lower bound only, upper bound fixed to 1.0.

    Returns
    -------
    tuple
        Confidence interval as ``(lower_bound, upper_bound)``.

    See Also
    --------
    lladser_pe

    Notes
    -----
    This function is just a wrapper around the full CI estimator described
    in Theorem 2 (iii) in [1]_, intended to be called for a single best CI
    estimate on a complete sample.

    References
    ----------
    .. [1] Lladser, Gouet, and Reeder, "Extrapolation of Urn Models via
       Poissonization: Accurate Measurements of the Microbial Unknown" PLoS
       2011.

    """
    counts = _validate(counts)
    sample = _expand_counts(counts)
    np.random.shuffle(sample)

    # Keep only the final (best) CI; the series may be empty if the sample
    # never yields r new OTUs.
    intervals = list(_lladser_ci_series(sample, r, alpha, f, ci_type))
    if not intervals:
        return np.nan, np.nan
    return intervals[-1]
+
+
+def _expand_counts(counts):
+    """Convert vector of counts at each index to vector of indices."""
+    # From http://stackoverflow.com/a/22671394
+    return np.repeat(np.arange(counts.size), counts)
+
+
def _lladser_point_estimates(sample, r=10):
    """Series of point estimates of the conditional uncovered probability.

    Parameters
    ----------
    sample : 1-D array_like, int
        Series of random observations.
    r : int, optional
        Number of new colors that are required for the next prediction.

    Returns
    -------
    generator
        Each new color yields a tuple of three elements: the point estimate,
        position in sample of prediction, and random variable from Poisson
        process (mostly to make testing easier).

    Raises
    ------
    ValueError
        If `r` is less than or equal to 2.

    Notes
    -----
    This is the point estimator described in Theorem 2 (i) in [1]_.

    References
    ----------
    .. [1] Lladser, Gouet, and Reeder, "Extrapolation of Urn Models via
       Poissonization: Accurate Measurements of the Microbial Unknown" PLoS
       2011.

    """
    if r <= 2:
        raise ValueError("r must be greater than or equal to 3.")

    for interval in _get_interval_for_r_new_otus(sample, r):
        count, _, _, position = interval
        # Random draw from the associated Poissonization; exposed in the
        # yielded tuple mostly to make testing easier.
        t = np.random.gamma(count, 1)
        yield (r - 1) / t, position, t
+
+
+def _get_interval_for_r_new_otus(seq, r):
+    """Compute interval between r new OTUs for seq of samples.
+
+    Imagine an urn with colored balls. Given a drawing of balls from the urn,
+    compute how many balls need to be looked at to discover r new colors.
+    Colors can be repeated.
+
+    Parameters
+    ----------
+    seq : sequence
+        Series of observations (the actual sample, not the frequencies).
+    r : int
+        Number of new colors that need to be observed for a new interval.
+
+    Returns
+    -------
+    generator
+        For each new color seen for the first time, yields a tuple of four
+        elements: the length of interval (i.e. number of observations looked
+        at), the set of seen colors, position in seq after seeing last new
+        color (end of interval), and position in seq where interval is started.
+
+    """
+    seen = set()
+    seq_len = len(seq)
+
+    # note: first iteration is after looking at first char
+    for i, curr in enumerate(seq):
+        # bail out if there's nothing new
+        if curr in seen:
+            continue
+        else:
+            seen.add(curr)
+
+        # otherwise, need to see distance to get k colors
+        unseen = 0
+        j = i + 1
+        while unseen < r and j < seq_len:
+            if seq[j] not in seen:
+                unseen += 1
+            # note: increments after termination condition
+            j += 1
+
+        # the interval to see r new colors
+        count = j - i - 1
+        # the position in seq after seeing r new ones
+        cost = j
+
+        # bail out if not enough unseen
+        if not count or (unseen < r):
+            raise StopIteration
+
+        # make a copy of seen before yielding, as we'll continue to add to the
+        # set in subsequent iterations
+        yield count, set(seen), cost, i
+
+
def _lladser_ci_series(seq, r, alpha=0.95, f=10, ci_type='ULCL'):
    """Construct r-color confidence intervals for uncovered conditional prob.

    Parameters
    ----------
    seq : sequence
        Sequence of colors (the actual sample, not the counts).
    r : int
        Number of new colors that are required for the next prediction.
    alpha : float, optional
        Desired confidence level.
    f : float, optional
        Ratio between upper and lower bound.
    ci_type : {'ULCL', 'ULCU', 'U', 'L'}
        Type of confidence interval. If ``'ULCL'``, upper and lower bounds with
        conservative lower bound. If ``'ULCU'``, upper and lower bounds with
        conservative upper bound. If ``'U'``, upper bound only, lower bound
        fixed to 0.0. If ``'L'``, lower bound only, upper bound fixed to 1.0.

    Returns
    -------
    generator
        Yields one CI prediction for each new color that is detected and where.

    """
    for interval in _get_interval_for_r_new_otus(seq, r):
        interval_length = interval[0]
        # Poissonized waiting time for this interval.
        t = np.random.gamma(interval_length, 1)
        yield _lladser_ci_from_r(r, t, alpha, f, ci_type)
+
+
def _lladser_ci_from_r(r, t, alpha=0.95, f=10, ci_type='ULCL'):
    """Construct r-color confidence interval for uncovered conditional prob.

    Returns
    -------
    tuple
        Confidence interval that contains the true conditional uncovered
        probability with a probability of 100% * `alpha`.

    Raises
    ------
    ValueError
        For combinations of `r`, `f`, and `alpha` that do not have precomputed
        results.

    """
    alpha = round(alpha, 2)

    # One-sided intervals are served directly from the precomputed tables.
    if ci_type == 'U':
        if alpha != 0.95:
            raise ValueError("alpha must be 0.95 if ci_type is 'U'.")
        if r not in _UPPER_CONFIDENCE_BOUND:
            raise ValueError("r must be between 1-25 or 50 if ci_type is 'U'.")
        return 0.0, _UPPER_CONFIDENCE_BOUND[r] / t

    if ci_type == 'L':
        if alpha != 0.95:
            raise ValueError("alpha must be 0.95 if ci_type is 'L'.")
        if r not in _LOWER_CONFIDENCE_BOUND:
            raise ValueError("r must be between 1-25 if ci_type is 'L'.")
        return _LOWER_CONFIDENCE_BOUND[r] / t, 1.0

    conservative_lower, conservative_upper = _ul_confidence_bounds(f, r, alpha)
    if ci_type == 'ULCL':
        bound_param = conservative_lower
    elif ci_type == 'ULCU':
        bound_param = conservative_upper
    else:
        raise ValueError("Unknown ci_type '%s'." % ci_type)

    lower_bound = bound_param / t
    # The upper bound is a probability, so clamp it at 1.
    upper_bound = min(bound_param * f / t, 1.0)

    return lower_bound, upper_bound
+
+
def _ul_confidence_bounds(f, r, alpha):
    """Return confidence bounds based on ratio and alpha.

    This function is just a lookup of some precomputed values.

    Parameters
    ----------
    f : float
        Desired ratio of upper to lower bound.
    r : int
        Number of new colors.
    alpha : float
        Confidence interval (for 95% confidence use 0.95).

    Returns
    -------
    tuple
        Constants ``(c_1, c_2)`` such that the confidence interval is
        ``[c_1/T_r, c_1*f/T_r]`` for conservative lower bound intervals and
        ``[c_2/T_r, c_2*f/T_r]`` for conservative upper bound intervals.

    """
    key = (f, r, alpha)
    if key in _PRECOMPUTED_TABLE:
        return _PRECOMPUTED_TABLE[key]

    a = b = None
    # Combinations outside the explicit table are only precomputed for f=10
    # and alpha in {0.90, 0.95, 0.99}.
    if f == 10 and r <= 50:
        if alpha in _CBS and r < len(_CBS[alpha]):
            a, b = _CBS[alpha][r]

    if a is None or b is None:
        raise ValueError("No constants are precomputed for the combination of "
                         "f=%f, r=%d, and alpha=%.2f" % (f, r, alpha))
    return a, b
+
+
# Maps r to a constant c such that the 95% confidence interval with lower bound
# fixed at 0 is [0, c/T_r]. This constant is constant b according to
# Theorem 2 (iii) in the paper with a=0, aka c_0 from Table 3.
# Used by _lladser_ci_from_r when ci_type='U'; keys are the supported r
# values (1-25 and 50).
_UPPER_CONFIDENCE_BOUND = {
    1: 2.995732274,
    2: 4.743864518,
    3: 6.295793622,
    4: 7.753656528,
    5: 9.153519027,
    6: 10.51303491,
    7: 11.84239565,
    8: 13.14811380,
    9: 14.43464972,
    10: 15.70521642,
    11: 16.96221924,
    12: 18.20751425,
    13: 19.44256933,
    14: 20.66856908,
    15: 21.88648591,
    16: 23.09712976,
    17: 24.30118368,
    18: 25.49923008,
    19: 26.69177031,
    20: 27.87923964,
    21: 29.06201884,
    22: 30.24044329,
    23: 31.41481021,
    24: 32.58538445,
    25: 33.75240327,
    50: 62.17105670
}
+
+
# Maps r to a constant c such that the 95% confidence interval with upper bound
# fixed at 1 is [c/T_r, 1]. This constant is constant b according to
# Theorem 2 (iii) in the paper with b=1, aka c_3 from Table 3.
# Used by _lladser_ci_from_r when ci_type='L'; keys are the supported r
# values (1-25).
_LOWER_CONFIDENCE_BOUND = {
    1: 0.051293294,
    2: 0.355361510,
    3: 0.817691447,
    4: 1.366318397,
    5: 1.970149568,
    6: 2.613014744,
    7: 3.285315692,
    8: 3.980822786,
    9: 4.695227540,
    10: 5.425405697,
    11: 6.169007289,
    12: 6.924212514,
    13: 7.689578292,
    14: 8.463937522,
    15: 9.246330491,
    16: 10.03595673,
    17: 10.83214036,
    18: 11.63430451,
    19: 12.44195219,
    20: 13.25465160,
    21: 14.07202475,
    22: 14.89373854,
    23: 15.71949763,
    24: 16.54903871,
    25: 17.38212584
}
+
+
+# Hack in some special values we used for the paper.
+# Since Manuel needs to compute those semi-automatically
+# using Maple, we pre-calculate only a few common ones
+
+# precomputed table is {(f, r, alpha):(c_1, c_2)}
+_PRECOMPUTED_TABLE = {
+    (2, 50, 0.95): (31.13026306, 38.94718565),
+    (2, 33, 0.95): (22.3203508, 23.4487304),
+    (1.5, 100, 0.95): (79.0424349, 83.22790086),
+    (1.5, 94, 0.95): (75.9077267, 76.5492088),
+    (2.5, 19, 0.95): (11.26109001, 11.96814857),
+
+    # In the next block for each f, we report the smallest possible value
+    # of r from table 4 in the paper
+    (80, 2, 0.95): (0.0598276655, 0.355361510),
+    (48, 2, 0.95): (0.1013728884, 0.355358676),
+    (40, 2, 0.95): (0.1231379857, 0.355320458),
+    (24, 2, 0.95): (0.226833483, 0.346045204),
+    (20, 3, 0.95): (0.320984257, 0.817610455),
+    (12, 3, 0.95): (0.590243030, 0.787721610),
+    (10, 4, 0.95): (0.806026244, 1.360288674),
+    (6, 6, 0.95): (1.8207383, 2.58658608),
+    (5, 7, 0.95): (2.48303930, 3.22806682),
+    (3, 14, 0.95): (7.17185045, 8.27008349),
+    (2.5, 19, 0.95): (11.26109001, 11.96814857),
+    (1.5, 94, 0.95): (75.9077267, 76.5492088),
+    (1.25, 309, 0.95): (275.661191, 275.949782)
+}
+
+
# Below are the values used for Theorem 3 iii
# Values hand computed by Manuel Lladser using Maple. For each alpha (0.90,
# 0.95, and 0.99), there is a list mapping r to (c_1, c_2), where r is used as
# an index into the list.
# (None, None) entries mark r values with no feasible solution or that were
# simply not computed; _ul_confidence_bounds treats them as "not available".

_CB_90 = [
    (None, None),  # 0, makes indexing easier
    (None, None),  # no feasible solution
    (None, None),  # no feasible solution
    (.5635941995, 1.095834700),
    (.6764656264, 1.744588615),
    (.8018565594, 2.432587343),
    (.9282215025, 3.151897973),
    (1.053433716, 3.894766804),
    (1.177158858, 4.656118177),
    (1.299491033, 5.432468058),
    (1.420604842, 6.221304605),  # 10
    (1.540665805, 7.020746595),
    (1.659812701, 7.829342026),
    (1.778158703, 8.645942495),
    (1.895796167, 9.469621185),
    (2.012801198, 10.29961731),
    (2.129237257, 11.13529724),
    (2.245157877, 11.97612664),
    (2.360608695, 12.82164994),
    (2.475628991, 13.67147502),
    (2.590252861, 14.52526147),  # 20
    (2.704510123, 15.38271151),
    (2.818427036, 16.24356290),
    (2.932026869, 17.10758326),
    (3.045330351, 17.97456551),
    (3.158356050, 18.84432420),
    (None, None),  # not computed
    (None, None),
    (None, None),
    (None, None),
    (3.719850286, 23.22944415),  # 30
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (4.828910181, 32.13892224),  # 40
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (5.924900191, 41.17906791)  # 50
]

_CB_95 = [
    (None, None),  # 0
    (None, None),
    (None, None),
    (None, None),
    (.8060262438, 1.360288674),  # 4
    (.9240311584, 1.969902537),
    (1.053998892, 2.613007253),
    (1.185086998, 3.285315518),
    (1.315076337, 3.980822783),
    # NOTE(review): the first element of the following entry (r=9) breaks
    # the monotone pattern of its neighbors (expected ~1.44, not 4.69, which
    # equals the second element) -- confirm against Lladser's tables before
    # relying on r=9 at alpha=0.95.
    (4.695227540, 4.695227541),
    (1.570546801, 5.425405698),  # 10
    (1.696229569, 6.169007289),
    (1.820753729, 6.924212513),
    (1.944257622, 7.689578291),
    (2.066857113, 8.463937522),
    (2.188648652, 9.246330491),
    (2.309712994, 10.03595673),
    (2.430118373, 10.83214036),
    (2.549923010, 11.63430451),
    (2.669177032, 12.44195219),
    (2.787923964, 13.25465160),  # 20
    (2.906201884, 14.07202475),
    (3.024044329, 14.89373854),
    (3.141481020, 15.71949763),
    (3.258538445, 16.54903871),
    (3.375240327, 17.38212584),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (3.954097220, 21.59397923),  # 30
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (5.093973695, 30.19573919),  # 40
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (6.217105673, 38.96473258)  # 50
]

_CB_99 = [
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (1.360316290, 1.768978323),
    (1.470856924, 2.329171347),
    (1.604478487, 2.906049304),
    (1.741759456, 3.507452949),
    (1.878809285, 4.130199076),  # 10
    (2.014632329, 4.771246173),
    (2.149044735, 5.428180734),
    (2.282101533, 6.099073460),
    (2.413917374, 6.782354878),
    (2.544610844, 7.476728267),
    (2.674289153, 8.181107778),
    (2.803045614, 8.894573463),
    (2.930960779, 9.616337916),
    (3.058104355, 10.34572103),
    (3.184536992, 11.08213063),  # 20
    (3.310311816, 11.82504734),
    (3.435475649, 12.57401269),
    (3.560070013, 13.32861956),
    (3.684131925, 14.08850448),
    (3.807694563, 14.85334135),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (4.41897094, 18.7424258),  # 30
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (5.61643962, 26.7700386),  # 40
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (None, None),
    (6.79033616, 35.0324474)  # 50
]

# Dispatch table from alpha level to the corresponding (c_1, c_2) list,
# consumed by _ul_confidence_bounds.
_CBS = {
    0.90: _CB_90,
    0.95: _CB_95,
    0.99: _CB_99
}
diff --git a/skbio/diversity/alpha/tests/__init__.py b/skbio/diversity/alpha/tests/__init__.py
new file mode 100644
index 0000000..c99682c
--- /dev/null
+++ b/skbio/diversity/alpha/tests/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/diversity/alpha/tests/test_ace.py b/skbio/diversity/alpha/tests/test_ace.py
new file mode 100644
index 0000000..65de9e5
--- /dev/null
+++ b/skbio/diversity/alpha/tests/test_ace.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import numpy as np
+from nose.tools import assert_almost_equal, assert_raises
+
+from skbio.diversity.alpha import ace
+
+
def test_ace():
    # (counts, expected ACE estimate) pairs covering typical inputs.
    cases = [
        (np.array([2, 0]), 1.0),
        (np.array([12, 0, 9]), 2.0),
        (np.array([12, 2, 8]), 3.0),
        (np.array([12, 2, 1]), 4.0),
        (np.array([12, 1, 2, 1]), 7.0),
        (np.array([12, 3, 2, 1]), 4.6),
        (np.array([12, 3, 6, 1, 10]), 5.62749672),
        # Just returns the number of OTUs when all are abundant.
        (np.array([12, 12, 13, 14]), 4.0),
    ]
    for counts, expected in cases:
        assert_almost_equal(ace(counts), expected)

    # Border case: only singletons and 10-tons, no abundant OTUs.
    assert_almost_equal(ace([0, 1, 1, 0, 0, 10, 10, 1, 0, 0]), 9.35681818182)
+
+
def test_ace_only_rare_singletons():
    # ACE is undefined when every rare OTU is a singleton.
    counts = [0, 0, 43, 0, 1, 0, 1, 42, 1, 43]
    with assert_raises(ValueError):
        ace(counts)
+
+
if __name__ == '__main__':
    # Allow running this test module directly through nose.
    import nose
    nose.runmodule()
diff --git a/skbio/diversity/alpha/tests/test_base.py b/skbio/diversity/alpha/tests/test_base.py
new file mode 100644
index 0000000..d418b02
--- /dev/null
+++ b/skbio/diversity/alpha/tests/test_base.py
@@ -0,0 +1,297 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+
+from skbio.diversity.alpha import (
+    berger_parker_d, brillouin_d, dominance, doubles, enspie, equitability,
+    esty_ci, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q, margalef,
+    mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit, observed_otus,
+    osd, robbins, shannon, simpson, simpson_e, singles, strong)
+from skbio.diversity.alpha._base import _validate
+
+
+class BaseTests(TestCase):
+    """Tests for the general-purpose alpha diversity measures in _base."""
+
+    def setUp(self):
+        # 22 individuals across 9 observed OTUs (one entry is zero);
+        # reused by several tests below.
+        self.counts = np.array([0, 1, 1, 4, 2, 5, 2, 4, 1, 2])
+
+    def test_validate(self):
+        # python list
+        obs = _validate([0, 2, 1, 3])
+        npt.assert_array_equal(obs, np.array([0, 2, 1, 3]))
+        self.assertEqual(obs.dtype, int)
+
+        # numpy array (no copy made)
+        data = np.array([0, 2, 1, 3])
+        obs = _validate(data)
+        npt.assert_array_equal(obs, data)
+        self.assertEqual(obs.dtype, int)
+        self.assertTrue(obs is data)
+
+        # single element
+        obs = _validate([42])
+        npt.assert_array_equal(obs, np.array([42]))
+        self.assertEqual(obs.dtype, int)
+        self.assertEqual(obs.shape, (1,))
+
+        # suppress casting to int
+        obs = _validate([42.2, 42.1, 0], suppress_cast=True)
+        npt.assert_array_equal(obs, np.array([42.2, 42.1, 0]))
+        self.assertEqual(obs.dtype, float)
+
+        # all zeros
+        obs = _validate([0, 0, 0])
+        npt.assert_array_equal(obs, np.array([0, 0, 0]))
+        self.assertEqual(obs.dtype, int)
+
+        # all zeros (single value)
+        obs = _validate([0])
+        npt.assert_array_equal(obs, np.array([0]))
+        self.assertEqual(obs.dtype, int)
+
+    def test_validate_invalid_input(self):
+        # wrong dtype
+        with self.assertRaises(TypeError):
+            _validate([0, 2, 1.2, 3])
+
+        # wrong number of dimensions (2-D)
+        with self.assertRaises(ValueError):
+            _validate([[0, 2, 1, 3], [4, 5, 6, 7]])
+
+        # wrong number of dimensions (scalar)
+        with self.assertRaises(ValueError):
+            _validate(1)
+
+        # negative values
+        with self.assertRaises(ValueError):
+            _validate([0, 0, 2, -1, 3])
+
+    def test_berger_parker_d(self):
+        self.assertEqual(berger_parker_d(np.array([5])), 1)
+        self.assertEqual(berger_parker_d(np.array([5, 5])), 0.5)
+        self.assertEqual(berger_parker_d(np.array([1, 1, 1, 1, 0])), 0.25)
+        # Most abundant OTU has 5 of the 22 individuals.
+        self.assertEqual(berger_parker_d(self.counts), 5 / 22)
+
+    def test_brillouin_d(self):
+        self.assertAlmostEqual(brillouin_d(np.array([1, 2, 0, 0, 3, 1])),
+                               0.86289353018248782)
+
+    def test_dominance(self):
+        self.assertEqual(dominance(np.array([5])), 1)
+        self.assertAlmostEqual(dominance(np.array([1, 0, 2, 5, 2])), 0.34)
+
+    def test_doubles(self):
+        self.assertEqual(doubles(self.counts), 3)
+        self.assertEqual(doubles(np.array([0, 3, 4])), 0)
+        self.assertEqual(doubles(np.array([2])), 1)
+        self.assertEqual(doubles(np.array([0, 0])), 0)
+
+    def test_enspie(self):
+        # Totally even community should have ENS_pie = number of OTUs.
+        self.assertAlmostEqual(enspie(np.array([1, 1, 1, 1, 1, 1])), 6)
+        self.assertAlmostEqual(enspie(np.array([13, 13, 13, 13])), 4)
+
+        # Hand calculated.
+        arr = np.array([1, 41, 0, 0, 12, 13])
+        exp = 1 / ((arr / arr.sum()) ** 2).sum()
+        self.assertAlmostEqual(enspie(arr), exp)
+
+        # Using dominance.
+        exp = 1 / dominance(arr)
+        self.assertAlmostEqual(enspie(arr), exp)
+
+        arr = np.array([1, 0, 2, 5, 2])
+        exp = 1 / dominance(arr)
+        self.assertAlmostEqual(enspie(arr), exp)
+
+    def test_equitability(self):
+        self.assertAlmostEqual(equitability(np.array([5, 5])), 1)
+        self.assertAlmostEqual(equitability(np.array([1, 1, 1, 1, 0])), 1)
+
+    def test_esty_ci(self):
+        def _diversity(indices, f):
+            """Calculate diversity index for each window of size 1.
+
+            indices: vector of indices of OTUs
+            f: f(counts) -> diversity measure
+
+            """
+            result = []
+            max_size = max(indices) + 1
+            freqs = np.zeros(max_size, dtype=int)
+            for i in range(len(indices)):
+                freqs += np.bincount(indices[i:i + 1], minlength=max_size)
+                try:
+                    curr = f(freqs)
+                except (ZeroDivisionError, FloatingPointError):
+                    curr = 0
+                result.append(curr)
+            return np.array(result)
+
+        data = [1, 1, 2, 1, 1, 3, 2, 1, 3, 4]
+
+        # esty_ci returns (lower, upper) pairs; split them into two series.
+        observed_lower, observed_upper = zip(*_diversity(data, esty_ci))
+
+        expected_lower = np.array([1, -1.38590382, -0.73353593, -0.17434465,
+                                   -0.15060902, -0.04386191, -0.33042054,
+                                   -0.29041008, -0.43554755, -0.33385652])
+        expected_upper = np.array([1, 1.38590382, 1.40020259, 0.67434465,
+                                   0.55060902, 0.71052858, 0.61613483,
+                                   0.54041008, 0.43554755, 0.53385652])
+
+        npt.assert_array_almost_equal(observed_lower, expected_lower)
+        npt.assert_array_almost_equal(observed_upper, expected_upper)
+
+    def test_fisher_alpha(self):
+        exp = 2.7823795367398798
+        arr = np.array([4, 3, 4, 0, 1, 0, 2])
+        obs = fisher_alpha(arr)
+        self.assertAlmostEqual(obs, exp)
+
+        # Should depend only on S and N (number of OTUs, number of
+        # individuals / seqs), so we should obtain the same output as above.
+        obs = fisher_alpha([1, 6, 1, 0, 1, 0, 5])
+        self.assertAlmostEqual(obs, exp)
+
+        # Should match another by hand:
+        # 2 OTUs, 62 seqs, alpha is 0.39509
+        obs = fisher_alpha([61, 0, 0, 1])
+        self.assertAlmostEqual(obs, 0.39509, delta=0.0001)
+
+        # Test case where we have >1000 individuals (SDR-IV makes note of this
+        # case). Verified against R's vegan::fisher.alpha.
+        obs = fisher_alpha([999, 0, 10])
+        self.assertAlmostEqual(obs, 0.2396492)
+
+    def test_goods_coverage(self):
+        counts = [1] * 75 + [2, 2, 2, 2, 2, 2, 3, 4, 4]
+        obs = goods_coverage(counts)
+        self.assertAlmostEqual(obs, 0.23469387755)
+
+    def test_heip_e(self):
+        # Calculate "by hand".
+        arr = np.array([1, 2, 3, 1])
+        h = shannon(arr, base=np.e)
+        expected = (np.exp(h) - 1) / 3
+        self.assertEqual(heip_e(arr), expected)
+
+        # From Statistical Ecology: A Primer in Methods and Computing, page 94,
+        # table 8.1.
+        self.assertAlmostEqual(heip_e([500, 300, 200]), 0.90, places=2)
+        self.assertAlmostEqual(heip_e([500, 299, 200, 1]), 0.61, places=2)
+
+    def test_kempton_taylor_q(self):
+        # Approximate Magurran 1998 calculation p143.
+        arr = np.array([2, 3, 3, 3, 3, 3, 4, 4, 4, 6, 6, 7, 7, 9, 9, 11, 14,
+                        15, 15, 20, 29, 33, 34, 36, 37, 53, 57, 138, 146, 170])
+        exp = 14 / np.log(34 / 4)
+        self.assertAlmostEqual(kempton_taylor_q(arr), exp)
+
+        # Should get same answer regardless of input order.
+        np.random.shuffle(arr)
+        self.assertAlmostEqual(kempton_taylor_q(arr), exp)
+
+    def test_margalef(self):
+        self.assertEqual(margalef(self.counts), 8 / np.log(22))
+
+    def test_mcintosh_d(self):
+        self.assertAlmostEqual(mcintosh_d(np.array([1, 2, 3])),
+                               0.636061424871458)
+
+    def test_mcintosh_e(self):
+        num = np.sqrt(15)
+        den = np.sqrt(19)
+        exp = num / den
+        self.assertEqual(mcintosh_e(np.array([1, 2, 3, 1])), exp)
+
+    def test_menhinick(self):
+        # observed_otus = 9, total # of individuals = 22
+        self.assertEqual(menhinick(self.counts), 9 / np.sqrt(22))
+
+    def test_michaelis_menten_fit(self):
+        obs = michaelis_menten_fit([22])
+        self.assertAlmostEqual(obs, 1.0)
+
+        obs = michaelis_menten_fit([42])
+        self.assertAlmostEqual(obs, 1.0)
+
+        obs = michaelis_menten_fit([34], num_repeats=3, params_guess=(13, 13))
+        self.assertAlmostEqual(obs, 1.0)
+
+        obs = michaelis_menten_fit([70, 70], num_repeats=5)
+        self.assertAlmostEqual(obs, 2.0, places=1)
+
+        obs_few = michaelis_menten_fit(np.arange(4) * 2, num_repeats=10)
+        obs_many = michaelis_menten_fit(np.arange(4) * 100, num_repeats=10)
+        # [0,100,200,300] looks like only 3 OTUs.
+        self.assertAlmostEqual(obs_many, 3.0, places=1)
+        # [0,2,4,6] looks like 3 OTUs with maybe more to be found.
+        self.assertTrue(obs_few > obs_many)
+
+    def test_observed_otus(self):
+        obs = observed_otus(np.array([4, 3, 4, 0, 1, 0, 2]))
+        self.assertEqual(obs, 5)
+
+        obs = observed_otus(np.array([0, 0, 0]))
+        self.assertEqual(obs, 0)
+
+        obs = observed_otus(self.counts)
+        self.assertEqual(obs, 9)
+
+    def test_osd(self):
+        # (observed OTUs, singletons, doubletons) for the shared counts.
+        self.assertEqual(osd(self.counts), (9, 3, 3))
+
+    def test_robbins(self):
+        self.assertEqual(robbins(np.array([1, 2, 3, 0, 1])), 2 / 7)
+
+    def test_shannon(self):
+        self.assertEqual(shannon(np.array([5])), 0)
+        self.assertEqual(shannon(np.array([5, 5])), 1)
+        self.assertEqual(shannon(np.array([1, 1, 1, 1, 0])), 2)
+
+    def test_simpson(self):
+        self.assertAlmostEqual(simpson(np.array([1, 0, 2, 5, 2])), 0.66)
+        self.assertAlmostEqual(simpson(np.array([5])), 0)
+
+    def test_simpson_e(self):
+        # A totally even community should have simpson_e = 1.
+        self.assertEqual(simpson_e(np.array([1, 1, 1, 1, 1, 1, 1])), 1)
+
+        arr = np.array([0, 30, 25, 40, 0, 0, 5])
+        freq_arr = arr / arr.sum()
+        D = (freq_arr ** 2).sum()
+        exp = 1 / (D * 4)
+        obs = simpson_e(arr)
+        self.assertEqual(obs, exp)
+
+        # From:
+        # https://groups.nceas.ucsb.edu/sun/meetings/calculating-evenness-
+        #   of-habitat-distributions
+        arr = np.array([500, 400, 600, 500])
+        D = 0.0625 + 0.04 + 0.09 + 0.0625
+        exp = 1 / (D * 4)
+        self.assertEqual(simpson_e(arr), exp)
+
+    def test_singles(self):
+        self.assertEqual(singles(self.counts), 3)
+        self.assertEqual(singles(np.array([0, 3, 4])), 0)
+        self.assertEqual(singles(np.array([1])), 1)
+        self.assertEqual(singles(np.array([0, 0])), 0)
+
+    def test_strong(self):
+        self.assertAlmostEqual(strong(np.array([1, 2, 3, 1])), 0.214285714)
+
+
+# Run this module's tests directly with unittest.
+if __name__ == '__main__':
+    main()
diff --git a/skbio/diversity/alpha/tests/test_chao1.py b/skbio/diversity/alpha/tests/test_chao1.py
new file mode 100644
index 0000000..45a1fa5
--- /dev/null
+++ b/skbio/diversity/alpha/tests/test_chao1.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+
+from skbio.diversity.alpha import chao1, chao1_ci
+from skbio.diversity.alpha._chao1 import _chao1_var
+
+
+class Chao1Tests(TestCase):
+    """Tests for the chao1 richness estimator and its CI/variance helpers."""
+
+    def setUp(self):
+        # Shared fixtures: a general sample, one without singletons, and
+        # one without doubletons (the chao1 bias correction's edge cases).
+        self.counts = np.array([0, 1, 1, 4, 2, 5, 2, 4, 1, 2])
+        self.no_singles = np.array([0, 2, 2, 4, 5, 0, 0, 0, 0, 0])
+        self.no_doubles = np.array([0, 1, 1, 4, 5, 0, 0, 0, 0, 0])
+
+    def test_chao1(self):
+        self.assertEqual(chao1(self.counts), 9.75)
+        self.assertEqual(chao1(self.counts, bias_corrected=False), 10.5)
+
+        self.assertEqual(chao1(self.no_singles), 4)
+        self.assertEqual(chao1(self.no_singles, bias_corrected=False), 4)
+
+        self.assertEqual(chao1(self.no_doubles), 5)
+        self.assertEqual(chao1(self.no_doubles, bias_corrected=False), 5)
+
+    def test_chao1_ci(self):
+        # Should match observed results from EstimateS. NOTE: EstimateS rounds
+        # to 2 dp.
+        obs = chao1_ci(self.counts)
+        npt.assert_allclose(obs, (9.07, 17.45), rtol=0.01)
+
+        obs = chao1_ci(self.counts, bias_corrected=False)
+        npt.assert_allclose(obs, (9.17, 21.89), rtol=0.01)
+
+        obs = chao1_ci(self.no_singles)
+        npt.assert_array_almost_equal(obs, (4, 4.95), decimal=2)
+
+        obs = chao1_ci(self.no_singles, bias_corrected=False)
+        npt.assert_array_almost_equal(obs, (4, 4.95), decimal=2)
+
+        obs = chao1_ci(self.no_doubles)
+        npt.assert_array_almost_equal(obs, (4.08, 17.27), decimal=2)
+
+        obs = chao1_ci(self.no_doubles, bias_corrected=False)
+        npt.assert_array_almost_equal(obs, (4.08, 17.27), decimal=2)
+
+    def test_chao1_var(self):
+        # Should match observed results from EstimateS. NOTE: EstimateS reports
+        # sd, not var, and rounds to 2 dp.
+        obs = _chao1_var(self.counts)
+        npt.assert_allclose(obs, 1.42 ** 2, rtol=0.01)
+
+        obs = _chao1_var(self.counts, bias_corrected=False)
+        npt.assert_allclose(obs, 2.29 ** 2, rtol=0.01)
+
+        obs = _chao1_var(self.no_singles)
+        self.assertAlmostEqual(obs, 0.39 ** 2, delta=0.01)
+
+        obs = _chao1_var(self.no_singles, bias_corrected=False)
+        self.assertAlmostEqual(obs, 0.39 ** 2, delta=0.01)
+
+        obs = _chao1_var(self.no_doubles)
+        self.assertAlmostEqual(obs, 2.17 ** 2, delta=0.01)
+
+        obs = _chao1_var(self.no_doubles, bias_corrected=False)
+        self.assertAlmostEqual(obs, 2.17 ** 2, delta=0.01)
+
+
+# Run this module's tests directly with unittest.
+if __name__ == '__main__':
+    main()
diff --git a/skbio/diversity/alpha/tests/test_gini.py b/skbio/diversity/alpha/tests/test_gini.py
new file mode 100644
index 0000000..e04c99a
--- /dev/null
+++ b/skbio/diversity/alpha/tests/test_gini.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+
+from skbio.diversity.alpha import gini_index
+from skbio.diversity.alpha._gini import (
+    _lorenz_curve, _lorenz_curve_integrator)
+
+
+class GiniTests(TestCase):
+    """Tests for gini_index and its Lorenz-curve helper functions."""
+
+    def setUp(self):
+        # self.lorenz_curve_points holds the (x, y) points of the Lorenz
+        # curve for self.data, used to test the integrator directly.
+        self.data = np.array([4.5, 6.7, 3.4, 15., 18., 3.5, 6.7, 14.1])
+        self.lorenz_curve_points = (
+            np.array([0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]),
+            np.array([0.047287899860917935, 0.095966620305980521,
+                      0.15855354659248957, 0.2517385257301808,
+                      0.34492350486787204, 0.541029207232267,
+                      0.74965229485396379, 1.0]))
+
+    def test_gini_index(self):
+        exp = 0.32771210013908214
+        obs = gini_index(self.data, 'trapezoids')
+        self.assertAlmostEqual(obs, exp)
+
+        exp = 0.20271210013908214
+        obs = gini_index(self.data, 'rectangles')
+        self.assertAlmostEqual(obs, exp)
+
+        # Raises error on negative data.
+        with self.assertRaises(ValueError):
+            gini_index([1.0, -3.1, 4.5])
+
+    def test_lorenz_curve(self):
+        npt.assert_array_almost_equal(_lorenz_curve(self.data),
+                                      self.lorenz_curve_points)
+
+    def test_lorenz_curve_integrator(self):
+        exp = 0.33614394993045893
+        obs = _lorenz_curve_integrator(self.lorenz_curve_points, 'trapezoids')
+        self.assertAlmostEqual(obs, exp)
+
+        exp = 0.39864394993045893
+        obs = _lorenz_curve_integrator(self.lorenz_curve_points, 'rectangles')
+        self.assertAlmostEqual(obs, exp)
+
+        # Raises error on invalid method.
+        with self.assertRaises(ValueError):
+            _lorenz_curve_integrator(self.lorenz_curve_points, 'brofist')
+
+
+# Run this module's tests directly with unittest.
+if __name__ == '__main__':
+    main()
diff --git a/skbio/diversity/alpha/tests/test_lladser.py b/skbio/diversity/alpha/tests/test_lladser.py
new file mode 100644
index 0000000..9ea8b13
--- /dev/null
+++ b/skbio/diversity/alpha/tests/test_lladser.py
@@ -0,0 +1,242 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+import numpy.testing as npt
+from nose.tools import (assert_equal, assert_almost_equal, assert_raises,
+                        assert_true)
+
+from skbio.stats import subsample_counts
+from skbio.diversity.alpha import lladser_pe, lladser_ci
+from skbio.diversity.alpha._lladser import (
+    _expand_counts, _lladser_point_estimates,
+    _get_interval_for_r_new_otus, _lladser_ci_series, _lladser_ci_from_r)
+
+
+def create_fake_observation():
+    """Create a subsample with defined property"""
+
+    # Create a subsample of a larger sample such that we can compute
+    # the expected probability of the unseen portion.
+    # This is used in the tests of lladser_pe and lladser_ci
+    counts = np.ones(1001, dtype='int64')
+    counts[0] = 9000
+    total = counts.sum()
+
+    fake_obs = subsample_counts(counts, 1000)
+    exp_p = 1 - sum([x/total for (x, y) in zip(counts, fake_obs) if y > 0])
+
+    return fake_obs, exp_p
+
+
+def test_lladser_pe():
+    """lladser_pe returns point estimates within the expected variance"""
+
+    obs = lladser_pe([3], r=4)
+    assert_true(np.isnan(obs))
+
+    np.random.seed(123456789)
+    fake_obs, exp_p = create_fake_observation()
+    reps = 100
+    sum = 0
+    for i in range(reps):
+        sum += lladser_pe(fake_obs, r=30)
+    obs = sum / reps
+
+    # Estimator has variance of (1-p)^2/(r-2),
+    # which for r=30 and p~=0.9 is 0.0289
+    assert_almost_equal(obs, exp_p, delta=0.03)
+
+
+def test_lladser_ci_nan():
+    """lladser_ci returns nan if sample is too short to make an estimate"""
+
+    obs = lladser_ci([3], r=4)
+    assert_true(len(obs) == 2 and np.isnan(obs[0]) and np.isnan(obs[1]))
+
+
+def test_lladser_ci():
+    """lladser_ci estimate using defaults contains p with 95% prob"""
+
+    np.random.seed(12345678)
+    reps = 100
+    sum = 0
+    for i in range(reps):
+        fake_obs, exp_p = create_fake_observation()
+        (low, high) = lladser_ci(fake_obs, r=10)
+        if (low <= exp_p <= high):
+            sum += 1
+
+    assert_true(sum/reps >= 0.95)
+
+
+def test_lladser_ci_f3():
+    """lladser_ci estimate using f=3 contains p with 95% prob"""
+
+    # Test different values of f=3 and r=14, which lie exactly on the
+    # 95% interval line. For 100 reps using simple cumulative binomial
+    # probs we expect to have more than 5 misses of the interval in 38%
+    # of all test runs. To make this test pass reliable we thus have to
+    # set a defined seed
+    np.random.seed(12345678)
+    reps = 100
+    sum = 0
+    for i in range(reps):
+        # re-create the obs for every estimate, such that they are truly
+        # independent events
+        fake_obs, exp_p = create_fake_observation()
+        (low, high) = lladser_ci(fake_obs, r=14, f=3)
+        if (low <= exp_p <= high):
+            sum += 1
+
+    assert_true(sum/reps >= 0.95)
+
+
+def test_expand_counts():
+    arr = np.array([2, 0, 1, 2])
+    npt.assert_array_equal(_expand_counts(arr), np.array([0, 0, 2, 3, 3]))
+
+
+def test_lladser_point_estimates():
+    s = [5, 1, 5, 1, 2, 3, 1, 5, 3, 2, 5, 3]
+    r = 3
+    observed = list(_lladser_point_estimates(s, r))
+    assert_equal(len(observed), 3)
+
+    for k in range(3):
+        x = observed[k]
+        t = x[2]
+        assert_equal(x[0], (r - 1) / t)
+
+    # Estimator has variance of (1-p)^2/(r-2),
+    # which for r=7 and p=0.5 is 0.05
+    seq = "WBWBWBWBWBWBWBWBWBWBWBWBWBWBWBWBWBW"
+    reps = 1000
+    sum = 0
+    for i in range(reps):
+        p, _, _ = list(_lladser_point_estimates(seq, r=7))[0]
+        sum += p
+    assert_true(0.45 < sum / reps and sum / reps < 0.55)
+
+
+def test_lladser_point_estimates_invalid_r():
+    with assert_raises(ValueError):
+        list(_lladser_point_estimates([5, 1, 5, 1, 2, 3, 1, 5, 3, 2, 5, 3], 2))
+
+
+def test_get_interval_for_r_new_otus():
+    s = [5, 1, 5, 1, 2, 3, 1, 5, 3, 2, 5]
+    expected = [(3, set([5]), 4, 0),
+                (4, set([5, 1]), 6, 1),
+                (4, set([5, 1, 2]), 9, 4)]
+    for x, y in zip(_get_interval_for_r_new_otus(s, 2), expected):
+        assert_equal(x, y)
+
+    s = [5, 5, 5, 5, 5]
+    # never saw new one
+    assert_equal(list(_get_interval_for_r_new_otus(s, 2)), [])
+
+
+def test_lladser_ci_series_exact():
+    # have seen RWB
+    urn_1 = 'RWBWWBWRRWRYWRPPZ'
+    results = list(_lladser_ci_series(urn_1, r=4))
+    assert_equal(len(results), 3)
+
+
+def test_lladser_ci_series_random():
+    seq = "WBWBWBWBWBWB"
+    observations = []
+    alpha = 0.95
+    reps = 1000
+    for i in range(reps):
+        obs = list(_lladser_ci_series(seq, r=4, alpha=alpha))[0]
+        observations.append(obs)
+    tps = list(filter(lambda a_b: a_b[0] < 0.5 and 0.5 < a_b[1], observations))
+    assert_true(len(tps) >= alpha * reps)  # 100%-95%
+
+
+def test_lladser_ci_from_r():
+    """Check _lladser_ci_from_r bounds against precomputed reference values.
+
+    Covers several (r, t, f, alpha, ci_type) combinations, including each
+    supported alpha and the one-sided ci_types 'U' and 'L'.
+    """
+    f = 10
+    t = 10
+    r = 4
+    obs_low, obs_high = _lladser_ci_from_r(r=r, t=t, f=f)
+    assert_almost_equal(obs_low, 0.0806026244)
+    assert_almost_equal(obs_high, 0.806026244)
+
+    r = 20
+    t = 100
+    obs_low, obs_high = _lladser_ci_from_r(r=r, t=t, f=f)
+    assert_almost_equal(obs_low, 0.02787923964)
+    assert_almost_equal(obs_high, 0.2787923964)
+
+    # make sure we test with each possible alpha
+    alpha = 0.99
+    obs_low, obs_high = _lladser_ci_from_r(r=r, t=t, f=f, alpha=alpha)
+    assert_almost_equal(obs_low, 0.03184536992)
+    assert_almost_equal(obs_high, 0.3184536992)
+
+    alpha = 0.9
+    r = 3
+    obs_low, obs_high = _lladser_ci_from_r(r=r, t=t, f=f, alpha=alpha)
+    assert_almost_equal(obs_low, 0.005635941995)
+    assert_almost_equal(obs_high, 0.05635941995)
+
+    # test other ci_types
+    ci_type = 'ULCU'
+    obs_low, obs_high = _lladser_ci_from_r(r=r, t=t, f=f, alpha=alpha,
+                                           ci_type=ci_type)
+    assert_almost_equal(obs_low, 0.01095834700)
+    assert_almost_equal(obs_high, 0.1095834700)
+
+    alpha = 0.95
+    t = 10
+    # ci_type 'U' is one-sided: the lower bound is 0.
+    ci_type = 'U'
+    obs_low, obs_high = _lladser_ci_from_r(r=r, t=t, f=f, alpha=alpha,
+                                           ci_type=ci_type)
+    assert_almost_equal(obs_low, 0)
+    assert_almost_equal(obs_high, 0.6295793622)
+
+    # ci_type 'L' is one-sided: the upper bound is 1.
+    ci_type = 'L'
+    obs_low, obs_high = _lladser_ci_from_r(r=r, t=t, f=f, alpha=alpha,
+                                           ci_type=ci_type)
+    assert_almost_equal(obs_low, 0.0817691447)
+    assert_almost_equal(obs_high, 1)
+
+
+def test_lladser_ci_from_r_invalid_input():
+    # unsupported alpha for ci_type='U'
+    with assert_raises(ValueError):
+        _lladser_ci_from_r(r=3, t=10, f=10, alpha=0.90, ci_type='U')
+
+    # unsupported r for ci_type='U'
+    with assert_raises(ValueError):
+        _lladser_ci_from_r(r=42, t=10, f=10, alpha=0.95, ci_type='U')
+
+    # unsupported alpha for ci_type='L'
+    with assert_raises(ValueError):
+        _lladser_ci_from_r(r=3, t=10, f=10, alpha=0.90, ci_type='L')
+
+    # unsupported r for ci_type='L'
+    with assert_raises(ValueError):
+        _lladser_ci_from_r(r=50, t=10, f=10, alpha=0.95, ci_type='L')
+
+    # unknown ci_type
+    with assert_raises(ValueError):
+        _lladser_ci_from_r(r=4, t=10, f=10, alpha=0.95, ci_type='brofist')
+
+    # requesting CI for not precomputed values
+    with assert_raises(ValueError):
+        _lladser_ci_from_r(r=500, t=10, f=10)
+
+
+if __name__ == '__main__':
+    # Allow running this test module directly; nose collects and runs tests.
+    import nose
+    nose.runmodule()
diff --git a/skbio/diversity/beta/__init__.py b/skbio/diversity/beta/__init__.py
new file mode 100644
index 0000000..946f8cd
--- /dev/null
+++ b/skbio/diversity/beta/__init__.py
@@ -0,0 +1,194 @@
+"""
+Beta diversity measures (:mod:`skbio.diversity.beta`)
+=====================================================
+
+.. currentmodule:: skbio.diversity.beta
+
+This package contains helper functions for working with scipy's pairwise
+distance (``pdist``) functions in scikit-bio, and will eventually be expanded
+to contain pairwise distance/dissimilarity methods that are not implemented
+(or planned to be implemented) in scipy.
+
+The functions in this package currently support applying ``pdist`` functions
+to all pairs of samples in a sample by observation count or abundance matrix
+and returning an ``skbio.DistanceMatrix`` object. This application is
+illustrated below for a few different forms of input.
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+    pw_distances
+    pw_distances_from_table
+
+Examples
+--------
+Create a table containing 7 OTUs and 6 samples:
+
+.. plot::
+   :context:
+
+   >>> from skbio.diversity.beta import pw_distances
+   >>> import numpy as np
+   >>> data = [[23, 64, 14, 0, 0, 3, 1],
+   ...         [0, 3, 35, 42, 0, 12, 1],
+   ...         [0, 5, 5, 0, 40, 40, 0],
+   ...         [44, 35, 9, 0, 1, 0, 0],
+   ...         [0, 2, 8, 0, 35, 45, 1],
+   ...         [0, 0, 25, 35, 0, 19, 0]]
+   >>> ids = list('ABCDEF')
+
+   Compute Bray-Curtis distances between all pairs of samples and return a
+   ``DistanceMatrix`` object:
+
+   >>> bc_dm = pw_distances(data, ids, "braycurtis")
+   >>> print(bc_dm)
+   6x6 distance matrix
+   IDs:
+   'A', 'B', 'C', 'D', 'E', 'F'
+   Data:
+   [[ 0.          0.78787879  0.86666667  0.30927835  0.85714286  0.81521739]
+    [ 0.78787879  0.          0.78142077  0.86813187  0.75        0.1627907 ]
+    [ 0.86666667  0.78142077  0.          0.87709497  0.09392265  0.71597633]
+    [ 0.30927835  0.86813187  0.87709497  0.          0.87777778  0.89285714]
+    [ 0.85714286  0.75        0.09392265  0.87777778  0.          0.68235294]
+    [ 0.81521739  0.1627907   0.71597633  0.89285714  0.68235294  0.        ]]
+
+   Compute Jaccard distances between all pairs of samples and return a
+   ``DistanceMatrix`` object:
+
+   >>> j_dm = pw_distances(data, ids, "jaccard")
+   >>> print(j_dm)
+   6x6 distance matrix
+   IDs:
+   'A', 'B', 'C', 'D', 'E', 'F'
+   Data:
+   [[ 0.          0.83333333  1.          1.          0.83333333  1.        ]
+    [ 0.83333333  0.          1.          1.          0.83333333  1.        ]
+    [ 1.          1.          0.          1.          1.          1.        ]
+    [ 1.          1.          1.          0.          1.          1.        ]
+    [ 0.83333333  0.83333333  1.          1.          0.          1.        ]
+    [ 1.          1.          1.          1.          1.          0.        ]]
+
+   Determine if the resulting distance matrices are significantly correlated
+   by computing the Mantel correlation between them. Then determine if the
+   p-value is significant based on an alpha of 0.05:
+
+   >>> from skbio.stats.distance import mantel
+   >>> r, p_value, n = mantel(j_dm, bc_dm)
+   >>> print(r)
+   -0.209362157621
+   >>> print(p_value < 0.05)
+   False
+
+   Compute PCoA for both distance matrices, and then find the Procrustes
+   M-squared value that results from comparing the coordinate matrices.
+
+   >>> from skbio.stats.ordination import PCoA
+   >>> bc_pc = PCoA(bc_dm).scores()
+   >>> j_pc = PCoA(j_dm).scores()
+   >>> from skbio.stats.spatial import procrustes
+   >>> print(procrustes(bc_pc.site, j_pc.site)[2])
+   0.466134984787
+
+   All of this only gets interesting in the context of sample metadata, so
+   let's define some:
+
+   >>> import pandas as pd
+   >>> try:
+   ...     # not necessary for normal use
+   ...     pd.set_option('show_dimensions', True)
+   ... except KeyError:
+   ...     pass
+   >>> sample_md = {
+   ...    'A': {'body_site': 'gut', 'subject': 's1'},
+   ...    'B': {'body_site': 'skin', 'subject': 's1'},
+   ...    'C': {'body_site': 'tongue', 'subject': 's1'},
+   ...    'D': {'body_site': 'gut', 'subject': 's2'},
+   ...    'E': {'body_site': 'tongue', 'subject': 's2'},
+   ...    'F': {'body_site': 'skin', 'subject': 's2'}}
+   >>> sample_md = pd.DataFrame.from_dict(sample_md, orient='index')
+   >>> sample_md
+     subject body_site
+   A      s1       gut
+   B      s1      skin
+   C      s1    tongue
+   D      s2       gut
+   E      s2    tongue
+   F      s2      skin
+   <BLANKLINE>
+   [6 rows x 2 columns]
+
+   Now let's plot our PCoA results, coloring each sample by the subject it
+   was taken from:
+
+   >>> fig = bc_pc.plot(sample_md, 'subject',
+   ...                  axis_labels=('PC 1', 'PC 2', 'PC 3'),
+   ...                  title='Samples colored by subject', cmap='jet', s=50)
+
+.. plot::
+   :context:
+
+   We don't see any clustering/grouping of samples. If we were to instead color
+   the samples by the body site they were taken from, we see that the samples
+   form three separate groups:
+
+   >>> import matplotlib.pyplot as plt
+   >>> plt.close('all') # not necessary for normal use
+   >>> fig = bc_pc.plot(sample_md, 'body_site',
+   ...                  axis_labels=('PC 1', 'PC 2', 'PC 3'),
+   ...                  title='Samples colored by body site', cmap='jet', s=50)
+
+Ordination techniques, such as PCoA, are useful for exploratory analysis. The
+next step is to quantify the strength of the grouping/clustering that we see in
+ordination plots. There are many statistical methods available to accomplish
+this; many operate on distance matrices. Let's use ANOSIM to quantify the
+strength of the clustering we see in the ordination plots above, using our
+Bray-Curtis distance matrix and sample metadata.
+
+First test the grouping of samples by subject:
+
+>>> from skbio.stats.distance import anosim
+>>> results = anosim(bc_dm, sample_md, column='subject', permutations=999)
+>>> results['test statistic']
+-0.4074074074074075
+>>> results['p-value'] < 0.1
+False
+
+The negative value of ANOSIM's R statistic indicates anti-clustering and the
+p-value is insignificant at an alpha of 0.1.
+
+Now let's test the grouping of samples by body site:
+
+>>> results = anosim(bc_dm, sample_md, column='body_site', permutations=999)
+>>> results['test statistic']
+1.0
+>>> results['p-value'] < 0.1
+True
+
+The R statistic of 1.0 indicates strong separation of samples based on body
+site. The p-value is significant at an alpha of 0.1.
+
+References
+----------
+.. [1] http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from ._base import pw_distances, pw_distances_from_table
+
+__all__ = ["pw_distances", "pw_distances_from_table"]
+
+test = Tester().test
diff --git a/skbio/diversity/beta/_base.py b/skbio/diversity/beta/_base.py
new file mode 100644
index 0000000..0b63418
--- /dev/null
+++ b/skbio/diversity/beta/_base.py
@@ -0,0 +1,102 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from warnings import warn
+
+import numpy as np
+from scipy.spatial.distance import pdist, squareform
+
+from skbio.stats.distance import DistanceMatrix
+
+
def pw_distances(counts, ids=None, metric="braycurtis"):
    """Compute distances between all pairs of rows (samples) in a counts matrix

    Parameters
    ----------
    counts : 2D array_like of ints or floats
        Matrix containing count/abundance data where each row contains counts
        of observations in a given sample.
    ids : iterable of strs, optional
        Identifiers for each sample in ``counts``.
    metric : str, optional
        The name of the pairwise distance function to use when generating
        pairwise distances. See the scipy ``pdist`` docs, linked under *See
        Also*, for available metrics.

    Returns
    -------
    skbio.DistanceMatrix
        Distances between all pairs of samples (i.e., rows). The number of
        row and columns will be equal to the number of rows in ``counts``.

    Raises
    ------
    ValueError
        If ``len(ids) != len(counts)``.

    See Also
    --------
    scipy.spatial.distance.pdist
    pw_distances_from_table

    """
    # Coerce to an ndarray up front so len() and pdist see a consistent 2-D
    # numeric matrix regardless of the array_like type that was passed in.
    counts = np.asarray(counts)

    num_samples = len(counts)
    if ids is not None and num_samples != len(ids):
        raise ValueError(
            "Number of rows in counts must be equal to number of provided "
            "ids.")

    # pdist returns the condensed (upper-triangle) form; expand it to a full
    # square matrix for DistanceMatrix. checks=False skips the symmetry
    # validation, which is redundant when expanding a condensed vector.
    distances = pdist(counts, metric)
    return DistanceMatrix(
        squareform(distances, force='tomatrix', checks=False), ids)
+
+
def pw_distances_from_table(table, metric="braycurtis"):
    """Compute distances between all pairs of samples in table

    Parameters
    ----------
    table : biom.table.Table
        ``Table`` containing count/abundance data of observations across
        samples.
    metric : str, optional
        The name of the pairwise distance function to use when generating
        pairwise distances. See the scipy ``pdist`` docs, linked under *See
        Also*, for available metrics.

    Returns
    -------
    skbio.DistanceMatrix
        Distances between all pairs of samples. The number of row and columns
        will be equal to the number of samples in ``table``.

    See Also
    --------
    scipy.spatial.distance.pdist
    biom.table.Table
    pw_distances

    """
    # NOTE(review): the release named in this message ("0.2.0") appears
    # stale relative to the current version -- confirm the intended removal
    # release. Message text intentionally left unchanged.
    warn("pw_distances_from_table is deprecated. In the future (tentatively "
         "scikit-bio 0.2.0), pw_distance will take a biom.table.Table object "
         "and this function will be removed. You will need to update your "
         "code to call pw_distances at that time.", DeprecationWarning)
    sample_ids = table.ids(axis="sample")
    num_samples = len(sample_ids)

    # Fetch each sample's vector exactly once instead of re-reading v2 from
    # the table inside the O(n^2) pair loop.
    sample_data = [table.data(sample_id) for sample_id in sample_ids]

    # Fill both triangles of the (symmetric) result matrix. pdist on a pair
    # of vectors returns a length-1 condensed array; index it explicitly
    # rather than relying on implicit ndarray-to-scalar assignment.
    dm = np.zeros((num_samples, num_samples))
    for i in range(num_samples):
        for j in range(i):
            dm[i, j] = dm[j, i] = pdist(
                [sample_data[i], sample_data[j]], metric)[0]
    return DistanceMatrix(dm, sample_ids)
diff --git a/skbio/diversity/beta/tests/__init__.py b/skbio/diversity/beta/tests/__init__.py
new file mode 100644
index 0000000..0bf0c55
--- /dev/null
+++ b/skbio/diversity/beta/tests/__init__.py
@@ -0,0 +1,7 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/diversity/beta/tests/test_base.py b/skbio/diversity/beta/tests/test_base.py
new file mode 100644
index 0000000..078cb5a
--- /dev/null
+++ b/skbio/diversity/beta/tests/test_base.py
@@ -0,0 +1,165 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+
+from skbio import DistanceMatrix
+from skbio.diversity.beta import pw_distances, pw_distances_from_table
+
+
class HelperBiomTable(object):
    """A minimal stand-in for a BIOM table, for use in testing.

    Mimics just enough of the ``biom.table.Table`` interface (``ids`` and
    ``data``) to exercise code that accepts BIOM-like objects, without
    introducing a dependency on the biom-format project (which would be a
    circular dependency).
    """

    def __init__(self, data, observation_ids, sample_ids):
        # Input is observations x samples; keep a sample-major copy so each
        # row of the stored matrix corresponds to one sample.
        self._by_sample = data.T
        self.observation_ids = observation_ids
        self.sample_ids = sample_ids

    def ids(self, axis):
        """Return the sample identifiers (the only axis used in tests)."""
        return self.sample_ids

    def data(self, sample_id):
        """Return the per-observation vector for ``sample_id``."""
        return self._by_sample[self.sample_ids.index(sample_id)]
+
+
class BaseTests(TestCase):
    """Tests for pw_distances and pw_distances_from_table."""

    def setUp(self):
        # Small dataset: 3 samples x 2 observations.
        self.t1 = [[1, 5],
                   [2, 3],
                   [0, 1]]
        self.ids1 = list('ABC')

        # Larger dataset: 6 samples x 7 observations.
        self.t2 = [[23, 64, 14, 0, 0, 3, 1],
                   [0, 3, 35, 42, 0, 12, 1],
                   [0, 5, 5, 0, 40, 40, 0],
                   [44, 35, 9, 0, 1, 0, 0],
                   [0, 2, 8, 0, 35, 45, 1],
                   [0, 0, 25, 35, 0, 19, 0]]
        self.ids2 = list('ABCDEF')

        # In the future, if necessary, it should be possible to just replace
        # HelperBiomTable with Table in the following lines to test with the
        # biom.table.Table object directly (i.e., this constructor
        # interface aligns with the biom.table.Table constructor
        # interface).
        self.table1 = HelperBiomTable(
            np.array(self.t1).T, observation_ids=range(2),
            sample_ids=self.ids1)
        self.table2 = HelperBiomTable(
            np.array(self.t2).T, observation_ids=range(7),
            sample_ids=self.ids2)

    def test_pw_distances_invalid_input(self):
        # number of ids doesn't match the number of samples
        self.assertRaises(ValueError, pw_distances, self.t1, list('AB'),
                          'euclidean')

    def test_pw_distances_euclidean(self):
        # Diagonal must be zero and the matrix symmetric; off-diagonal values
        # are hard-coded expected euclidean distances.
        actual_dm = pw_distances(self.t1, self.ids1, 'euclidean')
        self.assertEqual(actual_dm.shape, (3, 3))
        npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
        npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
        npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
        npt.assert_almost_equal(actual_dm['A', 'B'], 2.23606798)
        npt.assert_almost_equal(actual_dm['B', 'A'], 2.23606798)
        npt.assert_almost_equal(actual_dm['A', 'C'], 4.12310563)
        npt.assert_almost_equal(actual_dm['C', 'A'], 4.12310563)
        npt.assert_almost_equal(actual_dm['B', 'C'], 2.82842712)
        npt.assert_almost_equal(actual_dm['C', 'B'], 2.82842712)

        # Larger dataset: compare every entry to the hard-coded expected
        # matrix, to 6 decimal places.
        actual_dm = pw_distances(self.t2, self.ids2, 'euclidean')
        expected_data = [
            [0., 80.8455317, 84.0297566, 36.3042697, 86.0116271, 78.9176786],
            [80.8455317, 0., 71.0844568, 74.4714710, 69.3397433, 14.422205],
            [84.0297566, 71.0844568, 0., 77.2851861, 8.3066238, 60.7536007],
            [36.3042697, 74.4714710, 77.2851861, 0., 78.7908624, 70.7389567],
            [86.0116271, 69.3397433, 8.3066238, 78.7908624, 0., 58.4807660],
            [78.9176786, 14.422205, 60.7536007, 70.7389567, 58.4807660, 0.]]
        expected_dm = DistanceMatrix(expected_data, self.ids2)
        for id1 in self.ids2:
            for id2 in self.ids2:
                npt.assert_almost_equal(actual_dm[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_pw_distances_braycurtis(self):
        # Same structure as the euclidean test, with Bray-Curtis expected
        # values (bounded in [0, 1]).
        actual_dm = pw_distances(self.t1, self.ids1, 'braycurtis')
        self.assertEqual(actual_dm.shape, (3, 3))
        npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
        npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
        npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
        npt.assert_almost_equal(actual_dm['A', 'B'], 0.27272727)
        npt.assert_almost_equal(actual_dm['B', 'A'], 0.27272727)
        npt.assert_almost_equal(actual_dm['A', 'C'], 0.71428571)
        npt.assert_almost_equal(actual_dm['C', 'A'], 0.71428571)
        npt.assert_almost_equal(actual_dm['B', 'C'], 0.66666667)
        npt.assert_almost_equal(actual_dm['C', 'B'], 0.66666667)

        actual_dm = pw_distances(self.t2, self.ids2, 'braycurtis')
        expected_data = [
            [0., 0.78787879, 0.86666667, 0.30927835, 0.85714286, 0.81521739],
            [0.78787879, 0., 0.78142077, 0.86813187, 0.75, 0.1627907],
            [0.86666667, 0.78142077, 0., 0.87709497, 0.09392265, 0.71597633],
            [0.30927835, 0.86813187, 0.87709497, 0., 0.87777778, 0.89285714],
            [0.85714286, 0.75, 0.09392265, 0.87777778, 0., 0.68235294],
            [0.81521739, 0.1627907, 0.71597633, 0.89285714, 0.68235294, 0.]]
        expected_dm = DistanceMatrix(expected_data, self.ids2)
        for id1 in self.ids2:
            for id2 in self.ids2:
                npt.assert_almost_equal(actual_dm[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_pw_distances_from_table_euclidean(self):
        # results are equal when passed as Table or matrix
        # (pw_distances_from_table is deprecated, so the call must also emit
        # a DeprecationWarning).
        m_dm = pw_distances(self.t1, self.ids1, 'euclidean')
        t_dm = npt.assert_warns(
            DeprecationWarning, pw_distances_from_table, self.table1,
            'euclidean')
        for id1 in self.ids1:
            for id2 in self.ids1:
                npt.assert_almost_equal(m_dm[id1, id2], t_dm[id1, id2])

        m_dm = pw_distances(self.t2, self.ids2, 'euclidean')
        t_dm = npt.assert_warns(
            DeprecationWarning, pw_distances_from_table, self.table2,
            'euclidean')
        for id1 in self.ids2:
            for id2 in self.ids2:
                npt.assert_almost_equal(m_dm[id1, id2], t_dm[id1, id2])

    def test_pw_distances_from_table_braycurtis(self):
        # results are equal when passed as Table or matrix
        m_dm = pw_distances(self.t1, self.ids1, 'braycurtis')
        t_dm = npt.assert_warns(
            DeprecationWarning, pw_distances_from_table, self.table1,
            'braycurtis')
        for id1 in self.ids1:
            for id2 in self.ids1:
                npt.assert_almost_equal(m_dm[id1, id2], t_dm[id1, id2])

        m_dm = pw_distances(self.t2, self.ids2, 'braycurtis')
        t_dm = npt.assert_warns(
            DeprecationWarning, pw_distances_from_table, self.table2,
            'braycurtis')
        for id1 in self.ids2:
            for id2 in self.ids2:
                npt.assert_almost_equal(m_dm[id1, id2], t_dm[id1, id2])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/skbio/draw/__init__.py b/skbio/draw/__init__.py
new file mode 100644
index 0000000..d3be208
--- /dev/null
+++ b/skbio/draw/__init__.py
@@ -0,0 +1,37 @@
+"""
+Visualizations (:mod:`skbio.draw`)
+==================================
+
+.. currentmodule:: skbio.draw
+
+This module provides functionality for visualization of data.
+
+Distribution visualizations
+---------------------------
+
+Functions
+^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   boxplots
+   grouped_distributions
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from ._distributions import boxplots, grouped_distributions
+
+__all__ = ['boxplots', 'grouped_distributions']
+
+test = Tester().test
diff --git a/skbio/draw/_distributions.py b/skbio/draw/_distributions.py
new file mode 100644
index 0000000..2e664ec
--- /dev/null
+++ b/skbio/draw/_distributions.py
@@ -0,0 +1,701 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import map, range, zip
+from six import string_types
+
+from itertools import cycle
+import warnings
+
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.lines import Line2D
+from matplotlib.patches import Polygon, Rectangle
+
+
def boxplots(distributions, x_values=None, x_tick_labels=None, title=None,
             x_label=None, y_label=None, x_tick_labels_orientation='vertical',
             y_min=None, y_max=None, whisker_length=1.5, box_width=0.5,
             box_colors=None, figure_width=None, figure_height=None,
             legend=None):
    """Generate a figure with a boxplot for each distribution.

    Parameters
    ----------
    distributions: 2-D array_like
        Distributions to plot. A boxplot will be created for each distribution.
    x_values : list of numbers, optional
        List indicating where each boxplot should be placed. Must be the same
        length as `distributions` if provided.
    x_tick_labels : list of str, optional
        List of x-axis tick labels.
    title : str, optional
        Title of the plot.
    x_label : str, optional
        x-axis label.
    y_label : str, optional
        y-axis label.
    x_tick_labels_orientation : {'vertical', 'horizontal'}
        Orientation of the x-axis labels.
    y_min : scalar, optional
        Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    y_max : scalar, optional
        Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    whisker_length : scalar, optional
        Length of the whiskers as a function of the IQR. For example, if 1.5,
        the whiskers extend to ``1.5 * IQR``. Anything outside of that range is
        treated as an outlier.
    box_width : scalar, optional
        Width of each box in plot units.
    box_colors : str, tuple, or list of colors, optional
        Either a matplotlib-compatible string or tuple that indicates the color
        to be used for every boxplot, or a list of colors to color each boxplot
        individually. If ``None``, boxes will be the same color as the plot
        background. If a list of colors is provided, a color must be provided
        for each boxplot. Can also supply ``None`` instead of a color, which
        will color the box the same color as the plot background.
    figure_width : scalar, optional
        Width of the plot figure in inches. If not provided, will default to
        matplotlib's default figure width.
    figure_height : scalar, optional
        Height of the plot figure in inches. If not provided, will default to
        matplotlib's default figure height.
    legend : tuple or list, optional
        Two-element tuple or list that contains a list of valid matplotlib
        colors as the first element and a list of labels (strings) as the
        second element. The lengths of the first and second elements must be
        the same. If ``None``, a legend will not be plotted.

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing a boxplot for each distribution.

    Raises
    ------
    ValueError
        If the distributions, x values, tick labels, or legend are invalid
        (see the helper validators for details).

    See Also
    --------
    matplotlib.pyplot.boxplot
    scipy.stats.ttest_ind

    Notes
    -----
    This is a convenience wrapper around matplotlib's ``boxplot`` function that
    allows for coloring of boxplots and legend generation.

    Examples
    --------
    Create a plot with two boxplots:

    .. plot::

       >>> from skbio.draw import boxplots
       >>> fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])

    Plot three distributions with custom colors and labels:

    .. plot::

       >>> from skbio.draw import boxplots
       >>> fig = boxplots(
       ...     [[2, 2, 1, 3], [0, -1, 0, 0.1, 0.3], [4, 5, 6, 3]],
       ...     x_tick_labels=('Control', 'Treatment 1', 'Treatment 2'),
       ...     box_colors=('green', 'blue', 'red'))

    """
    distributions = _validate_distributions(distributions)
    num_dists = len(distributions)
    _validate_x_values(x_values, x_tick_labels, num_dists)

    # Create a new figure to plot our data on, and then plot the
    # distributions. Draw on the explicit Axes rather than through the
    # stateful pyplot interface (plt.boxplot), so the result does not depend
    # on which figure happens to be "current".
    fig, ax = plt.subplots()
    box_plot = ax.boxplot(distributions, positions=x_values,
                          whis=whisker_length, widths=box_width)

    if box_colors is not None:
        # A single color is broadcast to every box.
        if _is_single_matplotlib_color(box_colors):
            box_colors = [box_colors] * num_dists
        _color_box_plot(ax, box_plot, box_colors)

    # Set up the various plotting options, such as x- and y-axis labels, plot
    # title, and x-axis values if they have been supplied.
    _set_axes_options(ax, title, x_label, y_label,
                      x_tick_labels=x_tick_labels,
                      x_tick_labels_orientation=x_tick_labels_orientation,
                      y_min=y_min, y_max=y_max)

    if legend is not None:
        if len(legend) != 2:
            raise ValueError("Invalid legend was provided. The legend must be "
                             "a two-element tuple/list where the first "
                             "element is a list of colors and the second "
                             "element is a list of labels.")
        _create_legend(ax, legend[0], legend[1], 'colors')

    _set_figure_size(fig, figure_width, figure_height)
    return fig
+
+
def grouped_distributions(plot_type, data, x_values=None,
                          data_point_labels=None, distribution_labels=None,
                          distribution_markers=None, x_label=None,
                          y_label=None, title=None,
                          x_tick_labels_orientation='vertical', y_min=None,
                          y_max=None, whisker_length=1.5,
                          error_bar_type='stdv', distribution_width=None,
                          figure_width=None, figure_height=None):
    """Generate a figure with distributions grouped at points along the x-axis.

    Parameters
    ----------
    plot_type : {'bar', 'scatter', 'box'}
        Type of plot to visualize distributions with.
    data : list of lists of lists
        Each inner list represents a data point along the x-axis. Each data
        point contains lists of data for each distribution in the group at that
        point. This nesting allows for the grouping of distributions at each
        data point.
    x_values : list of scalars, optional
        Spacing of data points along the x-axis. Must be the same length as the
        number of data points and be in ascending sorted order. If not
        provided, plots will be spaced evenly.
    data_point_labels : list of str, optional
        Labels for data points.
    distribution_labels : list of str, optional
        Labels for each distribution in a data point grouping.
    distribution_markers : list of str or list of tuple, optional
        Matplotlib-compatible strings or tuples that indicate the color or
        symbol to be used to distinguish each distribution in a data point
        grouping. Colors will be used for bar charts or box plots, while
        symbols will be used for scatter plots.
    x_label : str, optional
        x-axis label.
    y_label : str, optional
        y-axis label.
    title : str, optional
        Plot title.
    x_tick_labels_orientation : {'vertical', 'horizontal'}
        Orientation of x-axis labels.
    y_min : scalar, optional
        Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    y_max : scalar, optional
        Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    whisker_length : scalar, optional
        If `plot_type` is ``'box'``, determines the length of the whiskers as a
        function of the IQR. For example, if 1.5, the whiskers extend to
        ``1.5 * IQR``. Anything outside of that range is seen as an outlier.
        If `plot_type` is not ``'box'``, this parameter is ignored.
    error_bar_type : {'stdv', 'sem'}
        Type of error bars to use if `plot_type` is ``'bar'``. Can be either
        ``'stdv'`` (for standard deviation) or ``'sem'`` for the standard error
        of the mean. If `plot_type` is not ``'bar'``, this parameter is
        ignored.
    distribution_width : scalar, optional
        Width in plot units of each individual distribution (e.g. each bar if
        the plot type is a bar chart, or the width of each box if the plot type
        is a boxplot). If None, will be automatically determined.
    figure_width : scalar, optional
        Width of the plot figure in inches. If not provided, will default to
        matplotlib's default figure width.
    figure_height : scalar, optional
        Height of the plot figure in inches. If not provided, will default to
        matplotlib's default figure height.

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing distributions grouped at points along the x-axis.

    Raises
    ------
    ValueError
        If `plot_type` is not one of ``'bar'``, ``'scatter'``, or ``'box'``,
        if `data` or the supplied labels/x values are inconsistent, or if
        `distribution_width` is not positive.

    Examples
    --------
    Create a plot with two distributions grouped at three points:

    .. plot::

       >>> from skbio.draw import grouped_distributions
       >>> fig = grouped_distributions('bar',
       ...                             [[[2, 2, 1,], [0, 1, 4]],
       ...                             [[1, 1, 1], [4, 4.5]],
       ...                             [[2.2, 2.4, 2.7, 1.0], [0, 0.2]]],
       ...                             distribution_labels=['Treatment 1',
       ...                                                  'Treatment 2'])

    """
    # Set up different behavior based on the plot type.
    if plot_type == 'bar':
        plotting_function = _plot_bar_data
        distribution_centered = False
        marker_type = 'colors'
    elif plot_type == 'scatter':
        plotting_function = _plot_scatter_data
        distribution_centered = True
        marker_type = 'symbols'
    elif plot_type == 'box':
        plotting_function = _plot_box_data
        distribution_centered = True
        marker_type = 'colors'
    else:
        raise ValueError("Invalid plot type '%s'. Supported plot types are "
                         "'bar', 'scatter', or 'box'." % plot_type)

    num_points, num_distributions = _validate_input(data, x_values,
                                                    data_point_labels,
                                                    distribution_labels)

    # Create a list of matplotlib markers (colors or symbols) that can be used
    # to distinguish each of the distributions. If the user provided a list of
    # markers, use it and loop around to the beginning if there aren't enough
    # markers. If they didn't provide a list, or it was empty, use our own
    # predefined list of markers (again, loop around to the beginning if we
    # need more markers).
    distribution_markers = _get_distribution_markers(marker_type,
                                                     distribution_markers,
                                                     num_distributions)

    # Now calculate where each of the data points will start on the x-axis.
    x_locations = _calc_data_point_locations(num_points, x_values)
    assert (len(x_locations) == num_points), "The number of x_locations " +\
        "does not match the number of data points."

    if distribution_width is None:
        # Find the smallest gap between consecutive data points and divide this
        # by the number of distributions + 1 for some extra spacing between
        # data points.
        min_gap = max(x_locations)
        for i in range(len(x_locations) - 1):
            curr_gap = x_locations[i + 1] - x_locations[i]
            if curr_gap < min_gap:
                min_gap = curr_gap

        distribution_width = min_gap / float(num_distributions + 1)
    else:
        if distribution_width <= 0:
            raise ValueError("The width of a distribution cannot be less than "
                             "or equal to zero.")

    result, plot_axes = plt.subplots()

    # Iterate over each data point, and plot each of the distributions at that
    # data point. Increase the offset after each distribution is plotted,
    # so that the grouped distributions don't overlap.
    for point, x_pos in zip(data, x_locations):
        dist_offset = 0
        for dist_index, dist, dist_marker in zip(range(num_distributions),
                                                 point, distribution_markers):
            dist_location = x_pos + dist_offset
            plotting_function(plot_axes, dist, dist_marker, distribution_width,
                              dist_location, whisker_length, error_bar_type)
            dist_offset += distribution_width

    # Set up various plot options that are best set after the plotting is done.
    # The x-axis tick marks (one per data point) are centered on each group of
    # distributions.
    plot_axes.set_xticks(_calc_data_point_ticks(x_locations,
                                                num_distributions,
                                                distribution_width,
                                                distribution_centered))
    _set_axes_options(plot_axes, title, x_label, y_label, x_values,
                      data_point_labels, x_tick_labels_orientation, y_min,
                      y_max)

    if distribution_labels is not None:
        _create_legend(plot_axes, distribution_markers, distribution_labels,
                       marker_type)

    _set_figure_size(result, figure_width, figure_height)

    # matplotlib seems to sometimes plot points on the rightmost edge of the
    # plot without adding padding, so we need to add our own to both sides of
    # the plot. Boxplots need extra padding on the left.
    # NOTE(review): an older comment said this must happen after a draw()
    # call to avoid a missing-renderer exception, but no draw() call remains
    # in this function -- verify whether that ordering constraint still
    # applies.
    if plot_type == 'box':
        left_pad = 2 * distribution_width
    else:
        left_pad = distribution_width
    plot_axes.set_xlim(plot_axes.get_xlim()[0] - left_pad,
                       plot_axes.get_xlim()[1] + distribution_width)

    return result
+
+
+def _validate_distributions(distributions):
+    dists = []
+    for distribution in distributions:
+        try:
+            distribution = np.asarray(distribution, dtype=float)
+        except ValueError:
+            raise ValueError("Each value in each distribution must be "
+                             "convertible to a number.")
+
+        # Empty distributions are plottable in mpl < 1.4.0. In 1.4.0, a
+        # ValueError is raised. This has been fixed in mpl 1.4.0-dev (see
+        # https://github.com/matplotlib/matplotlib/pull/3571). In order to
+        # support empty distributions across mpl versions, we replace them with
+        # [np.nan]. See https://github.com/pydata/pandas/issues/8382,
+        # https://github.com/matplotlib/matplotlib/pull/3571, and
+        # https://github.com/pydata/pandas/pull/8240 for details.
+        # If we decide to only support mpl > 1.4.0 in the future, this code can
+        # likely be removed in favor of letting mpl handle empty distributions.
+        if distribution.size > 0:
+            dists.append(distribution)
+        else:
+            dists.append(np.array([np.nan]))
+    return dists
+
+
+def _validate_input(data, x_values, data_point_labels, distribution_labels):
+    """Returns a tuple containing the number of data points and distributions
+    in the data.
+
+    Validates plotting options to make sure they are valid with the supplied
+    data.
+    """
+    if data is None or not data or isinstance(data, string_types):
+        raise ValueError("The data must be a list type, and it cannot be "
+                         "None or empty.")
+
+    num_points = len(data)
+    num_distributions = len(data[0])
+
+    empty_data_error_msg = ("The data must contain at least one data "
+                            "point, and each data point must contain at "
+                            "least one distribution to plot.")
+    if num_points == 0 or num_distributions == 0:
+        raise ValueError(empty_data_error_msg)
+
+    for point in data:
+        if len(point) == 0:
+            raise ValueError(empty_data_error_msg)
+        if len(point) != num_distributions:
+            raise ValueError("The number of distributions in each data point "
+                             "grouping must be the same for all data points.")
+
+    # Make sure we have the right number of x values (one for each data point),
+    # and make sure they are numbers.
+    _validate_x_values(x_values, data_point_labels, num_points)
+
+    if (distribution_labels is not None and
+            len(distribution_labels) != num_distributions):
+        raise ValueError("The number of distribution labels must be equal "
+                         "to the number of distributions.")
+    return num_points, num_distributions
+
+
+def _validate_x_values(x_values, x_tick_labels, num_expected_values):
+    """Validates the x values provided by the user, making sure they are the
+    correct length and are all numbers.
+
+    Also validates the number of x-axis tick labels.
+
+    Raises a ValueError if these conditions are not met.
+    """
+    if x_values is not None:
+        if len(x_values) != num_expected_values:
+            raise ValueError("The number of x values must match the number "
+                             "of data points.")
+        try:
+            list(map(float, x_values))
+        except:
+            raise ValueError("Each x value must be a number.")
+
+    if x_tick_labels is not None:
+        if len(x_tick_labels) != num_expected_values:
+            raise ValueError("The number of x-axis tick labels must match the "
+                             "number of data points.")
+
+
+def _get_distribution_markers(marker_type, marker_choices, num_markers):
+    """Returns a list of length num_markers of valid matplotlib colors or
+    symbols.
+
+    The markers will be comprised of those found in marker_choices (if not None
+    and not empty) or a list of predefined markers (determined by marker_type,
+    which can be either 'colors' or 'symbols'). If there are not enough
+    markers, the list of markers will be reused from the beginning again (as
+    many times as are necessary).
+    """
+    if num_markers < 0:
+        raise ValueError("num_markers must be greater than or equal to zero.")
+    if marker_choices is None or len(marker_choices) == 0:
+        if marker_type == 'colors':
+            marker_choices = ['b', 'g', 'r', 'c', 'm', 'y', 'w']
+        elif marker_type == 'symbols':
+            marker_choices = \
+                ['s', 'o', '^', '>', 'v', '<', 'd', 'p', 'h', '8', '+', 'x']
+        else:
+            raise ValueError("Invalid marker_type: '%s'. marker_type must be "
+                             "either 'colors' or 'symbols'." % marker_type)
+    if len(marker_choices) < num_markers:
+        # We don't have enough markers to represent each distribution uniquely,
+        # so let the user know. We'll add as many markers (starting from the
+        # beginning of the list again) until we have enough, but the user
+        # should still know because they may want to provide a new list of
+        # markers.
+        warnings.warn(
+            "There are not enough markers to uniquely represent each "
+            "distribution in your dataset. You may want to provide a list "
+            "of markers that is at least as large as the number of "
+            "distributions in your dataset.",
+            RuntimeWarning)
+        marker_cycle = cycle(marker_choices[:])
+        while len(marker_choices) < num_markers:
+            marker_choices.append(next(marker_cycle))
+    return marker_choices[:num_markers]
+
+
+def _calc_data_point_locations(num_points, x_values=None):
+    """Returns the x-axis location for each of the data points to start at.
+
+    Note: A numpy array is returned so that the overloaded "+" operator can be
+    used on the array.
+
+    The x-axis locations are scaled by x_values if it is provided, or else the
+    x-axis locations are evenly spaced. In either case, the x-axis locations
+    will always be in the range [1, num_points].
+    """
+    if x_values is None:
+        # Evenly space the x-axis locations.
+        x_locs = np.arange(1, num_points + 1)
+    else:
+        if len(x_values) != num_points:
+            raise ValueError("The number of x-axis values must match the "
+                             "number of data points.")
+
+        # Scale to the range [1, num_points]. Taken from
+        # http://www.heatonresearch.com/wiki/Range_Normalization
+        x_min = min(x_values)
+        x_max = max(x_values)
+        x_range = x_max - x_min
+        n_range = num_points - 1
+        x_locs = np.array([(((x_val - x_min) * n_range) / float(x_range)) + 1
+                           for x_val in x_values])
+
+    return x_locs
+
+
+def _calc_data_point_ticks(x_locations, num_distributions, distribution_width,
+                           distribution_centered):
+    """Returns a 1D numpy array of x-axis tick positions.
+
+    These positions will be centered on each data point.
+
+    Set distribution_centered to True for scatter and box plots because their
+    plot types naturally center over a given horizontal position. Bar charts
+    should use distribution_centered = False because the leftmost edge of a bar
+    starts at a given horizontal position and extends to the right for the
+    width of the bar.
+    """
+    dist_size = num_distributions - 1 if distribution_centered else\
+        num_distributions
+    return x_locations + ((dist_size * distribution_width) / 2)
+
+
+def _plot_bar_data(plot_axes, distribution, distribution_color,
+                   distribution_width, x_position, whisker_length,
+                   error_bar_type):
+    """Returns the result of plotting a single bar in matplotlib."""
+    result = None
+
+    # We do not want to plot empty distributions because matplotlib will not be
+    # able to render them as PDFs.
+    if len(distribution) > 0:
+        avg = np.mean(distribution)
+        if error_bar_type == 'stdv':
+            error_bar = np.std(distribution)
+        elif error_bar_type == 'sem':
+            error_bar = np.std(distribution) / np.sqrt(len(distribution))
+        else:
+            raise ValueError(
+                "Invalid error bar type '%s'. Supported error bar types are "
+                "'stdv' and 'sem'." % error_bar_type)
+        result = plot_axes.bar(x_position, avg, distribution_width,
+                               yerr=error_bar, ecolor='black',
+                               facecolor=distribution_color)
+    return result
+
+
+def _plot_scatter_data(plot_axes, distribution, distribution_symbol,
+                       distribution_width, x_position, whisker_length,
+                       error_bar_type):
+    """Returns the result of plotting a single scatterplot in matplotlib."""
+    result = None
+    x_vals = [x_position] * len(distribution)
+
+    # matplotlib's scatter function doesn't like plotting empty data.
+    if len(x_vals) > 0 and len(distribution) > 0:
+        result = plot_axes.scatter(x_vals, distribution,
+                                   marker=distribution_symbol, c='k')
+    return result
+
+
+def _plot_box_data(plot_axes, distribution, distribution_color,
+                   distribution_width, x_position, whisker_length,
+                   error_bar_type):
+    """Returns the result of plotting a single boxplot in matplotlib."""
+    result = None
+
+    if len(distribution) > 0:
+        result = plot_axes.boxplot([distribution], positions=[x_position],
+                                   widths=distribution_width,
+                                   whis=whisker_length)
+        _color_box_plot(plot_axes, result, [distribution_color])
+
+    return result
+
+
+def _is_single_matplotlib_color(color):
+    """Returns True if color is a single (not a list) mpl color."""
+    single_color = False
+
+    if (isinstance(color, str)):
+        single_color = True
+    elif len(color) == 3 or len(color) == 4:
+        single_color = True
+
+        for e in color:
+            if not (isinstance(e, float) or isinstance(e, int)):
+                single_color = False
+
+    return single_color
+
+
+def _color_box_plot(plot_axes, box_plot, colors):
+    """Color boxes in the box plot with the specified colors.
+
+    If any of the colors are None, the box will not be colored.
+
+    The box_plot argument must be the dictionary returned by the call to
+    matplotlib's boxplot function, and the colors argument must consist of
+    valid matplotlib colors.
+    """
+    # Note: the following code is largely taken from this matplotlib boxplot
+    # example:
+    # http://matplotlib.sourceforge.net/examples/pylab_examples/
+    #     boxplot_demo2.html
+    num_colors = len(colors)
+    num_box_plots = len(box_plot['boxes'])
+    if num_colors != num_box_plots:
+        raise ValueError("The number of colors (%d) does not match the number "
+                         "of boxplots (%d)." % (num_colors, num_box_plots))
+
+    for box, median, color in zip(box_plot['boxes'],
+                                  box_plot['medians'],
+                                  colors):
+        if color is not None:
+            box_x = []
+            box_y = []
+
+            # There are five points in the box. The first is the same as
+            # the last.
+            for i in range(5):
+                box_x.append(box.get_xdata()[i])
+                box_y.append(box.get_ydata()[i])
+
+            box_coords = list(zip(box_x, box_y))
+            box_polygon = Polygon(box_coords, facecolor=color)
+            plot_axes.add_patch(box_polygon)
+
+            # Draw the median lines back over what we just filled in with
+            # color.
+            median_x = []
+            median_y = []
+            for i in range(2):
+                median_x.append(median.get_xdata()[i])
+                median_y.append(median.get_ydata()[i])
+                plot_axes.plot(median_x, median_y, 'black')
+
+
+def _set_axes_options(plot_axes, title=None, x_label=None, y_label=None,
+                      x_values=None, x_tick_labels=None,
+                      x_tick_labels_orientation='vertical', y_min=None,
+                      y_max=None):
+    """Applies various labelling options to the plot axes."""
+    if title is not None:
+        plot_axes.set_title(title)
+    if x_label is not None:
+        plot_axes.set_xlabel(x_label)
+    if y_label is not None:
+        plot_axes.set_ylabel(y_label)
+
+    if (x_tick_labels_orientation != 'vertical' and
+            x_tick_labels_orientation != 'horizontal'):
+        raise ValueError("Invalid orientation for x-axis tick labels: '%s'. "
+                         "Valid orientations are 'vertical' or 'horizontal'."
+                         % x_tick_labels_orientation)
+
+    # If labels are provided, always use them. If they aren't, use the x_values
+    # that denote the spacing between data points as labels. If that isn't
+    # available, simply label the data points in an incremental fashion,
+    # i.e. 1, 2, 3, ..., n, where n is the number of data points on the plot.
+    if x_tick_labels is not None:
+        plot_axes.set_xticklabels(x_tick_labels,
+                                  rotation=x_tick_labels_orientation)
+    elif x_tick_labels is None and x_values is not None:
+        plot_axes.set_xticklabels(x_values, rotation=x_tick_labels_orientation)
+    else:
+        plot_axes.set_xticklabels(
+            range(1, len(plot_axes.get_xticklabels()) + 1),
+            rotation=x_tick_labels_orientation)
+
+    # Set the y-axis range if specified.
+    if y_min is not None:
+        plot_axes.set_ylim(bottom=float(y_min))
+    if y_max is not None:
+        plot_axes.set_ylim(top=float(y_max))
+
+
+def _create_legend(plot_axes, distribution_markers, distribution_labels,
+                   marker_type):
+    """Creates a legend on the supplied axes."""
+    # We have to use a proxy artist for the legend because box plots currently
+    # don't have a very useful legend in matplotlib, and using the default
+    # legend for bar/scatterplots chokes on empty/null distributions.
+    #
+    # Note: This code is based on the following examples:
+    #   http://matplotlib.sourceforge.net/users/legend_guide.html
+    #   http://stackoverflow.com/a/11423554
+    if len(distribution_markers) != len(distribution_labels):
+        raise ValueError("The number of distribution markers does not match "
+                         "the number of distribution labels.")
+    if marker_type == 'colors':
+        legend_proxy = [Rectangle((0, 0), 1, 1, fc=marker)
+                        for marker in distribution_markers]
+        plot_axes.legend(legend_proxy, distribution_labels, loc='best')
+    elif marker_type == 'symbols':
+        legend_proxy = [Line2D(range(1), range(1), color='white',
+                        markerfacecolor='black', marker=marker)
+                        for marker in distribution_markers]
+        plot_axes.legend(legend_proxy, distribution_labels, numpoints=3,
+                         scatterpoints=3, loc='best')
+    else:
+        raise ValueError("Invalid marker_type: '%s'. marker_type must be "
+                         "either 'colors' or 'symbols'." % marker_type)
+
+
+def _set_figure_size(fig, width=None, height=None):
+    """Sets the plot figure size and makes room for axis labels, titles, etc.
+
+    If both width and height are not provided, will use matplotlib defaults.
+
+    Making room for labels will not always work, and if it fails, the user will
+    be warned that their plot may have cut-off labels.
+    """
+    # Set the size of the plot figure, then make room for the labels so they
+    # don't get cut off. Must be done in this order.
+    if width is not None and height is not None and width > 0 and height > 0:
+        fig.set_size_inches(width, height)
+    try:
+        fig.tight_layout()
+    except ValueError:
+        warnings.warn(
+            "Could not automatically resize plot to make room for "
+            "axes labels and plot title. This can happen if the labels or "
+            "title are extremely long and the plot size is too small. Your "
+            "plot may have its labels and/or title cut-off. To fix this, "
+            "try increasing the plot's size (in inches) and try again.",
+            RuntimeWarning)
diff --git a/skbio/draw/tests/__init__.py b/skbio/draw/tests/__init__.py
new file mode 100644
index 0000000..c99682c
--- /dev/null
+++ b/skbio/draw/tests/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/draw/tests/test_distributions.py b/skbio/draw/tests/test_distributions.py
new file mode 100644
index 0000000..7cccbb5
--- /dev/null
+++ b/skbio/draw/tests/test_distributions.py
@@ -0,0 +1,576 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+import matplotlib.pyplot as plt
+
+from skbio.draw import boxplots, grouped_distributions
+from skbio.draw._distributions import (
+    _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot,
+    _create_legend, _get_distribution_markers, _is_single_matplotlib_color,
+    _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options,
+    _set_figure_size, _validate_input, _validate_x_values)
+
+
+class DistributionsTests(TestCase):
+    def setUp(self):
+        # Test null data list.
+        self.Null = None
+
+        # Test empty data list.
+        self.Empty = []
+
+        # Test nested empty data list.
+        self.EmptyNested = [[]]
+
+        # Test nested empty data list (for bar/scatter plots).
+        self.EmptyDeeplyNested = [[[]]]
+
+        # Test invalid number of samples in data list (for bar/scatter plots).
+        self.InvalidNumSamples = [[[1, 2, 3, 4, 5]],
+                                  [[4, 5, 6, 7, 8], [2, 3, 2]],
+                                  [[4, 7, 10, 33, 32, 6, 7, 8]]]
+
+        # Test valid data with three samples and four data points
+        # (for bar/scatter plots).
+        self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2, 3, 5, 6], [2, 3, 8]],
+                                 [[4, 7, 8], [8, 9, 10, 11], [9.0, 4, 1, 1]],
+                                 [[4, 33, 32, 6, 8], [5, 4, 8, 13], [1, 1, 2]],
+                                 [[2, 2, 2, 2], [3, 9, 8], [2, 1, 6, 7, 4, 5]]]
+
+        # Test valid data with one sample (for bar/scatter plots).
+        self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]],
+                                      [[4, 5, 6, 7, 8]],
+                                      [[4, 7, 10, 33, 32, 6, 7, 8]]]
+
+        # Test typical data to be plotted by the boxplot function.
+        self.ValidTypicalBoxData = [[3.4, 10, 11.67, 12.0, 2, 2, 99.99],
+                                    [2.3, 4, 5, 88, 9, 10, 11, 1, 0, 3, -8],
+                                    [2, 9, 7, 5, 6]]
+
+    def tearDown(self):
+        # We get a warning from mpl if we don't clean up our figures.
+        plt.close('all')
+
+    def test_validate_input_null(self):
+        with npt.assert_raises(ValueError):
+            _validate_input(self.Null, None, None, None)
+
+    def test_validate_input_empty(self):
+        with npt.assert_raises(ValueError):
+            _validate_input(self.Empty, None, None, None)
+
+    def test_validate_input_empty_nested(self):
+        with npt.assert_raises(ValueError):
+            _validate_input(self.EmptyNested, None, None, None)
+
+    def test_validate_input_empty_deeply_nested(self):
+        num_points, num_samples = _validate_input(self.EmptyDeeplyNested,
+                                                  None, None, None)
+        self.assertEqual(num_points, 1)
+        self.assertEqual(num_samples, 1)
+
+    def test_validate_input_empty_point(self):
+        with npt.assert_raises(ValueError):
+            _validate_input([[[1, 2, 3], [4, 5]], []], None, None, None)
+
+    def test_validate_input_invalid_num_samples(self):
+        with npt.assert_raises(ValueError):
+            _validate_input(self.InvalidNumSamples, None, None, None)
+
+    def test_validate_input_invalid_data_point_names(self):
+        with npt.assert_raises(ValueError):
+            _validate_input(self.ValidSingleSampleData, None, ["T0", "T1"],
+                            None)
+
+    def test_validate_input_invalid_sample_names(self):
+        with npt.assert_raises(ValueError):
+            _validate_input(self.ValidSingleSampleData, None, None,
+                            ["Men", "Women"])
+
+    def test_validate_input_all_valid_input(self):
+        self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8],
+                                         ["T0", "T1", "T2", "T3"],
+                                         ["Infants", "Children", "Teens"]),
+                         (4, 3))
+
+    def test_validate_x_values_invalid_x_values(self):
+        with npt.assert_raises(ValueError):
+            _validate_x_values([1, 2, 3, 4], ["T0", "T1", "T2"],
+                               len(self.ValidSingleSampleData))
+
+    def test_validate_x_values_invalid_x_tick_labels(self):
+        with npt.assert_raises(ValueError):
+            _validate_x_values(None, ["T0"], len(self.ValidSingleSampleData))
+
+    def test_validate_x_values_nonnumber_x_values(self):
+        with npt.assert_raises(ValueError):
+            _validate_x_values(["foo", 2, 3], None,
+                               len(self.ValidSingleSampleData))
+
+    def test_validate_x_values_valid_x_values(self):
+        _validate_x_values([1, 2.0, 3], None, 3)
+
+    def test_get_distribution_markers_null_marker_list(self):
+        self.assertEqual(_get_distribution_markers('colors', None, 5),
+                         ['b', 'g', 'r', 'c', 'm'])
+
+    def test_get_distribution_markers_empty_marker_list(self):
+        self.assertEqual(_get_distribution_markers('colors', None, 4),
+                         ['b', 'g', 'r', 'c'])
+
+    def test_get_distribution_markers_insufficient_markers(self):
+        self.assertEqual(npt.assert_warns(RuntimeWarning,
+                                          _get_distribution_markers,
+                                          'colors', None, 10),
+                         ['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r'])
+        self.assertEqual(npt.assert_warns(RuntimeWarning,
+                                          _get_distribution_markers,
+                                          'symbols', ['^', '>', '<'], 5),
+                         ['^', '>', '<', '^', '>'])
+
+    def test_get_distribution_markers_bad_marker_type(self):
+        with npt.assert_raises(ValueError):
+            _get_distribution_markers('shapes', [], 3)
+
+    def test_get_distribution_markers_zero_markers(self):
+        self.assertEqual(_get_distribution_markers('symbols', None, 0), [])
+        self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), [])
+
+    def test_get_distribution_markers_negative_num_markers(self):
+        with npt.assert_raises(ValueError):
+            _get_distribution_markers('symbols', [], -1)
+
+    def test_plot_bar_data(self):
+        fig, ax = plt.subplots()
+        result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv')
+        self.assertEqual(result[0].__class__.__name__, "Rectangle")
+        self.assertEqual(len(result), 1)
+        self.assertAlmostEqual(result[0].get_width(), 0.5)
+        self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
+        self.assertAlmostEqual(result[0].get_height(), 2.0)
+
+        fig, ax = plt.subplots()
+        result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'sem')
+        self.assertEqual(result[0].__class__.__name__, "Rectangle")
+        self.assertEqual(len(result), 1)
+        self.assertAlmostEqual(result[0].get_width(), 0.5)
+        self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
+        self.assertAlmostEqual(result[0].get_height(), 2.0)
+
+    def test_plot_bar_data_bad_error_bar_type(self):
+        fig, ax = plt.subplots()
+        with npt.assert_raises(ValueError):
+            _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var')
+
+    def test_plot_bar_data_empty(self):
+        fig, ax = plt.subplots()
+        result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv')
+        self.assertTrue(result is None)
+
+        fig, ax = plt.subplots()
+        result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem')
+        self.assertTrue(result is None)
+
+    def test_plot_scatter_data(self):
+        fig, ax = plt.subplots()
+        result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv')
+        self.assertEqual(result.get_sizes(), 20)
+
+    def test_plot_scatter_data_empty(self):
+        fig, ax = plt.subplots()
+        result = _plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv')
+        self.assertTrue(result is None)
+
+    def test_plot_box_data(self):
+        fig, ax = plt.subplots()
+        result = _plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33, 55,
+                                1.5, 'stdv')
+        self.assertEqual(result.__class__.__name__, "dict")
+        self.assertEqual(len(result['boxes']), 1)
+        self.assertEqual(len(result['medians']), 1)
+        self.assertEqual(len(result['whiskers']), 2)
+
+        # mpl < 1.4.0 creates two Line2D instances, mpl 1.4.0 creates one,
+        # though the resulting plot looks identical between the two versions.
+        # see:
+        #   https://github.com/pydata/pandas/issues/8382#issuecomment-56840974
+        #   https://github.com/matplotlib/matplotlib/issues/3544
+        self.assertTrue(len(result['fliers']) == 1 or
+                        len(result['fliers']) == 2)
+
+        self.assertEqual(len(result['caps']), 2)
+
+    def test_plot_box_data_empty(self):
+        fig, ax = plt.subplots()
+        result = _plot_box_data(ax, [], 'blue', 0.33, 55, 1.5, 'stdv')
+        self.assertTrue(result is None)
+
+    def test_calc_data_point_locations_invalid_x_values(self):
+        with npt.assert_raises(ValueError):
+            _calc_data_point_locations(3, [1, 10.5])
+
+    def test_calc_data_point_locations_default_spacing(self):
+        locs = _calc_data_point_locations(4)
+        np.testing.assert_allclose(locs, [1, 2, 3, 4])
+
+    def test_calc_data_point_locations_custom_spacing(self):
+        # Scaling down from 3..12 to 1..4.
+        locs = _calc_data_point_locations(4, [3, 4, 10, 12])
+        np.testing.assert_allclose(locs,
+                                   np.array([1, 1.33333333, 3.33333333, 4]))
+
+        # Sorted order shouldn't affect scaling.
+        locs = _calc_data_point_locations(4, [4, 3, 12, 10])
+        np.testing.assert_allclose(locs,
+                                   np.array([1.33333333, 1, 4, 3.33333333]))
+
+        # Scaling up from 0.001..0.87 to 1..3.
+        locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87])
+        np.testing.assert_allclose(locs,
+                                   np.array([1, 1.58296893, 3]))
+
+    def test_calc_data_point_ticks(self):
+        ticks = _calc_data_point_ticks(np.array([1, 5, 9, 11]), 1, 0.5, False)
+        np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25])
+
+        ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5, False)
+        np.testing.assert_allclose(ticks, [0.75])
+
+    def test_set_axes_options(self):
+        fig, ax = plt.subplots()
+        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
+                          x_tick_labels=["T0", "T1"])
+        self.assertEqual(ax.get_title(), "Plot Title")
+        self.assertEqual(ax.get_ylabel(), "y-axis label")
+        self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
+        self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")
+
+    def test_set_axes_options_ylim(self):
+        fig, ax = plt.subplots()
+        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
+                          x_tick_labels=["T0", "T1", "T2"], y_min=0, y_max=1)
+        self.assertEqual(ax.get_title(), "Plot Title")
+        self.assertEqual(ax.get_ylabel(), "y-axis label")
+        self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
+        self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")
+        self.assertEqual(ax.get_ylim(), (0.0, 1.0))
+
+    def test_set_axes_options_x_values_as_tick_labels(self):
+        fig, ax = plt.subplots()
+        _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
+                          x_values=[42, 45, 800])
+
+        self.assertEqual(ax.get_title(), "Plot Title")
+        self.assertEqual(ax.get_ylabel(), "y-axis label")
+        self.assertEqual(ax.get_xticklabels()[0].get_text(), '42')
+        self.assertEqual(ax.get_xticklabels()[1].get_text(), '45')
+        self.assertEqual(ax.get_xticklabels()[2].get_text(), '800')
+
+    def test_set_axes_options_bad_ylim(self):
+        fig, ax = plt.subplots()
+        with npt.assert_raises(ValueError):
+            _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
+                              x_tick_labels=["T0", "T1", "T2"], y_min='car',
+                              y_max=30)
+
+    def test_set_axes_options_invalid_x_tick_labels_orientation(self):
+        fig, ax = plt.subplots()
+        with npt.assert_raises(ValueError):
+            _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
+                              x_tick_labels=["T0", "T1"],
+                              x_tick_labels_orientation='brofist')
+
+    def test_create_legend(self):
+        fig, ax = plt.subplots()
+        _create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors')
+        self.assertEqual(len(ax.get_legend().get_texts()), 2)
+
+        fig, ax = plt.subplots()
+        _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
+                       'symbols')
+        self.assertEqual(len(ax.get_legend().get_texts()), 3)
+
+    def test_create_legend_invalid_input(self):
+        fig, ax = plt.subplots()
+        with npt.assert_raises(ValueError):
+            _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'], 'symbols')
+        with npt.assert_raises(ValueError):
+            _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
+                           'foo')
+
+    def test_grouped_distributions_bar(self):
+        fig = grouped_distributions('bar', self.ValidTypicalData,
+                                    [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
+                                    ["Infants", "Children", "Teens"],
+                                    ['b', 'r', 'g'], "x-axis label",
+                                    "y-axis label", "Test")
+        ax = fig.get_axes()[0]
+        self.assertEqual(ax.get_title(), "Test")
+        self.assertEqual(ax.get_xlabel(), "x-axis label")
+        self.assertEqual(ax.get_ylabel(), "y-axis label")
+        self.assertEqual(len(ax.get_xticklabels()), 4)
+        np.testing.assert_allclose(ax.get_xticks(),
+                                   [1.1125, 2.0125, 3.8125, 4.1125])
+
+    def test_grouped_distributions_insufficient_colors(self):
+        args = ('bar', self.ValidTypicalData, [1, 4, 10, 11],
+                ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
+                ['b', 'r'], "x-axis label", "y-axis label", "Test")
+
+        npt.assert_warns(RuntimeWarning,
+                         grouped_distributions,
+                         *args)
+
+    def test_grouped_distributions_scatter(self):
+        fig = grouped_distributions('scatter', self.ValidTypicalData,
+                                    [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
+                                    ["Infants", "Children", "Teens"],
+                                    ['^', '>', '<'], "x-axis label",
+                                    "y-axis label", "Test")
+        ax = fig.get_axes()[0]
+        self.assertEqual(ax.get_title(), "Test")
+        self.assertEqual(ax.get_xlabel(), "x-axis label")
+        self.assertEqual(ax.get_ylabel(), "y-axis label")
+        self.assertEqual(len(ax.get_xticklabels()), 4)
+        np.testing.assert_allclose(ax.get_xticks(),
+                                   [1.075, 1.975, 3.775, 4.075])
+
+    def test_grouped_distributions_insufficient_symbols(self):
+        args = ('scatter', self.ValidTypicalData, [1, 4, 10, 11],
+                ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
+                ['^'], "x-axis label", "y-axis label", "Test")
+
+        npt.assert_warns(RuntimeWarning, grouped_distributions, *args)
+
+    def test_grouped_distributions_empty_marker_list(self):
+        grouped_distributions('scatter', self.ValidTypicalData,
+                              [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
+                              ["Infants", "Children", "Teens"], [],
+                              "x-axis label", "y-axis label", "Test")
+
+    def test_grouped_distributions_box(self):
+        fig = grouped_distributions('box', self.ValidTypicalData,
+                                    [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
+                                    ["Infants", "Children", "Teens"],
+                                    ['b', 'g', 'y'], "x-axis label",
+                                    "y-axis label", "Test")
+        ax = fig.get_axes()[0]
+        self.assertEqual(ax.get_title(), "Test")
+        self.assertEqual(ax.get_xlabel(), "x-axis label")
+        self.assertEqual(ax.get_ylabel(), "y-axis label")
+        self.assertEqual(len(ax.get_xticklabels()), 4)
+        np.testing.assert_allclose(ax.get_xticks(),
+                                   [1.075, 1.975, 3.775, 4.075])
+
+    def test_grouped_distributions_error(self):
+        with npt.assert_raises(ValueError):
+            grouped_distributions('pie', self.ValidTypicalData,
+                                  [1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
+                                  ["Infants", "Children", "Teens"],
+                                  ['b', 'g', 'y'],
+                                  "x-axis label", "y-axis label", "Test")
+
+    def test_grouped_distributions_negative_distribution_width(self):
+        args = ('box', self.ValidTypicalData, [1, 4, 10, 11],
+                ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
+                ['b', 'g', 'y'], "x-axis label", "y-axis label", "Test")
+
+        with self.assertRaises(ValueError):
+            grouped_distributions(*args, distribution_width=0)
+
+        with self.assertRaises(ValueError):
+            grouped_distributions(*args, distribution_width=-42)
+
+    def test_boxplots(self):
+        fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10],
+                       ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
+                       "y-axis label",
+                       legend=(('blue', 'red'), ('foo', 'bar')))
+        ax = fig.get_axes()[0]
+        self.assertEqual(ax.get_title(), "Test")
+        self.assertEqual(ax.get_xlabel(), "x-axis label")
+        self.assertEqual(ax.get_ylabel(), "y-axis label")
+        self.assertEqual(len(ax.get_xticklabels()), 3)
+        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
+
+    def test_boxplots_empty_distributions(self):
+        # Empty distributions are still plotted (hidden via NaN data) so
+        # the axis metadata and tick layout stay intact.
+        fig = boxplots([[1, 2, 3], [], [4, 5, 6]], [1, 4, 10],
+                       ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
+                       "y-axis label")
+        ax = fig.get_axes()[0]
+        self.assertEqual(ax.get_title(), "Test")
+        self.assertEqual(ax.get_xlabel(), "x-axis label")
+        self.assertEqual(ax.get_ylabel(), "y-axis label")
+        self.assertEqual(len(ax.get_xticklabels()), 3)
+        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
+
+        # second distribution (empty) should have nans since it is hidden.
+        # boxplots in mpl < 1.4.0 have 8 lines per boxplot, while mpl 1.4.0 has
+        # 7. in either case, the line at index 8 should have a nan for its y
+        # value
+        lines = ax.get_lines()
+        self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
+        # line in first distribution should *not* have nan for its y value
+        self.assertFalse(np.isnan(lines[0].get_xydata()[0][1]))
+
+        # All distributions are empty.
+        fig = boxplots([[], [], []], [1, 4, 10],
+                       ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
+                       "y-axis label")
+        ax = fig.get_axes()[0]
+        self.assertEqual(ax.get_title(), "Test")
+        self.assertEqual(ax.get_xlabel(), "x-axis label")
+        self.assertEqual(ax.get_ylabel(), "y-axis label")
+        self.assertEqual(len(ax.get_xticklabels()), 3)
+        self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
+
+        # With 8 lines per boxplot, indices 0, 8, and 16 hit one line in
+        # each of the three (hidden) boxplots.
+        lines = ax.get_lines()
+        self.assertTrue(np.isnan(lines[0].get_xydata()[0][1]))
+        self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
+        self.assertTrue(np.isnan(lines[16].get_xydata()[0][1]))
+
+    def test_boxplots_box_colors(self):
+        # Box coloring should work whether distributions are empty or
+        # not, and whether colors are given per-box or as one shared
+        # color.
+
+        # Coloring works with all empty distributions.
+        fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow'])
+        ax = fig.get_axes()[0]
+        self.assertEqual(len(ax.get_xticklabels()), 3)
+        # patch colors should match what we specified
+        self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
+        self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
+        self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))
+        # patch location should include at least one nan since the distribution
+        # is empty, and thus hidden
+        for patch in ax.patches:
+            self.assertTrue(np.isnan(patch.xy[0][1]))
+
+        # A single color string applies to every box; the expected RGBA
+        # tuple is the named color 'pink'.
+        fig = boxplots([[], [], []], box_colors='pink')
+        ax = fig.get_axes()[0]
+        self.assertEqual(len(ax.get_xticklabels()), 3)
+        for patch in ax.patches:
+            npt.assert_almost_equal(
+                patch.get_facecolor(),
+                (1.0, 0.7529411764705882, 0.796078431372549, 1.0))
+            self.assertTrue(np.isnan(patch.xy[0][1]))
+
+        # Coloring works with some empty distributions.
+        fig = boxplots([[], [1, 2, 3.5], []],
+                       box_colors=['blue', 'red', 'yellow'])
+        ax = fig.get_axes()[0]
+        self.assertEqual(len(ax.get_xticklabels()), 3)
+        self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
+        self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
+        self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))
+        # Only the empty (hidden) boxes carry NaN coordinates.
+        self.assertTrue(np.isnan(ax.patches[0].xy[0][1]))
+        self.assertFalse(np.isnan(ax.patches[1].xy[0][1]))
+        self.assertTrue(np.isnan(ax.patches[2].xy[0][1]))
+
+    def test_boxplots_invalid_input(self):
+        # Each malformed input below should be rejected with ValueError.
+
+        # Non-numeric entries in distribution.
+        with npt.assert_raises(ValueError):
+            boxplots([[1, 'foo', 3]])
+
+        # Number of colors doesn't match number of distributions.
+        with npt.assert_raises(ValueError):
+            boxplots([[1, 2, 3], [], [4, 5, 6]], box_colors=['blue', 'red'])
+
+        # Invalid legend.
+        with npt.assert_raises(ValueError):
+            boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz'))
+
+    def test_color_box_plot(self):
+        # Smoke tests: no assertions here — success simply means
+        # _color_box_plot raises nothing for valid color lists.
+        fig, ax = plt.subplots()
+        box_plot = plt.boxplot(self.ValidTypicalBoxData)
+        _color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)])
+
+        # Some colors are None.
+        fig, ax = plt.subplots()
+        box_plot = plt.boxplot(self.ValidTypicalBoxData)
+        _color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)])
+
+        # All colors are None.
+        fig, ax = plt.subplots()
+        box_plot = plt.boxplot(self.ValidTypicalBoxData)
+        _color_box_plot(ax, box_plot, [None, None, None])
+
+    def test_color_box_plot_invalid_input(self):
+        # Bad color specs must raise ValueError rather than silently
+        # mis-coloring the plot.
+
+        # Invalid color.
+        fig, ax = plt.subplots()
+        box_plot = plt.boxplot(self.ValidTypicalBoxData)
+        with npt.assert_raises(ValueError):
+            _color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue'])
+
+        # Wrong number of colors.
+        fig, ax = plt.subplots()
+        box_plot = plt.boxplot(self.ValidTypicalBoxData)
+        with npt.assert_raises(ValueError):
+            _color_box_plot(ax, box_plot, ['blue', (1, 1, 0.9)])
+
+    def test_is_single_matplotlib_color(self):
+        # Single colors: named strings, or 3/4-element sequences of
+        # scalars (RGB / RGBA).
+        self.assertTrue(_is_single_matplotlib_color('w'))
+        self.assertTrue(_is_single_matplotlib_color('white'))
+        self.assertTrue(_is_single_matplotlib_color([1, 1, 1]))
+        self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1]))
+        self.assertTrue(_is_single_matplotlib_color((1, 1, 1)))
+        self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1)))
+        self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0)))
+        self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0)))
+        # The check is structural only: 2.0 is outside the [0, 1] channel
+        # range but the tuple still counts as a single color.
+        self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0)))
+
+        # Sequences of colors (or of color-like tuples) are not single
+        # colors.
+        self.assertFalse(_is_single_matplotlib_color(['w', 'r']))
+        self.assertFalse(_is_single_matplotlib_color(['w']))
+        self.assertFalse(_is_single_matplotlib_color(('w',)))
+        self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),)))
+        self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),
+                                                      (0.9, 0.9))))
+
+    def test_set_figure_size(self):
+        fig, ax = plt.subplots()
+        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
+                          x_tick_labels=['foofoofoo', 'barbarbar'],
+                          x_tick_labels_orientation='vertical')
+        _set_figure_size(fig, 3, 4)
+        self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4)))
+
+    def test_set_figure_size_defaults(self):
+        fig, ax = plt.subplots()
+        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
+                          x_tick_labels=['foofoofoo', 'barbarbar'],
+                          x_tick_labels_orientation='vertical')
+        orig_fig_size = fig.get_size_inches()
+        _set_figure_size(fig)
+        self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))
+
+    def test_set_figure_size_invalid(self):
+        # Non-positive dimensions are ignored: the figure keeps its
+        # original size instead of raising.
+        fig, ax = plt.subplots()
+        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
+                          x_tick_labels=['foofoofoo', 'barbarbar'],
+                          x_tick_labels_orientation='vertical')
+        orig_fig_size = fig.get_size_inches()
+        _set_figure_size(fig, -1, 0)
+        self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))
+
+    def test_set_figure_size_long_labels(self):
+        # Tick labels too long for the requested size trigger a
+        # RuntimeWarning, but the requested size is still applied.
+        fig, ax = plt.subplots()
+        _set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
+                          x_tick_labels=['foofoofooooooooooooooooooooooooo'
+                                         'oooooooooooooooooooooooooooooooo'
+                                         'oooooooooooooooooooooooooooooooo'
+                                         'oooo', 'barbarbar'],
+                          x_tick_labels_orientation='vertical')
+        npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3)
+        npt.assert_array_equal(fig.get_size_inches(), (3, 3))
+
+
+# Run this module's tests when executed directly.
+if __name__ == '__main__':
+    main()
diff --git a/skbio/format/__init__.py b/skbio/format/__init__.py
new file mode 100644
index 0000000..18cec47
--- /dev/null
+++ b/skbio/format/__init__.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+# Expose ``skbio.format.test()``, which runs this subpackage's test
+# suite via numpy's Tester machinery.
+from numpy.testing import Tester
+test = Tester().test
diff --git a/skbio/format/sequences/__init__.py b/skbio/format/sequences/__init__.py
new file mode 100644
index 0000000..77f5ea1
--- /dev/null
+++ b/skbio/format/sequences/__init__.py
@@ -0,0 +1,39 @@
+r"""
+Format biological sequences (:mod:`skbio.format.sequences`)
+===========================================================
+
+.. currentmodule:: skbio.format.sequences
+
+This module provides functions for writing sequence files in a variety of
+different formats; the available formatters are listed below.
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+    fasta_from_sequences
+    fasta_from_alignment
+    format_fastq_record
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from .fasta import fasta_from_sequences, fasta_from_alignment
+from .fastq import format_fastq_record
+
+# Public API of this subpackage.
+__all__ = ['fasta_from_sequences', 'fasta_from_alignment',
+           'format_fastq_record']
+
+
+# ``skbio.format.sequences.test()`` runs this subpackage's tests.
+test = Tester().test
diff --git a/skbio/format/sequences/fasta.py b/skbio/format/sequences/fasta.py
new file mode 100644
index 0000000..bf90a06
--- /dev/null
+++ b/skbio/format/sequences/fasta.py
@@ -0,0 +1,176 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import warnings
+
+from skbio.alignment import Alignment
+from skbio.sequence import BiologicalSequence
+
+
+def fasta_from_sequences(seqs, make_seqlabel=None, line_wrap=None):
+    """Returns a FASTA string given a list of sequence objects.
+
+    .. note:: Deprecated in scikit-bio 0.2.0-dev
+       ``fasta_from_sequences`` will be removed in scikit-bio 0.3.0. It is
+       replaced by ``write``, which is a more general method for serializing
+       FASTA-formatted files. ``write`` supports multiple file formats by
+       taking advantage of scikit-bio's I/O registry system. See
+       :mod:`skbio.io` for more details.
+
+    A ``sequence.Label`` attribute takes precedence over ``sequence.Name``.
+
+    Parameters
+    ----------
+    seqs : list
+        seqs can be a list of sequence objects or strings.
+    make_seqlabel : function, optional
+        callback function that takes the seq object and returns a label
+        ``str``. If ``None`` is passed, the following attributes will try to be
+        retrieved in this order and the first to exist will be used:
+        ``id``, ``Label`` or ``Name``. In any other case an integer
+        with the position of the sequence object will be used.
+    line_wrap : int, optional
+        line_wrap: an integer for maximum line width, if ``None`` is passed
+        the full sequence will be used.
+
+    Returns
+    -------
+    str
+        FASTA formatted string composed of the objects passed in via `seqs`.
+
+    See Also
+    --------
+    skbio.parse.sequences.parse_fasta
+
+    Examples
+    --------
+    Formatting a list of sequence objects
+
+    >>> from skbio.format.sequences import fasta_from_sequences
+    >>> from skbio.sequence import DNASequence
+    >>> seqs = [DNASequence('ACTCGAGATC', 'seq1'),
+    ...         DNASequence('GGCCT', 'seq2')]
+    >>> print fasta_from_sequences(seqs)
+    >seq1
+    ACTCGAGATC
+    >seq2
+    GGCCT
+
+    """
+    warnings.warn(
+        "`fasta_from_sequences` is deprecated and will be removed in "
+        "scikit-bio 0.3.0. Please update your code to use `skbio.io.write`.",
+        DeprecationWarning)
+
+    fasta_list = []
+    for i, seq in enumerate(seqs):
+        # Check if it has a label, or one is to be created
+        # Precedence: the make_seqlabel callback, then the first truthy of
+        # the ``id``/``Label``/``Name`` attributes; otherwise the
+        # sequence's position index is the label.
+        label = str(i)
+        if make_seqlabel is not None:
+            label = make_seqlabel(seq)
+        elif hasattr(seq, 'id') and seq.id:
+            label = seq.id
+        elif hasattr(seq, 'Label') and seq.Label:
+            label = seq.Label
+        elif hasattr(seq, 'Name') and seq.Name:
+            label = seq.Name
+
+        # wrap sequence lines
+        seq_str = str(seq)
+        if line_wrap is not None:
+            # ceil(len(seq_str) / line_wrap) chunks, computed with divmod
+            # to stay in integer arithmetic.
+            numlines, remainder = divmod(len(seq_str), line_wrap)
+            if remainder:
+                numlines += 1
+            body = [seq_str[j * line_wrap:(j + 1) * line_wrap]
+                    for j in range(numlines)]
+        else:
+            body = [seq_str]
+
+        fasta_list.append('>' + label)
+        fasta_list += body
+
+    return '\n'.join(fasta_list)
+
+
+def fasta_from_alignment(aln, make_seqlabel=None, line_wrap=None, sort=True):
+    """Returns a FASTA string given an alignment object
+
+    .. note:: Deprecated in scikit-bio 0.2.0-dev
+       ``fasta_from_alignment`` will be removed in scikit-bio 0.3.0. It is
+       replaced by ``write``, which is a more general method for serializing
+       FASTA-formatted files. ``write`` supports multiple file formats by
+       taking advantage of scikit-bio's I/O registry system. See
+       :mod:`skbio.io` for more details.
+
+    Parameters
+    ----------
+    aln : Alignment, dict
+        alignment or dictionary where the keys are the sequence ids and
+        the values are the sequences themselves.
+    make_seqlabel : function, optional
+        callback function that takes the seq object and returns a label
+        ``str``. If ``None`` is passed, the following attributes will try to be
+        retrieved in this order and the first to exist will be used:
+        ``id``, ``Label`` or ``Name``. In any other case an integer
+        with the position of the sequence object will be used.
+    line_wrap : int, optional
+        line_wrap: an integer for maximum line width, if ``None`` is passed
+        the full sequence will be used.
+    sort : bool, optional
+        Whether or not the sequences should be sorted by their sequence
+        id, default value is ``True``.
+
+    Returns
+    -------
+    str
+        FASTA formatted string composed of the objects passed in via `seqs`.
+
+    See Also
+    --------
+    skbio.parse.sequences.parse_fasta
+    skbio.alignment.Alignment
+
+    Examples
+    --------
+    Formatting a sequence alignment object into a FASTA file.
+
+    >>> from skbio.alignment import Alignment
+    >>> from skbio.sequence import DNA
+    >>> from skbio.format.sequences import fasta_from_alignment
+    >>> seqs = [DNA("ACC--G-GGTA..", id="seq1"),
+    ...         DNA("TCC--G-GGCA..", id="seqs2")]
+    >>> a1 = Alignment(seqs)
+    >>> print fasta_from_alignment(a1)
+    >seq1
+    ACC--G-GGTA..
+    >seqs2
+    TCC--G-GGCA..
+
+    """
+    warnings.warn(
+        "`fasta_from_alignment` is deprecated and will be removed in "
+        "scikit-bio 0.3.0. Please update your code to use `skbio.io.write` "
+        "or `skbio.Alignment.write`.", DeprecationWarning)
+
+    # check if it's an Alignment object or a dictionary
+    if isinstance(aln, Alignment):
+        order = aln.ids()
+    else:
+        # dict input: iterate over the sequence ids (keys).
+        order = aln.keys()
+
+    if sort:
+        order = sorted(order)
+
+    ordered_seqs = []
+    for label in order:
+        seq = aln[label]
+        if isinstance(seq, str):
+            # Promote plain strings so the label travels with the
+            # sequence into fasta_from_sequences.
+            seq = BiologicalSequence(seq, label)
+        ordered_seqs.append(seq)
+    return fasta_from_sequences(ordered_seqs, make_seqlabel=make_seqlabel,
+                                line_wrap=line_wrap)
diff --git a/skbio/format/sequences/fastq.py b/skbio/format/sequences/fastq.py
new file mode 100644
index 0000000..55a5f2f
--- /dev/null
+++ b/skbio/format/sequences/fastq.py
@@ -0,0 +1,78 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import warnings
+
+
+def _phred_to_ascii(a, offset):
+    """Convert Phred quality score to ASCII character with specified offset"""
+    # ``a`` is a numpy integer array; adding the offset element-wise
+    # yields ASCII codes, and tostring() dumps them as raw bytes.
+    # NOTE(review): tostring() is a deprecated alias of tobytes() in
+    # modern numpy.
+    return (a + offset).tostring()
+
+
+def _phred_to_ascii33(a):
+    """Convert Phred quality score to ASCII character with offset of 33"""
+    return _phred_to_ascii(a, 33)
+
+
+def _phred_to_ascii64(a):
+    """Convert Phred quality score to ASCII character with offset of 64"""
+    return _phred_to_ascii(a, 64)
+
+
+def format_fastq_record(seqid, seq, qual, phred_offset=33):
+    """Format a FASTQ record
+
+    .. note:: Deprecated in scikit-bio 0.2.0-dev
+       ``format_fastq_record`` will be removed in scikit-bio 0.3.0. It is
+       replaced by ``write``, which is a more general method for serializing
+       FASTQ-formatted files. ``write`` supports multiple file formats by
+       taking advantage of scikit-bio's I/O registry system. See
+       :mod:`skbio.io` for more details.
+
+    Parameters
+    ----------
+    seqid : bytes
+        The sequence ID
+    seq : bytes or subclass of BiologicalSequence
+        The sequence
+    qual : np.array of int8
+        The quality scores
+    phred_offset : int, either 33 or 64
+        Set a phred offset
+
+    Returns
+    -------
+    bytes
+        A string representation of a single FASTQ record.
+
+    Raises
+    ------
+    ValueError
+        If `phred_offset` is neither 33 nor 64.
+
+    Examples
+    --------
+    >>> from skbio.format.sequences import format_fastq_record
+    >>> from numpy import array, int8
+    >>> seqid = 'seq1'
+    >>> seq = 'AATTGG'
+    >>> qual = array([38, 38, 39, 39, 40, 40], dtype=int8)
+    >>> print format_fastq_record(seqid, seq, qual),
+    @seq1
+    AATTGG
+    +
+    GGHHII
+
+    """
+    warnings.warn(
+        "`format_fastq_record` is deprecated and will be removed in "
+        "scikit-bio 0.3.0. Please update your code to use `skbio.io.write`.",
+        DeprecationWarning)
+
+    # Pick the quality encoder for the requested offset; anything else
+    # is rejected.
+    if phred_offset == 33:
+        phred_f = _phred_to_ascii33
+    elif phred_offset == 64:
+        phred_f = _phred_to_ascii64
+    else:
+        raise ValueError("Unknown phred offset: %d" % phred_offset)
+
+    # Record layout: @id, sequence, '+', encoded quality; the trailing
+    # b'' produces the final newline after the quality line.
+    return b'\n'.join([b"@" + seqid, seq, b'+', phred_f(qual), b''])
diff --git a/skbio/format/sequences/tests/__init__.py b/skbio/format/sequences/tests/__init__.py
new file mode 100644
index 0000000..c99682c
--- /dev/null
+++ b/skbio/format/sequences/tests/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/format/sequences/tests/test_fasta.py b/skbio/format/sequences/tests/test_fasta.py
new file mode 100644
index 0000000..dedc2fc
--- /dev/null
+++ b/skbio/format/sequences/tests/test_fasta.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+"""Tests for FASTA sequence format writer.
+"""
+from unittest import TestCase, main
+
+from skbio import DNASequence, BiologicalSequence, Alignment
+from skbio.format.sequences import fasta_from_sequences, fasta_from_alignment
+
+
+class FastaTests(TestCase):
+
+    """Tests for Fasta writer.
+    """
+
+    def setUp(self):
+        """Setup for Fasta tests."""
+        self.strings = ['AAAA', 'CCCC', 'gggg', 'uuuu']
+        self.fasta_no_label = '>0\nAAAA\n>1\nCCCC\n>2\ngggg\n>3\nuuuu'
+        self.fasta_with_label =\
+            '>1st\nAAAA\n>2nd\nCCCC\n>3rd\nGGGG\n>4th\nUUUU'
+        self.fasta_with_label_lw2 =\
+            '>1st\nAA\nAA\n>2nd\nCC\nCC\n>3rd\nGG\nGG\n>4th\nUU\nUU'
+        self.alignment_dict = {'1st': 'AAAA', '2nd': 'CCCC', '3rd': 'GGGG',
+                               '4th': 'UUUU'}
+        self.sequence_objects_a = [DNASequence('ACTCGAGATC', 'seq1'),
+                                   DNASequence('GGCCT', 'seq2')]
+        self.sequence_objects_b = [BiologicalSequence('ACTCGAGATC', 'seq1'),
+                                   BiologicalSequence('GGCCT', 'seq2')]
+        seqs = [DNASequence("ACC--G-GGTA..", id="seq1"),
+                DNASequence("TCC--G-GGCA..", id="seqs2")]
+        self.alignment = Alignment(seqs)
+
+    def test_fasta_from_sequence_objects(self):
+        """Check FASTA files are created correctly off of sequence objects"""
+        self.assertEqual(fasta_from_sequences(self.sequence_objects_a),
+                         FASTA_STRING)
+
+        self.assertEqual(fasta_from_sequences(self.sequence_objects_b),
+                         FASTA_STRING)
+
+    def test_fasta_from_sequences(self):
+        """should return correct fasta string."""
+        self.assertEqual(fasta_from_sequences(''), '')
+        self.assertEqual(fasta_from_sequences(self.strings),
+                         self.fasta_no_label)
+
+    def test_fasta_from_alignment(self):
+        """should return correct fasta string."""
+        self.assertEqual(fasta_from_alignment({}), '')
+        self.assertEqual(fasta_from_alignment(self.alignment_dict),
+                         self.fasta_with_label)
+        self.assertEqual(fasta_from_alignment(self.alignment_dict,
+                                              line_wrap=2),
+                         self.fasta_with_label_lw2)
+
+    def test_fasta_from_alignment_from_alignment(self):
+        """should return correct fasta string for alignment object"""
+        # alignment with a few sequences
+        obs = fasta_from_alignment(self.alignment)
+        self.assertEquals('>seq1\nACC--G-GGTA..\n>seqs2\nTCC--G-GGCA..', obs)
+
+        # empty alginment
+        obs = fasta_from_alignment(Alignment([]))
+        self.assertEquals('', obs)
+
+        # alignment with a few sequences
+        obs = fasta_from_alignment(self.alignment, sort=False)
+        self.assertEquals('>seq1\nACC--G-GGTA..\n>seqs2\nTCC--G-GGCA..', obs)
+
+
+# Expected serialization of sequence_objects_a/_b in the tests above.
+FASTA_STRING = '>seq1\nACTCGAGATC\n>seq2\nGGCCT'
+
+if __name__ == "__main__":
+    main()
diff --git a/skbio/format/sequences/tests/test_fastq.py b/skbio/format/sequences/tests/test_fastq.py
new file mode 100644
index 0000000..cecad1f
--- /dev/null
+++ b/skbio/format/sequences/tests/test_fastq.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+import numpy as np
+from unittest import TestCase, main
+
+from skbio.format.sequences import format_fastq_record
+from skbio.format.sequences.fastq import _phred_to_ascii33, _phred_to_ascii64
+
+
+class FASTQFormatTests(TestCase):
+    def setUp(self):
+        self.qual_scores = np.array([38, 39, 40], dtype=np.int8)
+        self.args = (b'abc', b'def', self.qual_scores)
+
+    def test_format_fastq_record_phred_offset_33(self):
+        exp = b"@abc\ndef\n+\nGHI\n"
+        obs = format_fastq_record(*self.args, phred_offset=33)
+        self.assertEqual(obs, exp)
+
+    def test_format_fastq_record_phred_offset_64(self):
+        exp = b"@abc\ndef\n+\nfgh\n"
+        obs = format_fastq_record(*self.args, phred_offset=64)
+        self.assertEqual(obs, exp)
+
+    def test_format_fastq_record_invalid_phred_offset(self):
+        with self.assertRaises(ValueError):
+            format_fastq_record(*self.args, phred_offset=42)
+
+    def test_phred_to_ascii33(self):
+        obs = _phred_to_ascii33(self.qual_scores)
+        self.assertEqual(obs, b'GHI')
+
+    def test_phred_to_ascii64(self):
+        obs = _phred_to_ascii64(self.qual_scores)
+        self.assertEqual(obs, b'fgh')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/io/__init__.py b/skbio/io/__init__.py
new file mode 100644
index 0000000..5bf5080
--- /dev/null
+++ b/skbio/io/__init__.py
@@ -0,0 +1,322 @@
+r"""
+File I/O (:mod:`skbio.io`)
+==========================
+
+.. currentmodule:: skbio.io
+
+This package provides I/O functionality for skbio.
+
+Introduction to I/O
+-------------------
+Reading and writing files (I/O) can be a complicated task:
+
+* A file format can sometimes be read into more than one in-memory
+  representation (i.e., object). For example, a FASTA file can be read into an
+  :mod:`skbio.alignment.SequenceCollection` or :mod:`skbio.alignment.Alignment`
+  depending on the file's contents and what operations you'd like to perform on
+  your data.
+* A single object might be writeable to more than one file format. For example,
+  an :mod:`skbio.alignment.Alignment` object could be written to FASTA, FASTQ,
+  QSEQ, PHYLIP, or Stockholm formats, just to name a few.
+* You might not know the exact file format of your file, but you want to read
+  it into an appropriate object.
+* You might want to read multiple files into a single object, or write an
+  object to multiple files.
+* Instead of reading a file into an object, you might want to stream the file
+  using a generator (e.g., if the file cannot be fully loaded into memory).
+
+To address these issues (and others), scikit-bio provides a simple, powerful
+interface for dealing with I/O. We accomplish this by using a single I/O
+registry. Below is a description of how to use the registry and how to extend
+it.
+
+Reading files into scikit-bio
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+There are two ways to read files. The first way is to use the
+procedural interface:
+
+``my_obj = skbio.io.read(<filehandle or filepath>, format='<format here>',
+into=<class to construct>)``
+
+The second is to use the object-oriented (OO) interface which is automatically
+constructed from the procedural interface:
+
+``my_obj = <class to construct>.read(<filehandle or filepath>,
+format='<format here>')``
+
+For example, to read a `newick` file using both interfaces you would type:
+
+>>> from skbio import read
+>>> from skbio import TreeNode
+>>> from io import StringIO
+>>> open_filehandle = StringIO(u'(a, b);')
+>>> tree = read(open_filehandle, format='newick', into=TreeNode)
+>>> tree
+<TreeNode, name: unnamed, internal node count: 0, tips count: 2>
+
+For the OO interface:
+
+>>> open_filehandle = StringIO(u'(a, b);')
+>>> tree = TreeNode.read(open_filehandle, format='newick')
+>>> tree
+<TreeNode, name: unnamed, internal node count: 0, tips count: 2>
+
+In the case of ``skbio.io.read`` if `into` is not provided, then a generator
+will be returned. What the generator yields will depend on what format is being
+read.
+
+When `into` is provided, format may be omitted and the registry will use its
+knowledge of the available formats for the requested class to infer the correct
+format. This format inference is also available in the OO interface, meaning
+that `format` may be omitted there as well.
+
+As an example:
+
+>>> open_filehandle = StringIO(u'(a, b);')
+>>> tree = TreeNode.read(open_filehandle)
+>>> tree
+<TreeNode, name: unnamed, internal node count: 0, tips count: 2>
+
+We call format inference `sniffing`, much like the
+`csv <https://docs.python.org/2/library/csv.html#csv.Sniffer>`_ module of
+Python's standard library. The goal of a `sniffer` is twofold: to identify if a
+file is a specific format, and if it is, to provide `**kwargs` which can be
+used to better parse the file.
+
+.. note:: There is a built-in `sniffer` which results in a useful error message
+   if an empty file is provided as input and the format was omitted.
+
+Writing files from scikit-bio
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Just as when reading files, there are two ways to write files.
+
+Procedural Interface:
+
+``skbio.io.write(my_obj, format='<format here>',
+into=<filehandle or filepath>)``
+
+OO Interface:
+
+``my_obj.write(<filehandle or filepath>, format='<format here>')``
+
+In the procedural interface, `format` is required. Without it, scikit-bio does
+not know how you want to serialize an object. OO interfaces define a default
+`format`, so it may not be necessary to include it.
+
+Supported file formats
+^^^^^^^^^^^^^^^^^^^^^^
+For details on what objects are supported by each format,
+see the associated documentation.
+
+.. autosummary::
+   :toctree: generated/
+
+   clustal
+   fasta
+   fastq
+   lsmat
+   newick
+   ordination
+   phylip
+   qseq
+
+Formats are considered to be names which represent a way of encoding a file.
+
+User functions
+^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   write
+   read
+   sniff
+
+User exceptions
+^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   RecordError
+   FieldError
+   UnrecognizedFormatError
+   FileFormatError
+   ClustalFormatError
+   FASTAFormatError
+   FASTQFormatError
+   LSMatFormatError
+   NewickFormatError
+   OrdinationFormatError
+   PhylipFormatError
+   QSeqFormatError
+
+User warnings
+^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   FormatIdentificationWarning
+   ArgumentOverrideWarning
+
+Developer Documentation
+-----------------------
+To extend I/O in skbio, developers should create a submodule in `skbio/io/`
+named after the file format it implements.
+
+For example, if you were to create readers and writers for a `fasta` file, you
+would create a submodule `skbio/io/fasta.py`.
+In this submodule you would use the following decorators:
+``register_writer``, ``register_reader``, and ``register_sniffer``.
+These associate your functionality to a format string and potentially an skbio
+class. Please see the relevant documentation for more information about these
+functions and the specifications for `readers`, `writers`, and `sniffers`.
+
+Once you are satisfied with the functionality, you will need to ensure that
+`skbio/io/__init__.py` contains an import of your new submodule so the
+decorators are executed on importing the user functions above. Use the function
+``import_module('skbio.io.my_new_format')``.
+
+The following keyword args may not be used when defining new `readers` or
+`writers` as they already have special meaning to the registry system:
+
+- `format`
+- `into`
+- `mode`
+- `verify`
+
+If a keyword argument is a file, such as in the case of `fasta` with `qual`,
+then you can set the default to a specific marker, or sentinel, to indicate to
+the registry that the kwarg should have special handling. For example:
+
+.. code-block:: python
+
+   from skbio.io import FileSentinel
+
+   @register_reader(fasta, object)
+   def fasta_to_object(fh, qual=FileSentinel):
+       ...
+
+After the registry reads your function, it will replace `FileSentinel` with
+`None` allowing you to perform normal checks for kwargs
+(e.g. `if my_kwarg is not None:`). If a user provides input for the kwarg, the
+registry will convert it to an open filehandle.
+
+.. note:: Keyword arguments are not permitted in `sniffers`. `Sniffers` may not
+   raise exceptions; if an exception is thrown by a `sniffer`, the user will be
+   asked to report it on our `issue tracker
+   <https://github.com/biocore/scikit-bio/issues/>`_.
+
+When raising errors in readers and writers, the error should be a subclass of
+``FileFormatError`` specific to your new format.
+
+Writing unit tests
+^^^^^^^^^^^^^^^^^^
+Because scikit-bio handles all of the I/O boilerplate, you only need to test
+the actual business logic of your `readers`, `writers`, and `sniffers`. The
+easiest way to accomplish this is to create a list of files and their expected
+results when deserialized. Then you can iterate through the list ensuring the
+expected results occur and that the expected results can be reserialized into
+an equivalent file. This process is called 'roundtripping'.
+
+It is also important to test some invalid inputs and ensure that the correct
+error is raised by your `readers`. Consider using `assertRaises` as a context
+manager like so:
+
+.. code-block:: python
+
+   with self.assertRaises(SomeFileFormatErrorSubclass) as cm:
+       do_something_wrong()
+   self.assertIn('action verb or subject of an error', str(cm.exception))
+
+A good example to review when preparing to write your first I/O unit tests is
+the ordination test code (see in ``skbio/io/tests/test_ordination.py``).
+
+Developer functions
+^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+    :toctree: generated/
+
+    register_writer
+    register_reader
+    register_sniffer
+    list_write_formats
+    list_read_formats
+    get_writer
+    get_reader
+    get_sniffer
+
+Developer exceptions
+^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   DuplicateRegistrationError
+   InvalidRegistrationError
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from importlib import import_module
+
+from numpy.testing import Tester
+
+from ._warning import FormatIdentificationWarning, ArgumentOverrideWarning
+from ._exception import (DuplicateRegistrationError, InvalidRegistrationError,
+                         RecordError, FieldError, UnrecognizedFormatError,
+                         FileFormatError, ClustalFormatError, FASTAFormatError,
+                         FASTQFormatError, LSMatFormatError, NewickFormatError,
+                         OrdinationFormatError, PhylipFormatError,
+                         QSeqFormatError)
+from ._registry import (write, read, sniff, get_writer, get_reader,
+                        get_sniffer, list_write_formats, list_read_formats,
+                        register_writer, register_reader, register_sniffer,
+                        initialize_oop_interface, FileSentinel)
+
+__all__ = ['write', 'read', 'sniff',
+           'list_write_formats', 'list_read_formats',
+           'get_writer', 'get_reader', 'get_sniffer',
+           'register_writer', 'register_reader', 'register_sniffer',
+           'initialize_oop_interface', 'FileSentinel',
+
+           'FormatIdentificationWarning', 'ArgumentOverrideWarning',
+
+           'DuplicateRegistrationError', 'InvalidRegistrationError',
+           'RecordError', 'FieldError', 'UnrecognizedFormatError',
+
+           'FileFormatError',
+           'ClustalFormatError',
+           'FASTAFormatError',
+           'FASTQFormatError',
+           'LSMatFormatError',
+           'NewickFormatError',
+           'OrdinationFormatError',
+           'PhylipFormatError',
+           'QSeqFormatError']
+
+# Necessary to import each file format module to have them added to the I/O
+# registry. We use import_module instead of a typical import to avoid flake8
+# unused import errors.
+import_module('skbio.io.clustal')
+import_module('skbio.io.fasta')
+import_module('skbio.io.fastq')
+import_module('skbio.io.lsmat')
+import_module('skbio.io.newick')
+import_module('skbio.io.ordination')
+import_module('skbio.io.phylip')
+import_module('skbio.io.qseq')
+
+# Now that all of our I/O has loaded, we can add the object oriented methods
+# (read and write) to each class which has registered I/O operations.
+initialize_oop_interface()
+
+test = Tester().test
diff --git a/skbio/io/_base.py b/skbio/io/_base.py
new file mode 100644
index 0000000..23776ab
--- /dev/null
+++ b/skbio/io/_base.py
@@ -0,0 +1,187 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import range
+
+import re
+import warnings
+
+from skbio.util import cardinal_to_ordinal
+
+_whitespace_regex = re.compile(r'\s')
+_newline_regex = re.compile(r'\n')
+
+
+def _chunk_str(s, n, char):
+    """Insert `char` character every `n` characters in string `s`.
+
+    Canonically pronounced "chunkster".
+
+    """
+    # Modified from http://stackoverflow.com/a/312464/3776794
+    if n < 1:
+        raise ValueError(
+            "Cannot split string into chunks with n=%d. n must be >= 1." % n)
+    return char.join((s[i:i+n] for i in range(0, len(s), n)))
+
+
+def _decode_qual_to_phred(qual_str, variant=None, phred_offset=None):
+    phred_offset, phred_range = _get_phred_offset_and_range(
+        variant, phred_offset,
+        ["Must provide either `variant` or `phred_offset` in order to decode "
+         "quality scores.",
+         "Decoding Solexa quality scores is not currently supported, "
+         "as quality scores are always stored as Phred scores in "
+         "scikit-bio. Please see the following scikit-bio issue to "
+         "track progress on this:\n\t"
+         "https://github.com/biocore/scikit-bio/issues/719"])
+
+    phred = []
+    for c in qual_str:
+        score = ord(c) - phred_offset
+        if phred_range[0] <= score <= phred_range[1]:
+            phred.append(score)
+        else:
+            raise ValueError("Decoded Phred score %d is out of range [%d, %d]."
+                             % (score, phred_range[0], phred_range[1]))
+    return phred
+
+
+def _encode_phred_to_qual(phred, variant=None, phred_offset=None):
+    phred_offset, phred_range = _get_phred_offset_and_range(
+        variant, phred_offset,
+        ["Must provide either `variant` or `phred_offset` in order to encode "
+         "Phred scores.",
+         "Encoding Solexa quality scores is not currently supported. "
+         "Please see the following scikit-bio issue to track progress "
+         "on this:\n\t"
+         "https://github.com/biocore/scikit-bio/issues/719"])
+
+    qual_chars = []
+    for score in phred:
+        if score < phred_range[0]:
+            raise ValueError("Phred score %d is out of range [%d, %d]."
+                             % (score, phred_range[0], phred_range[1]))
+        if score > phred_range[1]:
+            warnings.warn(
+                "Phred score %d is out of targeted range [%d, %d]. Converting "
+                "to %d." % (score, phred_range[0], phred_range[1],
+                            phred_range[1]), UserWarning)
+            score = phred_range[1]
+        qual_chars.append(chr(score + phred_offset))
+    return ''.join(qual_chars)
+
+
+def _get_phred_offset_and_range(variant, phred_offset, errors):
+    if variant is None and phred_offset is None:
+        raise ValueError(errors[0])
+    if variant is not None and phred_offset is not None:
+        raise ValueError(
+            "Cannot provide both `variant` and `phred_offset`.")
+
+    if variant is not None:
+        if variant == 'sanger':
+            phred_offset = 33
+            phred_range = (0, 93)
+        elif variant == 'illumina1.3':
+            phred_offset = 64
+            phred_range = (0, 62)
+        elif variant == 'illumina1.8':
+            phred_offset = 33
+            phred_range = (0, 62)
+        elif variant == 'solexa':
+            phred_offset = 64
+            phred_range = (-5, 62)
+            raise NotImplementedError(errors[1])
+        else:
+            raise ValueError("Unrecognized variant %r." % variant)
+    else:
+        if not (33 <= phred_offset <= 126):
+            raise ValueError(
+                "`phred_offset` %d is out of printable ASCII character range."
+                % phred_offset)
+        phred_range = (0, 126 - phred_offset)
+
+    return phred_offset, phred_range
+
+
+def _get_nth_sequence(generator, seq_num):
+    # i is set to None so that an empty generator will not result in an
+    # undefined variable when compared to seq_num.
+    i = None
+    if seq_num is None or seq_num < 1:
+        raise ValueError('Invalid sequence number (`seq_num`=%s). `seq_num`'
+                         ' must be between 1 and the number of sequences in'
+                         ' the file.' % str(seq_num))
+    try:
+        for i, seq in zip(range(1, seq_num + 1), generator):
+            pass
+    finally:
+        generator.close()
+
+    if i == seq_num:
+        return seq
+    raise ValueError('Reached end of file before finding the %s sequence.'
+                     % cardinal_to_ordinal(seq_num))
+
+
+def _parse_fasta_like_header(line):
+    id_ = ''
+    desc = ''
+    header = line[1:].rstrip()
+    if header:
+        if header[0].isspace():
+            # no id
+            desc = header.lstrip()
+        else:
+            header_tokens = header.split(None, 1)
+            if len(header_tokens) == 1:
+                # no description
+                id_ = header_tokens[0]
+            else:
+                id_, desc = header_tokens
+    return id_, desc
+
+
+def _format_fasta_like_records(generator, id_whitespace_replacement,
+                               description_newline_replacement, require_qual):
+    if ((id_whitespace_replacement is not None and
+         '\n' in id_whitespace_replacement) or
+        (description_newline_replacement is not None and
+         '\n' in description_newline_replacement)):
+        raise ValueError(
+            "Newline character (\\n) cannot be used to replace whitespace in "
+            "sequence IDs, nor to replace newlines in sequence descriptions.")
+
+    for idx, seq in enumerate(generator):
+        if len(seq) < 1:
+            raise ValueError(
+                "%s sequence does not contain any characters (i.e., it is an "
+                "empty/blank sequence). Writing empty sequences is not "
+                "supported." % cardinal_to_ordinal(idx + 1))
+
+        id_ = seq.id
+        if id_whitespace_replacement is not None:
+            id_ = _whitespace_regex.sub(id_whitespace_replacement, id_)
+
+        desc = seq.description
+        if description_newline_replacement is not None:
+            desc = _newline_regex.sub(description_newline_replacement, desc)
+
+        if desc:
+            header = '%s %s' % (id_, desc)
+        else:
+            header = id_
+
+        if require_qual and not seq.has_quality():
+            raise ValueError(
+                "Cannot write %s sequence because it does not have quality "
+                "scores associated with it." % cardinal_to_ordinal(idx + 1))
+
+        yield header, seq.sequence, seq.quality
diff --git a/skbio/io/_exception.py b/skbio/io/_exception.py
new file mode 100644
index 0000000..66c1b31
--- /dev/null
+++ b/skbio/io/_exception.py
@@ -0,0 +1,95 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+
+class FileFormatError(Exception):
+    """Raised when a file cannot be parsed."""
+    pass
+
+
+class RecordError(FileFormatError):
+    """Raised when a record is bad."""
+    pass
+
+
+class FieldError(RecordError):
+    """Raised when a field within a record is bad."""
+    pass
+
+
+class UnrecognizedFormatError(FileFormatError):
+    """Raised when a file's format is unknown, ambiguous, or unidentifiable."""
+    pass
+
+
+class ClustalFormatError(FileFormatError):
+    """Raised when a ``clustal`` formatted file cannot be parsed."""
+    pass
+
+
+class FASTAFormatError(FileFormatError):
+    """Raised when a ``fasta`` formatted file cannot be parsed."""
+    pass
+
+
+class LSMatFormatError(FileFormatError):
+    """Raised when a ``lsmat`` formatted file cannot be parsed."""
+    pass
+
+
+class OrdinationFormatError(FileFormatError):
+    """Raised when an ``ordination`` formatted file cannot be parsed."""
+    pass
+
+
+class NewickFormatError(FileFormatError):
+    """Raised when a ``newick`` formatted file cannot be parsed."""
+    pass
+
+
+class FASTQFormatError(FileFormatError):
+    """Raised when a ``fastq`` formatted file cannot be parsed."""
+    pass
+
+
+class PhylipFormatError(FileFormatError):
+    """Raised when a ``phylip`` formatted file cannot be parsed.
+
+    May also be raised when an object (e.g., ``Alignment``) cannot be written
+    in ``phylip`` format.
+
+    """
+    pass
+
+
+class QSeqFormatError(FileFormatError):
+    """Raised when a ``qseq`` formatted file cannot be parsed."""
+    pass
+
+
+class InvalidRegistrationError(Exception):
+    """Raised if function doesn't meet the expected API of its registration."""
+    pass
+
+
+class DuplicateRegistrationError(Exception):
+    """Raised when a function is already registered in skbio.io"""
+
+    def __init__(self, name=None, fmt=None, cls=None, msg=None):
+        super(DuplicateRegistrationError, self).__init__()
+        if msg:
+            self.args = (msg,)
+        else:
+            if hasattr(cls, '__name__'):
+                classname = cls.__name__
+            else:
+                classname = 'generator'
+            self.args = ("'%s' already has a %s for %s."
+                         % (fmt, name, classname),)
diff --git a/skbio/io/_registry.py b/skbio/io/_registry.py
new file mode 100644
index 0000000..1d413b6
--- /dev/null
+++ b/skbio/io/_registry.py
@@ -0,0 +1,818 @@
+from __future__ import absolute_import, division, print_function
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from warnings import warn
+import types
+import copy
+import traceback
+import inspect
+
+from future.builtins import zip
+
+from . import (UnrecognizedFormatError, InvalidRegistrationError,
+               DuplicateRegistrationError, ArgumentOverrideWarning,
+               FormatIdentificationWarning)
+from .util import open_file, open_files
+
+_formats = {}
+_sniffers = {}
+_aliases = {}
+_empty_file_format = '<emptyfile>'
+
+# We create a class and instantiate it dynamically so that exceptions are more
+# obvious and so that only one object exists without copying this line.
+FileSentinel = type('FileSentinel', (object, ), {})()
+
+
+def _override_kwargs(kw, fmt_kw, warn_user):
+    for key in kw:
+        if key in fmt_kw and fmt_kw[key] != kw[key] and warn_user:
+            warn('Best guess was: %s=%s, continuing with user supplied: %s' % (
+                key, str(fmt_kw[key]), str(kw[key])
+            ), ArgumentOverrideWarning)
+        fmt_kw[key] = kw[key]
+    return fmt_kw
+
+
+def register_sniffer(format):
+    """Return a decorator for a sniffer function.
+
+    A decorator factory for sniffer functions. Sniffers may only be registered
+    to simple formats. Sniffers for compound formats are automatically
+    generated from their component simple formats.
+
+    A sniffer function should have at least the following signature:
+    ``<format_name>_sniffer(fh)``. `fh` is **always** an open filehandle.
+    This decorator provides the ability to use filepaths in the same argument
+    position as `fh`. They will automatically be opened and closed.
+
+    **The sniffer must not close the filehandle**, cleanup will be
+    handled external to the sniffer and is not its concern.
+
+    `**kwargs` are not passed to a sniffer, and a sniffer must not use them.
+
+    The job of a sniffer is to determine if a file appears to be in the given
+    format and to 'sniff' out any kwargs that would be of use to a reader
+    function.
+
+    The sniffer **must** return a tuple of (True, <kwargs dict>) if it believes
+    `fh` is a given `format`. Otherwise it should return (False, {}).
+
+    .. note:: Failure to adhere to the above interface specified for a sniffer
+       will result in unintended side-effects.
+
+    The sniffer may determine membership of a file in as many or as few
+    lines of the file as it deems necessary.
+
+    Parameters
+    ----------
+    format : str
+        A format name which a decorated sniffer will be bound to.
+
+    Returns
+    -------
+    function
+        A decorator to be used on a sniffer. The decorator will raise a
+        ``skbio.io.DuplicateRegistrationError`` if there already exists a
+        *sniffer* bound to the `format`.
+
+    See Also
+    --------
+    skbio.io.sniff
+
+    """
+    def decorator(sniffer):
+        if format in _sniffers:
+            raise DuplicateRegistrationError(msg="'%s' already has a sniffer."
+                                             % format)
+
+        def wrapped_sniffer(fp, mode='U', **kwargs):
+            with open_file(fp, mode) as fh:
+                # The reason we do a copy is because we need the sniffer to not
+                # mutate the original file while guessing the format. The
+                # naive solution would be to seek to 0 at the end, but that
+                # would break an explicit offset provided by the user. Instead
+                # we create a shallow copy which works out of the box for
+                # file-like object, but does not work for real files. Instead
+                # the name attribute is reused in open for a new filehandle.
+                # Using seek and tell is not viable because in real files tell
+                # reflects the position of the read-ahead buffer and not the
+                # true offset of the iterator.
+                if hasattr(fh, 'name'):
+                    cfh = open(fh.name, fh.mode)
+                else:
+                    cfh = copy.copy(fh)
+                    cfh.seek(0)
+                try:
+                    return sniffer(cfh, **kwargs)
+                except Exception:
+                    warn("'%s' has encountered a problem.\n"
+                         "Please send the following to our issue tracker at\n"
+                         "https://github.com/biocore/scikit-bio/issues\n\n"
+                         "%s" % (sniffer.__name__, traceback.format_exc()),
+                         FormatIdentificationWarning)
+                    return False, {}
+                finally:
+                    cfh.close()
+
+        wrapped_sniffer.__doc__ = sniffer.__doc__
+        wrapped_sniffer.__name__ = sniffer.__name__
+
+        _sniffers[format] = wrapped_sniffer
+        return wrapped_sniffer
+    return decorator
+
+
+def register_reader(format, cls=None):
+    """Return a decorator for a reader function.
+
+    A decorator factory for reader functions.
+
+    A reader function should have at least the following signature:
+    ``<format_name>_to_<class_name_or_generator>(fh)``. `fh` is **always** an
+    open filehandle. This decorator provides the ability to use filepaths in
+    the same argument position as `fh`. They will automatically be opened and
+    closed.
+
+    **The reader must not close the filehandle**, cleanup will be
+    handled external to the reader and is not its concern. This is true even
+    in the case of generators.
+
+    Any additional `**kwargs` will be passed to the reader and may
+    be used if necessary.
+
+    The reader **must** return an instance of `cls` if `cls` is not None.
+    Otherwise the reader must return a generator. The generator need not deal
+    with closing the `fh`. That is already handled by this decorator.
+
+    .. note:: Failure to adhere to the above interface specified for a reader
+       will result in unintended side-effects.
+
+    Parameters
+    ----------
+    format : str
+        A format name which a decorated reader will be bound to.
+    cls : type, optional
+        The class which a decorated reader will be bound to. When `cls` is None
+        the reader will be bound as returning a generator.
+        Default is None.
+
+    Returns
+    -------
+    function
+        A decorator to be used on a reader. The decorator will raise a
+        ``skbio.io.DuplicateRegistrationError`` if there already exists a
+        *reader* bound to the same permutation of `fmt` and `cls`.
+
+    See Also
+    --------
+    skbio.io.read
+
+    """
+    def decorator(reader):
+        format_class = _formats.setdefault(format, {}).setdefault(cls, {})
+
+        if 'reader' in format_class:
+            raise DuplicateRegistrationError('reader', format, cls)
+
+        file_args = []
+        reader_spec = inspect.getargspec(reader)
+        if reader_spec.defaults is not None:
+            # Concept from http://stackoverflow.com/a/12627202/579416
+            for key, default in zip(
+                    reader_spec.args[-len(reader_spec.defaults):],
+                    reader_spec.defaults):
+                if default is FileSentinel:
+                    file_args.append(key)
+
+        # We wrap the reader so that basic file handling can be managed
+        # externally from the business logic.
+        if cls is None:
+            def wrapped_reader(fp, mode='U', mutate_fh=False, **kwargs):
+                file_keys = []
+                files = [fp]
+                for file_arg in file_args:
+                    if file_arg in kwargs:
+                        if kwargs[file_arg] is not None:
+                            file_keys.append(file_arg)
+                            files.append(kwargs[file_arg])
+                    else:
+                        kwargs[file_arg] = None
+
+                with open_files(files, mode) as fhs:
+                    try:
+                        for key, fh in zip(file_keys, fhs[1:]):
+                            kwargs[key] = fh
+
+                        generator = reader(fhs[0], **kwargs)
+                        if not isinstance(generator, types.GeneratorType):
+                            # Raise an exception to be handled next line,
+                            # because although reader executed without error,
+                            # it is not a generator.
+                            raise Exception()
+                    # If an exception is thrown at this point, it cannot
+                    # be a generator. If there was a `yield` statement, then
+                    # Python would have returned a generator regardless of the
+                    # content. This does not preclude the generator from
+                    # throwing exceptions.
+                    except Exception:
+                            raise InvalidRegistrationError("'%s' is not a "
+                                                           "generator." %
+                                                           reader.__name__)
+
+                    while True:
+                        yield next(generator)
+
+        else:
+            # When an object is instantiated we don't need to worry about the
+            # original position at every step, only at the end.
+            def wrapped_reader(fp, mode='U', mutate_fh=False, **kwargs):
+                file_keys = []
+                files = [fp]
+                for file_arg in file_args:
+                    if file_arg in kwargs:
+                        if kwargs[file_arg] is not None:
+                            file_keys.append(file_arg)
+                            files.append(kwargs[file_arg])
+                    else:
+                        kwargs[file_arg] = None
+
+                with open_files(files, mode) as fhs:
+                    for key, fh in zip(file_keys, fhs[1:]):
+                        kwargs[key] = fh
+                    return reader(fhs[0], **kwargs)
+
+        wrapped_reader.__doc__ = reader.__doc__
+        wrapped_reader.__name__ = reader.__name__
+
+        format_class['reader'] = wrapped_reader
+        return wrapped_reader
+    return decorator
+
+
+def register_writer(format, cls=None):
+    """Return a decorator for a writer function.
+
+    A decorator factory for writer functions.
+
+    A writer function should have at least the following signature:
+    ``<class_name_or_generator>_to_<format_name>(obj, fh)``. `fh` is **always**
+    an open filehandle. This decorator provides the ability to use filepaths in
+    the same argument position as `fh`. They will automatically be opened and
+    closed.
+
+    **The writer must not close the filehandle**, cleanup will be
+    handled external to the writer and is not its concern.
+
+    Any additional `**kwargs` will be passed to the writer and may be used if
+    necessary.
+
+    The writer must not return a value. Instead it should only mutate the `fh`
+    in a way consistent with its purpose.
+
+    If the writer accepts a generator, it should exhaust the generator to
+    ensure that the potentially open filehandle backing said generator is
+    closed.
+
+    .. note:: Failure to adhere to the above interface specified for a writer
+       will result in unintended side-effects.
+
+    Parameters
+    ----------
+    format : str
+        A format name which a decorated writer will be bound to.
+    cls : type, optional
+        The class which a decorated writer will be bound to. If `cls` is None
+        the writer will be bound as expecting a generator.
+        Default is None.
+
+    Returns
+    -------
+    function
+        A decorator to be used on a writer. The decorator will raise a
+        ``skbio.io.DuplicateRegistrationError`` if there already exists a
+        *writer* bound to the same permutation of `fmt` and `cls`.
+
+    See Also
+    --------
+    skbio.io.write
+    skbio.io.get_writer
+
+    """
+    def decorator(writer):
+        format_class = _formats.setdefault(format, {}).setdefault(cls, {})
+
+        if 'writer' in format_class:
+            raise DuplicateRegistrationError('writer', format, cls)
+
+        file_args = []
+        writer_spec = inspect.getargspec(writer)
+        if writer_spec.defaults is not None:
+            # Concept from http://stackoverflow.com/a/12627202/579416
+            for key, default in zip(
+                    writer_spec.args[-len(writer_spec.defaults):],
+                    writer_spec.defaults):
+                if default is FileSentinel:
+                    file_args.append(key)
+
+        # We wrap the writer so that basic file handling can be managed
+        # externally from the business logic.
+        def wrapped_writer(obj, fp, mode='w', **kwargs):
+            file_keys = []
+            files = [fp]
+            for file_arg in file_args:
+                if file_arg in kwargs:
+                    if kwargs[file_arg] is not None:
+                        file_keys.append(file_arg)
+                        files.append(kwargs[file_arg])
+                else:
+                    kwargs[file_arg] = None
+
+            with open_files(files, mode) as fhs:
+                for key, fh in zip(file_keys, fhs[1:]):
+                    kwargs[key] = fh
+                writer(obj, fhs[0], **kwargs)
+
+        wrapped_writer.__doc__ = writer.__doc__
+        wrapped_writer.__name__ = writer.__name__
+
+        format_class['writer'] = wrapped_writer
+        return wrapped_writer
+    return decorator
+
+
+def list_read_formats(cls):
+    """Return a list of available read formats for a given `cls` type.
+
+    Parameters
+    ----------
+    cls : type
+        The class which will be used to determine what read formats exist for
+        an instance of `cls`.
+
+    Returns
+    -------
+    list
+        A list of available read formats for an instance of `cls`. List may be
+        empty.
+
+    See Also
+    --------
+    skbio.io.register_reader
+
+    """
+    return _rw_list_formats('reader', cls)
+
+
+def list_write_formats(cls):
+    """Return a list of available write formats for a given `cls` instance.
+
+    Parameters
+    ----------
+    cls : type
+        The class which will be used to determine what write formats exist for
+        an instance of `cls`.
+
+    Returns
+    -------
+    list
+        A list of available write formats for an instance of `cls`. List may be
+        empty.
+
+    See Also
+    --------
+    skbio.io.register_writer
+
+    """
+    return _rw_list_formats('writer', cls)
+
+
+def _rw_list_formats(name, cls):
+    formats = []
+    for fmt in _formats:
+        if cls in _formats[fmt] and name in _formats[fmt][cls]:
+            formats.append(fmt)
+    return formats
+
+
+def get_sniffer(format):
+    """Return a sniffer for a format.
+
+    Parameters
+    ----------
+    format : str
+        A format string which has a registered sniffer.
+
+    Returns
+    -------
+    function or None
+        Returns a sniffer function if one exists for the given `fmt`.
+        Otherwise it will return None.
+
+    See Also
+    --------
+    skbio.io.register_sniffer
+
+    """
+    return _sniffers.get(format, None)
+
+
+def get_reader(format, cls=None):
+    """Return a reader for a format.
+
+    Parameters
+    ----------
+    format : str
+        A registered format string.
+    cls : type, optional
+        The class which the reader will return an instance of. If `cls` is
+        None, the reader will return a generator.
+        Default is None.
+
+    Returns
+    -------
+    function or None
+        Returns a reader function if one exists for a given `fmt` and `cls`.
+        Otherwise it will return None.
+
+    See Also
+    --------
+    skbio.io.register_reader
+
+    """
+    return _rw_getter('reader', format, cls)
+
+
+def get_writer(format, cls=None):
+    """Return a writer for a format.
+
+    Parameters
+    ----------
+    format : str
+        A registered format string.
+    cls : type, optional
+        The class which the writer will expect an instance of. If `cls` is
+        None, the writer will expect a generator that is identical to what
+        is returned by ``get_reader(<some_format>, None)``.
+        Default is None.
+
+    Returns
+    -------
+    function or None
+        Returns a writer function if one exists for a given `fmt` and `cls`.
+        Otherwise it will return None.
+
+    See Also
+    --------
+    skbio.io.register_writer
+    skbio.io.get_reader
+
+    """
+    return _rw_getter('writer', format, cls)
+
+
+def _rw_getter(name, fmt, cls):
+    if fmt in _formats:
+        if cls in _formats[fmt] and name in _formats[fmt][cls]:
+                return _formats[fmt][cls][name]
+    return None
+
+
def sniff(fp, cls=None, mode='U'):
    """Attempt to guess the format of a file and return format str and kwargs.

    Parameters
    ----------
    fp : filepath or filehandle
        The provided file to guess the format of. Filepaths are
        automatically closed; filehandles are the responsibility of the
        caller.
    cls : type, optional
        Restrict the search to formats that have a registered reader or
        writer for `cls`.
        Default is None (all formats are tested).
    mode : str, optional
        The mode with which `fp` is opened; passed through to each sniffer.
        Default is 'U'.

    Returns
    -------
    (str, kwargs)
        A format name and kwargs for the corresponding reader.

    Raises
    ------
    UnrecognizedFormatError
        Raised when no registered sniffer 'claims' the file, or when the
        format is ambiguous because more than one sniffer 'claims' it.

    See Also
    --------
    skbio.io.register_sniffer

    """
    matches = []
    matched_kwargs = {}
    for fmt in _sniffers:
        # When restricting by class, skip formats with no registration for
        # `cls` -- except the special empty-file pseudo-format, which is
        # always tested.
        if cls is not None and fmt != _empty_file_format:
            if fmt not in _formats or cls not in _formats[fmt]:
                continue
        is_format, fmt_kwargs = _sniffers[fmt](fp, mode=mode)
        if is_format:
            matches.append(fmt)
            matched_kwargs = fmt_kwargs

    if not matches:
        raise UnrecognizedFormatError("Cannot guess the format for %s."
                                      % str(fp))
    if len(matches) > 1:
        raise UnrecognizedFormatError("File format is ambiguous, may be"
                                      " one of %s." % str(matches))
    return matches[0], matched_kwargs
+
+
def read(fp, format=None, into=None, verify=True, mode='U', **kwargs):
    """Read a supported skbio file format into an instance or a generator.

    This function is able to reference and execute all *registered* read
    operations in skbio.

    Parameters
    ----------
    fp : filepath or filehandle
        The location to read the given `format` `into`. Filepaths are
        automatically closed when read; filehandles are the responsibility
        of the caller. In the case of a generator, a filepath will be closed
        when ``StopIteration`` is raised; filehandles are still the
        responsibility of the caller.
    format : str, optional
        The format must be a format name with a reader for the given
        `into` class. If a `format` is not provided or is None, all
        registered sniffers for the provided `into` class will be evaluated
        to attempt to guess the format.
        Default is None.
    into : type, optional
        A class which has a registered reader for a given `format`. If `into`
        is not provided or is None, read will return a generator.
        Default is None.
    verify : bool, optional
        Whether or not to confirm the format of a file if `format` is
        provided. Will raise a ``skbio.io.FormatIdentificationWarning`` if
        the sniffer of `format` returns False.
        Default is True.
    mode : str, optional
        The read mode. This is passed to `open(fp, mode)` internally.
        Default is 'U'.
    kwargs : dict, optional
        Will be passed directly to the appropriate reader.

    Returns
    -------
    object or generator
        If `into` is not None, an instance of the `into` class will be
        provided with internal state consistent with the provided file.
        If `into` is None, a generator will be returned.

    Raises
    ------
    ValueError
        Raised when `format` and `into` are both None.
    skbio.io.UnrecognizedFormatError
        Raised when a reader could not be found for a given `format` or the
        format could not be guessed.
    skbio.io.FormatIdentificationWarning
        Raised when `verify` is True and the sniffer of a `format` provided
        a kwarg value that did not match the user's kwarg value.

    See Also
    --------
    skbio.io.register_reader
    skbio.io.register_sniffer

    """
    if format is None and into is None:
        raise ValueError("`format` and `into` cannot both be None.")

    if format is None:
        # No format given: let the registered sniffers guess one,
        # restricted to formats readable into `into`.
        format, fmt_kwargs = sniff(fp, cls=into, mode=mode)
        kwargs = _override_kwargs(kwargs, fmt_kwargs, verify)
    elif verify:
        # A format was given: double-check it with that format's sniffer,
        # when one is registered.
        sniffer = get_sniffer(format)
        if sniffer is not None:
            is_format, fmt_kwargs = sniffer(fp)
            if not is_format:
                warn("%s could not be positively identified as %s file." %
                     (str(fp), format),
                     FormatIdentificationWarning)
            else:
                # Merge the sniffer-suggested kwargs with the user's,
                # warning when they disagree.
                kwargs = _override_kwargs(kwargs, fmt_kwargs, True)

    reader = get_reader(format, into)
    if reader is None:
        raise UnrecognizedFormatError("Cannot read %s into %s, no reader "
                                      "found." % (format, into.__name__
                                                  if into is not None
                                                  else 'generator'))
    return reader(fp, mode=mode, **kwargs)
+
+
def write(obj, format, into, mode='w', **kwargs):
    """Write a supported skbio file format from an instance or a generator.

    This function is able to reference and execute all *registered* write
    operations in skbio.

    Parameters
    ----------
    obj : object
        The object must have a registered writer for a provided `format`.
    format : str
        The format must be a registered format name with a writer for the
        given `obj`.
    into : filepath or filehandle
        The location to write the given `format` from `obj` into. Filepaths
        are automatically closed when written; filehandles are the
        responsibility of the caller.
    mode : str, optional
        The write mode. This is passed to `open(fp, mode)` internally.
        Default is 'w'.
    kwargs : dict, optional
        Will be passed directly to the appropriate writer.

    Raises
    ------
    skbio.io.UnrecognizedFormatError
        Raised when a writer could not be found for the given `format` and
        `obj`.

    See Also
    --------
    skbio.io.register_writer

    """
    # Generators are registered under ``None`` rather than under a class.
    if isinstance(obj, types.GeneratorType):
        cls = None
    else:
        cls = obj.__class__

    writer = get_writer(format, cls)
    if writer is None:
        raise UnrecognizedFormatError("Cannot write %s into %s, no %s writer "
                                      "found." % (format, str(into),
                                                  'generator' if cls is None
                                                  else str(cls)))

    writer(obj, into, mode=mode, **kwargs)
+
+
# This is meant to be a handy indicator to the user that they have done
# something wrong.
@register_sniffer(_empty_file_format)
def empty_file_sniffer(fh):
    """Claim the file only when it contains nothing but whitespace."""
    # Any line with visible content disqualifies the file; all() stops at
    # the first such line, mirroring the early return of an explicit loop.
    return all(not line.strip() for line in fh), {}
+
+
def initialize_oop_interface():
    """Attach ``read``/``write`` methods to every class with registrations."""
    # Collect every class appearing in any format's registry.
    registered_classes = {cls for fmt in _formats for cls in _formats[fmt]}
    # ``None`` entries denote generator registrations, not classes.
    for cls in registered_classes:
        if cls is not None:
            _apply_read(cls)
            _apply_write(cls)
+
+
def _apply_read(cls):
    """Add read method if any formats have a registered reader for `cls`."""
    # Bind the module-level ``read`` under another name, because the
    # classmethod defined below shadows it locally.
    skbio_io_read = globals()['read']
    read_formats = list_read_formats(cls)
    if read_formats:
        @classmethod
        def read(cls, fp, format=None, **kwargs):
            # Delegate to skbio.io.read with `into` fixed to this class.
            return skbio_io_read(fp, into=cls, format=format, **kwargs)

        # A classmethod object is a read-only wrapper; the docstring must
        # be set on the underlying function via ``__func__``.
        read.__func__.__doc__ = _read_docstring % (
            cls.__name__,
            _formats_for_docs(read_formats),
            cls.__name__,
            cls.__name__,
            cls.__name__,
            _import_paths(read_formats)
        )
        cls.read = read
+
+
def _apply_write(cls):
    """Add write method if any formats have a registered writer for `cls`."""
    # Bind the module-level ``write`` under another name, because the
    # method defined below shadows it locally.
    skbio_io_write = globals()['write']
    write_formats = list_write_formats(cls)
    if write_formats:
        if not hasattr(cls, 'default_write_format'):
            raise NotImplementedError(
                "Classes with registered writers must provide a "
                "`default_write_format`. Please add `default_write_format` to"
                " '%s'." % cls.__name__)

        # NOTE: the default format is captured once, at the moment this
        # method is created; later changes to ``cls.default_write_format``
        # will not be reflected in the default.
        def write(self, fp, format=cls.default_write_format, **kwargs):
            skbio_io_write(self, into=fp, format=format, **kwargs)

        write.__doc__ = _write_docstring % (
            cls.__name__,
            _formats_for_docs(write_formats),
            cls.__name__,
            cls.default_write_format,
            _import_paths(write_formats)
        )
        cls.write = write
+
+
+def _import_paths(formats):
+    lines = []
+    for fmt in formats:
+        lines.append("skbio.io." + fmt)
+    return '\n'.join(lines)
+
+
+def _formats_for_docs(formats):
+    lines = []
+    for fmt in formats:
+        lines.append("- ``'%s'`` (:mod:`skbio.io.%s`)" % (fmt, fmt))
+    return '\n'.join(lines)
+
+
# Template for the ``read`` classmethod docstring attached by _apply_read.
# Interpolated with: class name, supported-format bullet list, class name
# (three more times), and the format import paths -- in that order.
_read_docstring = """Create a new ``%s`` instance from a file.

This is a convenience method for :mod:`skbio.io.read`. For more
information about the I/O system in scikit-bio, please see
:mod:`skbio.io`.

Supported file formats include:

%s

Parameters
----------
fp : filepath or filehandle
    The location to read the given `format`. Filepaths are
    automatically closed when read; filehandles are the
    responsibility of the caller.
format : str, optional
    The format must be a format name with a reader for ``%s``.
    If a `format` is not provided or is None, it will attempt to
    guess the format.
kwargs : dict, optional
    Keyword arguments passed to :mod:`skbio.io.read` and the file
    format reader for ``%s``.

Returns
-------
%s
    A new instance.

See Also
--------
write
skbio.io.read
%s

"""
+
# Template for the ``write`` method docstring attached by _apply_write.
# Interpolated with: class name, supported-format bullet list, class name,
# default write format, and the format import paths -- in that order.
_write_docstring = """Write an instance of ``%s`` to a file.

This is a convenience method for :mod:`skbio.io.write`. For more
information about the I/O system in scikit-bio, please see
:mod:`skbio.io`.

Supported file formats include:

%s

Parameters
----------
fp : filepath or filehandle
    The location to write the given `format` into. Filepaths are
    automatically closed when written; filehandles are the
    responsibility of the caller.
format : str
    The format must be a registered format name with a writer for
    ``%s``.
    Default is `'%s'`.
kwargs : dict, optional
    Keyword arguments passed to :mod:`skbio.io.write` and the
    file format writer.

See Also
--------
read
skbio.io.write
%s

"""
diff --git a/skbio/io/_warning.py b/skbio/io/_warning.py
new file mode 100644
index 0000000..b049420
--- /dev/null
+++ b/skbio/io/_warning.py
@@ -0,0 +1,19 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+
+class FormatIdentificationWarning(Warning):
+    """Warn when the sniffer of a format cannot confirm the format."""
+    pass
+
+
+class ArgumentOverrideWarning(Warning):
+    """Warn when a user provided kwarg differs from a guessed kwarg."""
+    pass
diff --git a/skbio/io/clustal.py b/skbio/io/clustal.py
new file mode 100644
index 0000000..c29fa98
--- /dev/null
+++ b/skbio/io/clustal.py
@@ -0,0 +1,328 @@
+r"""
+Clustal format (:mod:`skbio.io.clustal`)
+========================================
+
+.. currentmodule:: skbio.io.clustal
+
+Clustal format (``clustal``) stores multiple sequence alignments. This format
+was originally introduced in the Clustal package [1]_.
+
+Format Support
+--------------
+**Has Sniffer: Yes**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|Yes   |Yes   |:mod:`skbio.alignment.Alignment`                               |
++------+------+---------------------------------------------------------------+
+
+Format Specification
+--------------------
+A clustal-formatted file is a plain text format. It can optionally have a
+header, which states the clustal version number. This is followed by the
+multiple sequence alignment, and optional information about the degree of
+conservation at each position in the alignment [2]_.
+
+Alignment Section
+^^^^^^^^^^^^^^^^^
+Each sequence in the alignment is divided into subsequences each at most
+60 characters long. The sequence identifier for each sequence precedes each
+subsequence. Each subsequence can optionally be followed by the cumulative
+number of non-gap characters up to that point in the full sequence (not
+included in the examples below). A line containing conservation information
+about each position in the alignment can optionally follow all of the
+subsequences (not included in the examples below).
+
+.. note:: scikit-bio does not support writing conservation information
+
+.. note:: scikit-bio will only write a clustal-formatted file if the
+   alignment's sequence characters are valid IUPAC characters, as defined in
+   :mod:`skbio.sequence`. The specific lexicon that is validated against
+   depends on the type of sequences stored in the alignment.
+
+
+Examples
+--------
+
+Assume we have a clustal-formatted file with the following contents::
+
+    CLUSTAL W (1.82) multiple sequence alignment
+
+    abc   GCAUGCAUCUGCAUACGUACGUACGCAUGCAUCA
+    def   ----------------------------------
+    xyz   ----------------------------------
+
+    abc   GUCGAUACAUACGUACGUCGUACGUACGU-CGAC
+    def   ---------------CGCGAUGCAUGCAU-CGAU
+    xyz   -----------CAUGCAUCGUACGUACGCAUGAC
+
+We can use the following code to read a clustal file:
+
+>>> from StringIO import StringIO
+>>> from skbio import read
+>>> from skbio import Alignment
+>>> clustal_f = StringIO('abc   GCAUGCAUCUGCAUACGUACGUACGCAUGCA\n'
+...                      'def   -------------------------------\n'
+...                      'xyz   -------------------------------\n'
+...                      '\n'
+...                      'abc   GUCGAUACAUACGUACGUCGGUACGU-CGAC\n'
+...                      'def   ---------------CGUGCAUGCAU-CGAU\n'
+...                      'xyz   -----------CAUUCGUACGUACGCAUGAC\n')
+>>> for dna in read(clustal_f, format="clustal", into=Alignment):
+...     print(dna.id)
+...     print(dna.sequence)
+abc
+GCAUGCAUCUGCAUACGUACGUACGCAUGCAGUCGAUACAUACGUACGUCGGUACGU-CGAC
+def
+----------------------------------------------CGUGCAUGCAU-CGAU
+xyz
+------------------------------------------CAUUCGUACGUACGCAUGAC
+
+We can use the following code to write to a clustal-formatted file:
+
+>>> from skbio import Alignment, DNA
+>>> from skbio.io import write
+>>> seqs = [DNA('ACCGTTGTA-GTAGCT', id='seq1'),
+...         DNA('A--GTCGAA-GTACCT', id='sequence-2'),
+...         DNA('AGAGTTGAAGGTATCT', id='3')]
+>>> aln = Alignment(seqs)
+>>> from StringIO import StringIO
+>>> fh = StringIO()
+>>> aln.write(fh, format='clustal')
+>>> print(fh.getvalue()) # doctest: +NORMALIZE_WHITESPACE
+CLUSTAL
+<BLANKLINE>
+<BLANKLINE>
+seq1        ACCGTTGTA-GTAGCT
+sequence-2  A--GTCGAA-GTACCT
+3           AGAGTTGAAGGTATCT
+<BLANKLINE>
+<BLANKLINE>
+
+References
+----------
+.. [1] http://www.sciencedirect.com/science/article/pii/0378111988903307
+.. [2] http://web.mit.edu/meme_v4.9.0/doc/clustalw-format.html
+"""
+
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+
+from skbio.parse.record import DelimitedSplitter
+from skbio.io import (register_reader, register_writer, register_sniffer,
+                      ClustalFormatError)
+from skbio.sequence import BiologicalSequence
+from skbio.alignment import Alignment
+
+
+def _label_line_parser(record, splitter, strict=True):
+    """Returns dict mapping list of data to labels, plus list with field order.
+
+    Field order contains labels in order encountered in file.
+
+    NOTE: doesn't care if lines are out of order in different blocks. This
+    should never happen anyway, but it's possible that this behavior should
+    be changed to tighten up validation.
+    """
+    labels = []
+    result = {}
+    for line in record:
+        try:
+            key, val = splitter(line.rstrip())
+        except:
+
+            if strict:
+                raise ClustalFormatError(
+                    "Failed to extract key and value from line %s" %
+                    line)
+            else:
+                continue  # just skip the line if not strict
+
+        if key in result:
+            result[key].append(val)
+        else:
+            result[key] = [val]
+            labels.append(key)
+    return result, labels
+
+
+def _is_clustal_seq_line(line):
+    """Returns True if line starts with a non-blank character but not 'CLUSTAL'
+
+    Useful for filtering other lines out of the file.
+    """
+    return line and (not line[0].isspace()) and\
+        (not line.startswith('CLUSTAL')) and (not line.startswith('MUSCLE'))
+
+last_space = DelimitedSplitter(None, -1)
+
+
+def _delete_trailing_number(line):
+    """Deletes trailing number from a line.
+
+    WARNING: does not preserve internal whitespace when a number is removed!
+    (converts each whitespace run to a single space). Returns the original
+    line if it didn't end in a number.
+    """
+    pieces = line.split()
+    try:
+        int(pieces[-1])
+        return ' '.join(pieces[:-1])
+    except ValueError:  # no trailing numbers
+        return line
+
+
+def _check_length(data, labels, num_seqs_check=None):
+    """
+    Checks the lengths of the clustal sequences to make
+    sure that they are lining up right
+
+    num_seqs_check: The number of sequences to check
+
+    Return True if all of the subsequence lengths are equal
+                or if data is empty
+    Return False if one of the subsequence lengths differs
+    """
+    if len(labels) == 0:
+        return True
+    num_subseqs = len(data[labels[0]])
+    if num_seqs_check is None:
+        num_seqs_check = num_subseqs
+    else:
+        if num_seqs_check > num_subseqs:
+            num_seqs_check = num_subseqs
+
+    subseq_length = len(data[labels[0]][0])
+
+    end_lengths = set()  # subsequence lengths at end of file
+    for i in range(num_seqs_check):
+        for label in labels:
+            seq = data[label][i]
+            if len(seq) > subseq_length:
+                return False
+            elif i+1 == num_subseqs:  # Last subsequence
+                end_lengths.add(len(seq))
+            elif len(seq) < subseq_length:
+                return False
+    # All trailing subsequences must be the same
+    if len(end_lengths) > 1:
+        return False
+    return True
+
+
@register_sniffer("clustal")
def _clustal_sniffer(fh):
    """Guess whether `fh` holds a clustal-formatted alignment.

    The following conditions preclude a file from being clustal:
        * it is an empty file
        * the whole sequences have differing lengths
        * the sub-sequences have differing lengths
        * one of the sequence ids is not immediately followed by a
          subsequence
    """
    try:
        seq_lines = filter(_is_clustal_seq_line, fh)
        records = map(_delete_trailing_number, seq_lines)
        data, labels = _label_line_parser(records, last_space, strict=True)
        # Checking every block of a large file would be wasteful for a
        # guess; the first 50 subsequences are enough.
        if not _check_length(data, labels, 50):
            raise ClustalFormatError("Sequences not aligned properly")
    except ClustalFormatError:
        return False, {}
    return len(data) > 0, {}
+
+
@register_writer('clustal', Alignment)
def _alignment_to_clustal(obj, fh):
    r"""Write an alignment's sequences to `fh` in clustal format.

    Parameters
    ----------
    obj : Alignment
        An alignment object containing a set of BiologicalSequence objects.
    fh : open file handle object
        Destination for the clustal-formatted output.

    """
    chunk = 60  # clustal wraps sequence data at 60 characters per line
    ids, sequences = zip(*[(s.id, s.sequence) for s in obj])
    id_width = max(map(len, ids))
    aln_len = max(map(len, sequences))
    fh.write('CLUSTAL\n\n\n')
    for start in range(0, aln_len, chunk):
        for seq_id, seq in zip(ids, sequences):
            # Left-justify ids so the sequence columns line up.
            fh.write("%s\t%s\n" % (seq_id.ljust(id_width),
                                   seq[start:start + chunk]))
        fh.write("\n")
+
+
@register_reader('clustal', Alignment)
def _clustal_to_alignment(fh, strict=True):
    r"""Yield labels and sequences from msa (multiple sequence alignment).

    Parameters
    ----------
    fh : open file object
        An open Clustal file.
    strict : boolean
        Whether or not to raise a ``ClustalFormatError``
        when no labels are found.

    Returns
    -------
    skbio.Alignment
        Alignment object containing aligned biological sequences.

    Raises
    ------
    skbio.util.exception.ClustalFormatError
        If the sequences in `fh` don't have the same sequence length
        or if the sequence ids don't properly match with the subsequences.

    Notes
    -----
    Skips any line that starts with a blank.

    ``_clustal_to_alignment`` preserves the order of the sequences from the
    original file.  However, it does use a dict as an intermediate, so
    two sequences can't have the same label. This is probably OK since
    Clustal will refuse to run on a FASTA file in which two sequences have
    the same label, but could potentially cause trouble with manually
    edited files (all the segments of the conflicting sequences would
    be interleaved, possibly in an unpredictable way).

    If the lines have trailing numbers (i.e. Clustal was run with
    `-LINENOS=ON`), silently deletes them. Does not check that the numbers
    actually correspond to the number of chars in the sequence printed so
    far.

    References
    ----------
    .. [1] Thompson JD, Higgins DG, Gibson TJ,  "CLUSTAL W: improving the
        sensitivity of progressive multiple sequence alignment through
        sequence weighting, position-specific gap penalties and weight
        matrix choice. Thompson", Nucleic Acids Res. 1994 Nov
        11;22(22):4673-80.

    """

    # Strip program-header/blank lines and any trailing position numbers
    # before handing the record lines to the label parser.
    records = map(_delete_trailing_number,
                  filter(_is_clustal_seq_line, fh))
    data, labels = _label_line_parser(records, last_space, strict)

    aligned_correctly = _check_length(data, labels)
    if not aligned_correctly:
        raise ClustalFormatError("Sequences not aligned properly")
    alns = []
    # Join each label's subsequence chunks into one full-length sequence,
    # preserving the order labels first appeared in the file.
    for key in labels:
        alns.append(BiologicalSequence(id=key, sequence=''.join(data[key])))
    return Alignment(alns)
diff --git a/skbio/io/fasta.py b/skbio/io/fasta.py
new file mode 100644
index 0000000..ea84ad3
--- /dev/null
+++ b/skbio/io/fasta.py
@@ -0,0 +1,833 @@
+"""
+FASTA/QUAL format (:mod:`skbio.io.fasta`)
+=========================================
+
+.. currentmodule:: skbio.io.fasta
+
+The FASTA file format (``fasta``) stores biological (i.e., nucleotide or
+protein) sequences in a simple plain text format that is both human-readable
+and easy to parse. The file format was first introduced and used in the FASTA
+software package [1]_. Additional descriptions of the file format can be found
+in [2]_ and [3]_.
+
+An example of a FASTA-formatted file containing two DNA sequences::
+
+    >seq1 db-accession-149855
+    CGATGTCGATCGATCGATCGATCAG
+    >seq2 db-accession-34989
+    CATCGATCGATCGATGCATGCATGCATG
+
+The QUAL file format is an additional format related to FASTA. A FASTA file is
+sometimes accompanied by a QUAL file, particularly when the fasta file contains
+sequences generated on a high-throughput sequencing instrument. QUAL files
+store a Phred quality score (nonnegative integer) for each base in a sequence
+stored in FASTA format (see [4]_ for more details). scikit-bio supports reading
+and writing FASTA (and optionally QUAL) file formats.
+
+Format Support
+--------------
+**Has Sniffer: Yes**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|Yes   |Yes   |generator of :mod:`skbio.sequence.BiologicalSequence` objects  |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.alignment.SequenceCollection`                      |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.alignment.Alignment`                               |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.BiologicalSequence`                       |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.NucleotideSequence`                       |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.DNASequence`                              |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.RNASequence`                              |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.ProteinSequence`                          |
++------+------+---------------------------------------------------------------+
+
+.. note:: All readers and writers support an optional QUAL file via the
+   ``qual`` parameter. If one is provided, quality scores will be read/written
+   in addition to FASTA sequence data.
+
+Format Specification
+--------------------
+The following sections define the FASTA and QUAL file formats in detail.
+
+FASTA Format
+^^^^^^^^^^^^
+A FASTA file contains one or more biological sequences. The sequences are
+stored sequentially, with a *record* for each sequence (also referred to as a
+*FASTA record*). Each *record* consists of a single-line *header* (sometimes
+referred to as a *defline*, *label*, *description*, or *comment*) followed by
+the sequence data, optionally split over multiple lines. Blank or
+whitespace-only lines are not allowed anywhere in the FASTA file.
+
+.. note:: scikit-bio does not currently support legacy FASTA format (i.e.,
+   headers/comments denoted with a semicolon). The format supported by
+   scikit-bio (described below in detail) most closely resembles the
+   description given in NCBI's BLAST documentation [3]_. See [2]_ for more
+   details on legacy FASTA format. If you would like legacy FASTA format
+   support added to scikit-bio, please consider submitting a feature request on
+   the
+   `scikit-bio issue tracker <https://github.com/biocore/scikit-bio/issues>`_
+   (pull requests are also welcome!).
+
+Sequence Header
+~~~~~~~~~~~~~~~
+Each sequence header consists of a single line beginning with a greater-than
+(``>``) symbol. Immediately following this is a sequence identifier (ID) and
+description separated by one or more whitespace characters. Both sequence ID
+and description are optional and are represented as the empty string (``''``)
+in scikit-bio's objects if they are not present in the header.
+
+A sequence ID consists of a single *word*: all characters after the greater-
+than symbol and before the first whitespace character (if any) are taken as the
+sequence ID. Unique sequence IDs are not strictly enforced by the FASTA format
+itself. A single standardized ID format is similarly not enforced by FASTA
+format, though it is often common to use a unique library accession number for
+a sequence ID (e.g., NCBI's FASTA defline format [5]_).
+
+.. note:: scikit-bio will enforce sequence ID uniqueness depending on the type
+   of object that the FASTA file is read into. For example, reading a FASTA
+   file as a generator of ``BiologicalSequence`` objects will not enforce
+   unique IDs since it simply yields each sequence it finds in the FASTA file.
+   However, if the FASTA file is read into a ``SequenceCollection`` object, ID
+   uniqueness will be enforced because that is a requirement of a
+   ``SequenceCollection``.
+
+If a description is present, it is taken as the remaining characters that
+follow the sequence ID and initial whitespace(s). The description is considered
+additional information about the sequence (e.g., comments about the source of
+the sequence or the molecule that it encodes).
+
+For example, consider the following header::
+
+    >seq1 db-accession-149855
+
+``seq1`` is the sequence ID and ``db-accession-149855`` is the sequence
+description.
+
+.. note:: scikit-bio's readers will remove all leading and trailing whitespace
+   from the description. If a header line begins with whitespace following the
+   ``>``, the ID is assumed to be missing and the remainder of the line is
+   taken as the description.
+
+Sequence Data
+~~~~~~~~~~~~~
+Biological sequence data follows the header, and can be split over multiple
+lines. The sequence data (i.e., nucleotides or amino acids) are stored using
+the standard IUPAC lexicon (single-letter codes).
+
+.. note:: scikit-bio supports both upper and lower case characters. Both ``-``
+   and ``.`` are supported as gap characters. See :mod:`skbio.sequence` for
+   more details on how scikit-bio interprets sequence data in its in-memory
+   objects.
+
+   scikit-bio will remove leading and trailing whitespace from each line of
+   sequence data before joining the sequence chunks into a single sequence.
+   Whitespace characters are **not** removed from the middle of the sequence
+   chunks. Likewise, other invalid IUPAC characters are **not** removed from
+   the sequence data as it is read. Thus, it is possible to create an invalid
+   in-memory sequence object (see warning below).
+
+.. warning:: In an effort to maintain reasonable performance while reading
+   FASTA files (which can be quite large), validation of sequence data is
+   **not** performed during reading. It is the responsibility of the user to
+   validate their in-memory representation of the data if desired (e.g., by
+   calling ``is_valid`` on the returned object). Thus, it is possible to read
+   invalid characters into objects (e.g. whitespace occurring in the middle of
+   a sequence, or invalid IUPAC DNA characters in a DNA sequence).
+
+QUAL Format
+^^^^^^^^^^^
+A QUAL file contains quality scores for one or more biological sequences stored
+in a corresponding FASTA file. QUAL format is very similar to FASTA format: it
+stores records sequentially, with each record beginning with a header line
+containing a sequence ID and description. The same rules apply to QUAL headers
+as FASTA headers (see the above sections for details). scikit-bio processes
+FASTA and QUAL headers in exactly the same way.
+
+Instead of storing biological sequence data in each record, a QUAL file stores
+a Phred quality score for each base in the corresponding sequence. Quality
+scores are represented as nonnegative integers separated by whitespace
+(typically a single space or newline), and can span multiple lines.
+
+.. note:: When reading FASTA and QUAL files, scikit-bio requires records to be
+   in the same order in both files (i.e., each FASTA and QUAL record must have
+   the same ID and description after being parsed). In addition to having the
+   same order, the number of FASTA records must match the number of QUAL
+   records (i.e., missing or additional records are not allowed). scikit-bio
+   also requires that the number of quality scores match the number of bases in
+   the corresponding sequence.
+
+   When writing FASTA and QUAL files, scikit-bio will maintain the same
+   ordering of records in both files (i.e., using the same ID and description
+   in both records) to support future reading.
+
+Format Parameters
+-----------------
+The following parameters are available to change how FASTA/QUAL files are read
+or written in scikit-bio.
+
+QUAL File Parameter (Readers and Writers)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The ``qual`` parameter is available to all FASTA format readers and writers. It
+can be any file-like type supported by scikit-bio's I/O registry (e.g., file
+handle, file path, etc.). If ``qual`` is provided when reading, quality scores
+will be included in each in-memory ``BiologicalSequence`` object, in addition
+to sequence data stored in the FASTA file. When writing, quality scores will be
+written in QUAL format in addition to the sequence data being written in FASTA
+format.
+
+Reader-specific Parameters
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+The available reader parameters differ depending on which reader is used.
+
+Generator, SequenceCollection, and Alignment Reader Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``constructor`` parameter can be used with the ``BiologicalSequence``
+generator, ``SequenceCollection``, and ``Alignment`` FASTA readers.
+``constructor`` specifies the in-memory type of each sequence that is parsed,
+and defaults to ``BiologicalSequence``. ``constructor`` should be a subclass of
+``BiologicalSequence``. For example, if you know that the FASTA file you're
+reading contains protein sequences, you would pass
+``constructor=ProteinSequence`` to the reader call.
+
+.. note:: The FASTA sniffer will not attempt to guess the ``constructor``
+   parameter, so it will always default to ``BiologicalSequence`` if another
+   type is not provided to the reader.
+
+BiologicalSequence Reader Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``seq_num`` parameter can be used with the ``BiologicalSequence``,
+``NucleotideSequence``, ``DNASequence``, ``RNASequence``, and
+``ProteinSequence`` FASTA readers. ``seq_num`` specifies which sequence to read
+from the FASTA file (and optional QUAL file), and defaults to 1 (i.e., such
+that the first sequence is read). For example, to read the 50th sequence from a
+FASTA file, you would pass ``seq_num=50`` to the reader call.
+
+Writer-specific Parameters
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+The following parameters are available to all FASTA format writers:
+
+- ``id_whitespace_replacement``: string to replace **each** whitespace
+  character in a sequence ID. This parameter is useful for cases where an
+  in-memory sequence ID contains whitespace, which would result in an on-disk
+  representation that would not be read back into memory as the same ID (since
+  IDs in FASTA format cannot contain whitespace). Defaults to ``_``. If
+  ``None``, no whitespace replacement is performed and IDs are written as they
+  are stored in memory (this has the potential to create an invalid
+  FASTA-formatted file; see note below). This parameter also applies to a QUAL
+  file if one is provided.
+
+- ``description_newline_replacement``: string to replace **each** newline
+  character in a sequence description. Since a FASTA header must be a single
+  line, newlines are not allowed in sequence descriptions and must be replaced
+  in order to write a valid FASTA file. Defaults to a single space. If
+  ``None``, no newline replacement is performed and descriptions are written as
+  they are stored in memory (this has the potential to create an invalid
+  FASTA-formatted file; see note below). This parameter also applies to a QUAL
+  file if one is provided.
+
+- ``max_width``: integer specifying the maximum line width (i.e., number of
+  characters) for sequence data and/or quality scores. If a sequence or its
+  quality scores are longer than ``max_width``, it will be split across
+  multiple lines, each with a maximum width of ``max_width``. Note that there
+  are some caveats when splitting quality scores. A single quality score will
+  *never* be split across multiple lines, otherwise it would become two
+  different quality scores when read again. Thus, splitting only occurs
+  *between* quality scores. This makes it possible to have a single long
+  quality score written on its own line that exceeds ``max_width``. For
+  example, the quality score ``12345`` would not be split across multiple lines
+  even if ``max_width=3``. Thus, a 5-character line would be written. Default
+  behavior is to not split sequence data or quality scores across multiple
+  lines.
+
+.. note:: The FASTA format writers will have noticeably better runtime
+   performance if ``id_whitespace_replacement`` and/or
+   ``description_newline_replacement`` are set to ``None`` so that whitespace
+   replacement is not performed during writing. However, this can potentially
+   create invalid FASTA files, especially if there are newline characters in
+   the IDs or descriptions. For IDs with whitespace, this can also affect how
+   the IDs are read into memory in a subsequent read operation. For example, if
+   an in-memory sequence ID is ``'seq 1'`` and
+   ``id_whitespace_replacement=None``, reading the FASTA file back into memory
+   would result in an ID of ``'seq'``, and ``'1'`` would be part of the
+   sequence description.
+
+Examples
+--------
+
+Reading and Writing FASTA Files
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Suppose we have the following FASTA file with five equal-length sequences
+(example modified from [6]_)::
+
+    >seq1 Turkey
+    AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT
+    >seq2 Salmo gair
+    AAGCCTTGGCAGTGCAGGGTGAGCCGTGG
+    CCGGGCACGGTAT
+    >seq3 H. Sapiens
+    ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA
+    >seq4 Chimp
+    AAACCCTTGCCG
+    TTACGCTTAAAC
+    CGAGGCCGGGAC
+    ACTCAT
+    >seq5 Gorilla
+    AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
+
+.. note:: Original copyright notice for the above example file:
+
+   *(c) Copyright 1986-2008 by The University of Washington. Written by Joseph
+   Felsenstein. Permission is granted to copy this document provided that no
+   fee is charged for it and that this copyright notice is not removed.*
+
+Note that the sequences are not required to be of equal length in order for the
+file to be a valid FASTA file (this depends on the object that you're reading
+the file into). Also note that some of the sequences occur on a single line,
+while others are split across multiple lines.
+
+Let's define this file in-memory as a ``StringIO``, though this could be a real
+file path, file handle, or anything that's supported by scikit-bio's I/O
+registry in practice:
+
+>>> from StringIO import StringIO
+>>> fs = (
+...     ">seq1 Turkey\\n"
+...     "AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT\\n"
+...     ">seq2 Salmo gair\\n"
+...     "AAGCCTTGGCAGTGCAGGGTGAGCCGTGG\\n"
+...     "CCGGGCACGGTAT\\n"
+...     ">seq3 H. Sapiens\\n"
+...     "ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA\\n"
+...     ">seq4 Chimp\\n"
+...     "AAACCCTTGCCG\\n"
+...     "TTACGCTTAAAC\\n"
+...     "CGAGGCCGGGAC\\n"
+...     "ACTCAT\\n"
+...     ">seq5 Gorilla\\n"
+...     "AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA\\n")
+>>> fh = StringIO(fs)
+
+Let's read the FASTA file into a ``SequenceCollection``:
+
+>>> from skbio import SequenceCollection
+>>> sc = SequenceCollection.read(fh)
+>>> sc.sequence_lengths()
+[42, 42, 42, 42, 42]
+>>> sc.ids()
+['seq1', 'seq2', 'seq3', 'seq4', 'seq5']
+
+We see that all 5 sequences have 42 characters, and that each of the sequence
+IDs were successfully read into memory.
+
+Since these sequences are of equal length (presumably because they've been
+aligned), let's load the FASTA file into an ``Alignment`` object, which is a
+more appropriate data structure:
+
+>>> from skbio import Alignment
+>>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
+>>> aln = Alignment.read(fh)
+>>> aln.sequence_length()
+42
+
+Note that we were able to read the FASTA file into two different data
+structures (``SequenceCollection`` and ``Alignment``) using the exact same
+``read`` method call (and underlying reading/parsing logic). Also note that we
+didn't specify a file format in the ``read`` call. The FASTA sniffer detected
+the correct file format for us!
+
+Let's inspect the type of sequences stored in the ``Alignment``:
+
+>>> aln[0]
+<BiologicalSequence: AAGCTNGGGC... (length: 42)>
+
+By default, sequences are loaded as ``BiologicalSequence`` objects. We can
+change the type of sequence via the ``constructor`` parameter:
+
+>>> from skbio import DNASequence
+>>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
+>>> aln = Alignment.read(fh, constructor=DNASequence)
+>>> aln[0]
+<DNASequence: AAGCTNGGGC... (length: 42)>
+
+We now have an ``Alignment`` of ``DNASequence`` objects instead of
+``BiologicalSequence`` objects. Validation of sequence character data is not
+performed during reading (see warning above for details). To verify that each
+of the sequences are valid DNA sequences:
+
+>>> aln.is_valid()
+True
+
+To write the alignment in FASTA format:
+
+>>> new_fh = StringIO()
+>>> aln.write(new_fh)
+>>> print(new_fh.getvalue())
+>seq1 Turkey
+AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT
+>seq2 Salmo gair
+AAGCCTTGGCAGTGCAGGGTGAGCCGTGGCCGGGCACGGTAT
+>seq3 H. Sapiens
+ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA
+>seq4 Chimp
+AAACCCTTGCCGTTACGCTTAAACCGAGGCCGGGACACTCAT
+>seq5 Gorilla
+AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
+<BLANKLINE>
+>>> new_fh.close()
+
+Both ``SequenceCollection`` and ``Alignment`` load all of the sequences from
+the FASTA file into memory at once. If the FASTA file is large (which is often
+the case), this may be infeasible if you don't have enough memory. To work
+around this issue, you can stream the sequences using scikit-bio's
+generator-based FASTA reader and writer. The generator-based reader yields
+``BiologicalSequence`` objects (or subclasses if ``constructor`` is supplied)
+one at a time, instead of loading all sequences into memory. For example, let's
+use the generator-based reader to process a single sequence at a time in a
+``for`` loop:
+
+>>> import skbio.io
+>>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
+>>> for seq in skbio.io.read(fh, format='fasta'):
+...     seq
+<BiologicalSequence: AAGCTNGGGC... (length: 42)>
+<BiologicalSequence: AAGCCTTGGC... (length: 42)>
+<BiologicalSequence: ACCGGTTGGC... (length: 42)>
+<BiologicalSequence: AAACCCTTGC... (length: 42)>
+<BiologicalSequence: AAACCCTTGC... (length: 42)>
+
+A single sequence can also be read into a ``BiologicalSequence`` (or subclass):
+
+>>> from skbio import BiologicalSequence
+>>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
+>>> BiologicalSequence.read(fh)
+<BiologicalSequence: AAGCTNGGGC... (length: 42)>
+
+By default, the first sequence in the FASTA file is read. This can be
+controlled with ``seq_num``. For example, to read the fifth sequence:
+
+>>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
+>>> BiologicalSequence.read(fh, seq_num=5)
+<BiologicalSequence: AAACCCTTGC... (length: 42)>
+
+We can use the same API to read the fifth sequence into a ``DNASequence``:
+
+>>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
+>>> dna_seq = DNASequence.read(fh, seq_num=5)
+>>> dna_seq
+<DNASequence: AAACCCTTGC... (length: 42)>
+
+Individual sequence objects can also be written in FASTA format:
+
+>>> new_fh = StringIO()
+>>> dna_seq.write(new_fh)
+>>> print(new_fh.getvalue())
+>seq5 Gorilla
+AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
+<BLANKLINE>
+>>> new_fh.close()
+
+Reading and Writing FASTA/QUAL Files
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+In addition to reading and writing standalone FASTA files, scikit-bio also
+supports reading and writing FASTA and QUAL files together. Suppose we have the
+following FASTA file::
+
+    >seq1 db-accession-149855
+    CGATGTC
+    >seq2 db-accession-34989
+    CATCG
+
+Also suppose we have the following QUAL file::
+
+    >seq1 db-accession-149855
+    40 39 39 4
+    50 1 100
+    >seq2 db-accession-34989
+    3 3 10 42 80
+
+>>> fasta_fs = (
+...     ">seq1 db-accession-149855\\n"
+...     "CGATGTC\\n"
+...     ">seq2 db-accession-34989\\n"
+...     "CATCG\\n")
+>>> fasta_fh = StringIO(fasta_fs)
+>>> qual_fs = (
+...     ">seq1 db-accession-149855\\n"
+...     "40 39 39 4\\n"
+...     "50 1 100\\n"
+...     ">seq2 db-accession-34989\\n"
+...     "3 3 10 42 80\\n")
+>>> qual_fh = StringIO(qual_fs)
+
+To read in a single ``BiologicalSequence`` at a time, we can use the
+generator-based reader as we did above, providing both FASTA and QUAL files:
+
+>>> for seq in skbio.io.read(fasta_fh, qual=qual_fh, format='fasta'):
+...     seq
+...     seq.quality
+<BiologicalSequence: CGATGTC (length: 7)>
+array([ 40,  39,  39,   4,  50,   1, 100])
+<BiologicalSequence: CATCG (length: 5)>
+array([ 3,  3, 10, 42, 80])
+
+Note that the sequence objects have quality scores since we provided a QUAL
+file. The other FASTA readers operate in a similar manner.
+
+Now let's load the sequences and their quality scores into a
+``SequenceCollection``:
+
+>>> fasta_fh = StringIO(fasta_fs) # reload to read from the beginning again
+>>> qual_fh = StringIO(qual_fs) # reload to read from the beginning again
+>>> sc = SequenceCollection.read(fasta_fh, qual=qual_fh)
+>>> sc
+<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
+
+To write the sequence data and quality scores in the ``SequenceCollection`` to
+FASTA and QUAL files, respectively, we run:
+
+>>> new_fasta_fh = StringIO()
+>>> new_qual_fh = StringIO()
+>>> sc.write(new_fasta_fh, qual=new_qual_fh)
+>>> print(new_fasta_fh.getvalue())
+>seq1 db-accession-149855
+CGATGTC
+>seq2 db-accession-34989
+CATCG
+<BLANKLINE>
+>>> print(new_qual_fh.getvalue())
+>seq1 db-accession-149855
+40 39 39 4 50 1 100
+>seq2 db-accession-34989
+3 3 10 42 80
+<BLANKLINE>
+>>> new_fasta_fh.close()
+>>> new_qual_fh.close()
+
+References
+----------
+.. [1] Lipman, DJ; Pearson, WR (1985). "Rapid and sensitive protein similarity
+   searches". Science 227 (4693): 1435-41.
+.. [2] http://en.wikipedia.org/wiki/FASTA_format
+.. [3] http://blast.ncbi.nlm.nih.gov/blastcgihelp.shtml
+.. [4] https://www.broadinstitute.org/crd/wiki/index.php/Qual
+.. [5] Madden T. The BLAST Sequence Analysis Tool. 2002 Oct 9
+   [Updated 2003 Aug 13]. In: McEntyre J, Ostell J, editors. The NCBI Handbook
+   [Internet]. Bethesda (MD): National Center for Biotechnology Information
+   (US); 2002-. Chapter 16. Available from:
+   http://www.ncbi.nlm.nih.gov/books/NBK21097/
+.. [6] http://evolution.genetics.washington.edu/phylip/doc/sequence.html
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import range, zip
+from future.standard_library import hooks
+
+import textwrap
+
+import numpy as np
+
+from skbio.io import (register_reader, register_writer, register_sniffer,
+                      FASTAFormatError, FileSentinel)
+from skbio.io._base import (_chunk_str, _get_nth_sequence,
+                            _parse_fasta_like_header,
+                            _format_fasta_like_records)
+from skbio.alignment import SequenceCollection, Alignment
+from skbio.sequence import (BiologicalSequence, NucleotideSequence,
+                            DNASequence, RNASequence, ProteinSequence)
+
+with hooks():
+    from itertools import zip_longest
+
+
@register_sniffer('fasta')
def _fasta_sniffer(fh):
    """Guess whether ``fh`` contains FASTA-formatted data.

    Reads up to 10 records. If at least one record parses cleanly (i.e. the
    file isn't empty and no format errors are raised), the file looks like
    FASTA. The file is then re-parsed as QUAL, which has stricter
    requirements: if that *also* succeeds, report a negative so QUAL files
    are not sniffed as FASTA. (A QUAL file is technically readable as FASTA
    since sequences aren't validated, but that is almost never what the user
    wants, and a future QUAL sniffer shouldn't clash with this one.)
    """
    max_records = 10
    try:
        saw_record = False
        for _ in zip(range(max_records), _fasta_to_generator(fh)):
            saw_record = True

        if not saw_record:
            # empty file: not FASTA
            return False, {}

        fh.seek(0)
        try:
            for _ in zip(range(max_records),
                         _parse_fasta_raw(fh, _parse_quality_scores, 'QUAL')):
                pass
        except FASTAFormatError:
            # parses as FASTA but not as QUAL: positive identification
            return True, {}
        # parses as both: treat it as QUAL, not FASTA
        return False, {}
    except FASTAFormatError:
        return False, {}
+
@register_reader('fasta')
def _fasta_to_generator(fh, qual=FileSentinel, constructor=BiologicalSequence):
    """Yield one ``constructor`` instance per FASTA record in ``fh``.

    When a QUAL file is supplied, its records are paired positionally with
    the FASTA records; IDs and descriptions must match exactly, and quality
    scores are attached to each yielded sequence object.
    """
    seq_records = _parse_fasta_raw(fh, _parse_sequence_data, 'FASTA')

    if qual is None:
        for seq, seq_id, seq_desc in seq_records:
            yield constructor(seq, id=seq_id, description=seq_desc)
        return

    score_records = _parse_fasta_raw(qual, _parse_quality_scores, 'QUAL')
    for seq_rec, score_rec in zip_longest(seq_records, score_records,
                                          fillvalue=None):
        # a None from zip_longest means one file ran out of records early
        if seq_rec is None:
            raise FASTAFormatError(
                "QUAL file has more records than FASTA file.")
        if score_rec is None:
            raise FASTAFormatError(
                "FASTA file has more records than QUAL file.")

        seq, seq_id, seq_desc = seq_rec
        scores, score_id, score_desc = score_rec

        if seq_id != score_id:
            raise FASTAFormatError(
                "IDs do not match between FASTA and QUAL records: %r != %r"
                % (seq_id, score_id))
        if seq_desc != score_desc:
            raise FASTAFormatError(
                "Descriptions do not match between FASTA and QUAL "
                "records: %r != %r" % (seq_desc, score_desc))

        # sequence and quality scores lengths are checked in constructor
        yield constructor(seq, id=seq_id, description=seq_desc,
                          quality=scores)
+
+
@register_reader('fasta', BiologicalSequence)
def _fasta_to_biological_sequence(fh, qual=FileSentinel, seq_num=1):
    # Read the ``seq_num``-th record (1-based) as a ``BiologicalSequence``,
    # attaching quality scores when a QUAL file is supplied.
    seq_gen = _fasta_to_generator(fh, qual=qual,
                                  constructor=BiologicalSequence)
    return _get_nth_sequence(seq_gen, seq_num)
+
+
@register_reader('fasta', NucleotideSequence)
def _fasta_to_nucleotide_sequence(fh, qual=FileSentinel, seq_num=1):
    # Read the ``seq_num``-th record (1-based) as a ``NucleotideSequence``,
    # attaching quality scores when a QUAL file is supplied.
    seq_gen = _fasta_to_generator(fh, qual=qual,
                                  constructor=NucleotideSequence)
    return _get_nth_sequence(seq_gen, seq_num)
+
+
@register_reader('fasta', DNASequence)
def _fasta_to_dna_sequence(fh, qual=FileSentinel, seq_num=1):
    # Read the ``seq_num``-th record (1-based) as a ``DNASequence``,
    # attaching quality scores when a QUAL file is supplied.
    seq_gen = _fasta_to_generator(fh, qual=qual, constructor=DNASequence)
    return _get_nth_sequence(seq_gen, seq_num)
+
+
@register_reader('fasta', RNASequence)
def _fasta_to_rna_sequence(fh, qual=FileSentinel, seq_num=1):
    # Read the ``seq_num``-th record (1-based) as an ``RNASequence``,
    # attaching quality scores when a QUAL file is supplied.
    seq_gen = _fasta_to_generator(fh, qual=qual, constructor=RNASequence)
    return _get_nth_sequence(seq_gen, seq_num)
+
+
@register_reader('fasta', ProteinSequence)
def _fasta_to_protein_sequence(fh, qual=FileSentinel, seq_num=1):
    # Read the ``seq_num``-th record (1-based) as a ``ProteinSequence``,
    # attaching quality scores when a QUAL file is supplied.
    seq_gen = _fasta_to_generator(fh, qual=qual, constructor=ProteinSequence)
    return _get_nth_sequence(seq_gen, seq_num)
+
+
@register_reader('fasta', SequenceCollection)
def _fasta_to_sequence_collection(fh, qual=FileSentinel,
                                  constructor=BiologicalSequence):
    # Materialize every record from the FASTA (and optional QUAL) file and
    # wrap the resulting sequences in a ``SequenceCollection``.
    seqs = list(_fasta_to_generator(fh, qual=qual, constructor=constructor))
    return SequenceCollection(seqs)
+
+
@register_reader('fasta', Alignment)
def _fasta_to_alignment(fh, qual=FileSentinel, constructor=BiologicalSequence):
    # Materialize every record from the FASTA (and optional QUAL) file and
    # wrap the resulting sequences in an ``Alignment``.
    seqs = list(_fasta_to_generator(fh, qual=qual, constructor=constructor))
    return Alignment(seqs)
+
+
@register_writer('fasta')
def _generator_to_fasta(obj, fh, qual=FileSentinel,
                        id_whitespace_replacement='_',
                        description_newline_replacement=' ', max_width=None):
    """Write a generator of sequence objects to ``fh`` in FASTA format.

    If ``qual`` is provided, quality scores are written to it in QUAL format
    using the same record headers. ``max_width`` wraps sequence data and
    quality scores; a single quality score is never split across lines (so a
    long score may exceed ``max_width`` on its own line).

    Raises
    ------
    ValueError
        If ``max_width`` is less than 1.
    """
    if max_width is not None:
        if max_width < 1:
            raise ValueError(
                "Maximum line width must be greater than zero (max_width=%d)."
                % max_width)
        if qual is not None:
            # define text wrapper for splitting quality scores here for
            # efficiency. textwrap docs recommend reusing a TextWrapper
            # instance when it is used many times. configure text wrapper to
            # never break "words" (i.e., integer quality scores) across lines
            qual_wrapper = textwrap.TextWrapper(
                width=max_width, break_long_words=False,
                break_on_hyphens=False)

    formatted_records = _format_fasta_like_records(
        obj, id_whitespace_replacement, description_newline_replacement,
        qual is not None)
    for header, seq_str, qual_scores in formatted_records:
        if max_width is not None:
            seq_str = _chunk_str(seq_str, max_width, '\n')

        fh.write('>%s\n%s\n' % (header, seq_str))

        if qual is not None:
            # Fix: the previous ``np.asarray(qual_scores, dtype=np.str)``
            # relied on the ``np.str`` alias, which was deprecated in NumPy
            # 1.20 and removed in 1.24 (AttributeError). The builtin ``str``
            # conversion produces identical output for integer scores.
            qual_str = ' '.join(str(score) for score in qual_scores)
            if max_width is not None:
                qual_str = qual_wrapper.fill(qual_str)
            qual.write('>%s\n%s\n' % (header, qual_str))
+
+
@register_writer('fasta', BiologicalSequence)
def _biological_sequence_to_fasta(obj, fh, qual=FileSentinel,
                                  id_whitespace_replacement='_',
                                  description_newline_replacement=' ',
                                  max_width=None):
    # Write a single ``BiologicalSequence`` as a one-record FASTA (and
    # optional QUAL) file by delegating to the shared sequence writer.
    _sequences_to_fasta(
        [obj], fh, qual,
        id_whitespace_replacement=id_whitespace_replacement,
        description_newline_replacement=description_newline_replacement,
        max_width=max_width)
+
+
@register_writer('fasta', NucleotideSequence)
def _nucleotide_sequence_to_fasta(obj, fh, qual=FileSentinel,
                                  id_whitespace_replacement='_',
                                  description_newline_replacement=' ',
                                  max_width=None):
    # Write a single ``NucleotideSequence`` as a one-record FASTA (and
    # optional QUAL) file by delegating to the shared sequence writer.
    _sequences_to_fasta(
        [obj], fh, qual,
        id_whitespace_replacement=id_whitespace_replacement,
        description_newline_replacement=description_newline_replacement,
        max_width=max_width)
+
+
@register_writer('fasta', DNASequence)
def _dna_sequence_to_fasta(obj, fh, qual=FileSentinel,
                           id_whitespace_replacement='_',
                           description_newline_replacement=' ',
                           max_width=None):
    # Write a single ``DNASequence`` as a one-record FASTA (and optional
    # QUAL) file by delegating to the shared sequence writer.
    _sequences_to_fasta(
        [obj], fh, qual,
        id_whitespace_replacement=id_whitespace_replacement,
        description_newline_replacement=description_newline_replacement,
        max_width=max_width)
+
+
@register_writer('fasta', RNASequence)
def _rna_sequence_to_fasta(obj, fh, qual=FileSentinel,
                           id_whitespace_replacement='_',
                           description_newline_replacement=' ',
                           max_width=None):
    # Write a single ``RNASequence`` as a one-record FASTA (and optional
    # QUAL) file by delegating to the shared sequence writer.
    _sequences_to_fasta(
        [obj], fh, qual,
        id_whitespace_replacement=id_whitespace_replacement,
        description_newline_replacement=description_newline_replacement,
        max_width=max_width)
+
+
@register_writer('fasta', ProteinSequence)
def _protein_sequence_to_fasta(obj, fh, qual=FileSentinel,
                               id_whitespace_replacement='_',
                               description_newline_replacement=' ',
                               max_width=None):
    # Write a single ``ProteinSequence`` as a one-record FASTA (and optional
    # QUAL) file by delegating to the shared sequence writer.
    _sequences_to_fasta(
        [obj], fh, qual,
        id_whitespace_replacement=id_whitespace_replacement,
        description_newline_replacement=description_newline_replacement,
        max_width=max_width)
+
+
@register_writer('fasta', SequenceCollection)
def _sequence_collection_to_fasta(obj, fh, qual=FileSentinel,
                                  id_whitespace_replacement='_',
                                  description_newline_replacement=' ',
                                  max_width=None):
    # Write every sequence in a ``SequenceCollection`` to FASTA (and
    # optional QUAL) via the shared sequence writer.
    _sequences_to_fasta(
        obj, fh, qual,
        id_whitespace_replacement=id_whitespace_replacement,
        description_newline_replacement=description_newline_replacement,
        max_width=max_width)
+
+
@register_writer('fasta', Alignment)
def _alignment_to_fasta(obj, fh, qual=FileSentinel,
                        id_whitespace_replacement='_',
                        description_newline_replacement=' ', max_width=None):
    # Write every sequence in an ``Alignment`` to FASTA (and optional QUAL)
    # via the shared sequence writer.
    _sequences_to_fasta(
        obj, fh, qual,
        id_whitespace_replacement=id_whitespace_replacement,
        description_newline_replacement=description_newline_replacement,
        max_width=max_width)
+
+
def _parse_fasta_raw(fh, data_parser, format_label):
    """Raw parser shared by the FASTA and QUAL readers.

    Yields ``(data, id, description)`` tuples, where ``data`` is whatever
    ``data_parser`` produced from the record's non-header lines. It is the
    responsibility of the caller to construct the correct in-memory object
    to hold the data. ``format_label`` ('FASTA' or 'QUAL') only appears in
    error messages.
    """
    # the first line must be a header; checked inline (here and in the loop
    # below) for performance
    first_line = next(fh)
    if not first_line.startswith('>'):
        raise FASTAFormatError(
            "Found line without a header in %s-formatted file:\n%s" %
            (format_label, first_line))
    id_, desc = _parse_fasta_like_header(first_line)

    data_chunks = []
    for line in fh:
        if line.startswith('>'):
            # new header: emit the record accumulated so far, then reset
            yield data_parser(data_chunks), id_, desc
            data_chunks = []
            id_, desc = _parse_fasta_like_header(line)
            continue

        stripped = line.strip()
        if not stripped:
            raise FASTAFormatError(
                "Found blank or whitespace-only line in %s-formatted "
                "file." % format_label)
        data_chunks.append(stripped)

    # emit the final record in the file
    yield data_parser(data_chunks), id_, desc
+
+
+def _parse_sequence_data(chunks):
+    if not chunks:
+        raise FASTAFormatError("Found FASTA header without sequence data.")
+    return ''.join(chunks)
+
+
+def _parse_quality_scores(chunks):
+    if not chunks:
+        raise FASTAFormatError("Found QUAL header without quality scores.")
+
+    qual_str = ' '.join(chunks)
+    try:
+        return np.asarray(qual_str.split(), dtype=int)
+    except ValueError:
+        raise FASTAFormatError(
+            "Could not convert quality scores to integers:\n%s" % qual_str)
+
+
def _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
                        description_newline_replacement, max_width):
    # Adapt an arbitrary iterable of sequence objects to the generator-based
    # FASTA writer by wrapping it in a generator expression.
    seq_gen = (seq for seq in obj)
    _generator_to_fasta(
        seq_gen, fh, qual=qual,
        id_whitespace_replacement=id_whitespace_replacement,
        description_newline_replacement=description_newline_replacement,
        max_width=max_width)
diff --git a/skbio/io/fastq.py b/skbio/io/fastq.py
new file mode 100644
index 0000000..7a25b4d
--- /dev/null
+++ b/skbio/io/fastq.py
@@ -0,0 +1,514 @@
+r"""
+FASTQ format (:mod:`skbio.io.fastq`)
+====================================
+
+.. currentmodule:: skbio.io.fastq
+
+The FASTQ file format (``fastq``) stores biological (e.g., nucleotide)
+sequences and their quality scores in a simple plain text format that is both
+human-readable and easy to parse. The file format was invented by Jim Mullikin
+at the Wellcome Trust Sanger Institute but wasn't given a formal definition,
+though it has informally become a standard file format for storing
+high-throughput sequence data. More information about the format and its
+variants can be found in [1]_ and [2]_.
+
+Conceptually, a FASTQ file is similar to paired FASTA and QUAL files in that it
+stores both biological sequences and their quality scores. FASTQ differs from
+FASTA/QUAL because the quality scores are stored in the same file as the
+biological sequence data.
+
+An example FASTQ-formatted file containing two DNA sequences and their quality
+scores::
+
+    @seq1 description 1
+    AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
+    +
+    ````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
+    @seq2 description 2
+    TATGTATATATAACATATACATATATACATACATA
+    +
+    ]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
+
+Format Support
+--------------
+**Has Sniffer: Yes**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|Yes   |Yes   |generator of :mod:`skbio.sequence.BiologicalSequence` objects  |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.alignment.SequenceCollection`                      |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.alignment.Alignment`                               |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.BiologicalSequence`                       |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.NucleotideSequence`                       |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.DNASequence`                              |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.RNASequence`                              |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.sequence.ProteinSequence`                          |
++------+------+---------------------------------------------------------------+
+
+Format Specification
+--------------------
+A FASTQ file contains one or more biological sequences and their corresponding
+quality scores stored sequentially as *records*. Each *record* consists of four
+sections:
+
+1. Sequence header line consisting of a sequence identifier (ID) and
+   description (both optional)
+2. Biological sequence data (typically stored using the standard IUPAC
+   lexicon), optionally split over multiple lines
+3. Quality header line separating sequence data from quality scores (optionally
+   repeating the ID and description from the sequence header line)
+4. Quality scores as printable ASCII characters, optionally split over multiple
+   lines. Decoding of quality scores will depend on the specified FASTQ variant
+   (see below for more details)
+
+For the complete FASTQ format specification, see [1]_. scikit-bio's FASTQ
+implementation follows the format specification described in this excellent
+publication, including validating the implementation against the FASTQ examples
+provided in the publication's supplementary data.
+
+.. note:: IDs and descriptions will be parsed from sequence header lines in
+   exactly the same way as FASTA headers (:mod:`skbio.io.fasta`).
+
+   Whitespace is not allowed in sequence data or quality scores. Leading and
+   trailing whitespace is not stripped from sequence data or quality scores,
+   resulting in an error being raised if found.
+
+   scikit-bio will write FASTQ files in a normalized format, with each record
+   section on a single line. Thus, each record will be composed of *exactly*
+   four lines. The quality header line won't have the sequence ID and
+   description repeated.
+
+Quality Score Variants
+^^^^^^^^^^^^^^^^^^^^^^
+FASTQ associates quality scores with sequence data, with each quality score
+encoded as a single printable ASCII character. In scikit-bio, all quality
+scores are decoded as Phred quality scores. This is the most common quality
+score metric, though there are others (e.g., Solexa quality scores).
+Unfortunately, different sequencers have different ways of encoding quality
+scores as ASCII characters, notably Sanger and Illumina. Below is a table
+highlighting the different encoding variants supported by scikit-bio, as well
+as listing the equivalent variant names used in the Open Bioinformatics
+Foundation (OBF) [3]_ projects (e.g., Biopython, BioPerl, etc.).
+
++-----------+---------+----+--------+-----------------------------------------+
+| Variant   | ASCII   |Off\|Quality | Notes                                   |
+|           | Range   |set |Range   |                                         |
++===========+=========+====+========+=========================================+
+|sanger     |33 to 126|33  |0 to 93 |Equivalent to OBF's fastq-sanger.        |
++-----------+---------+----+--------+-----------------------------------------+
+|illumina1.3|64 to 126|64  |0 to 62 |Equivalent to OBF's fastq-illumina. Use  |
+|           |         |    |        |this if your data was generated using    |
+|           |         |    |        |Illumina 1.3-1.7 software.               |
++-----------+---------+----+--------+-----------------------------------------+
+|illumina1.8|33 to 95 |33  |0 to 62 |Equivalent to sanger but with 0 to 62    |
+|           |         |    |        |quality score range check. Use this if   |
+|           |         |    |        |your data was generated using Illumina   |
+|           |         |    |        |1.8 software or later.                   |
++-----------+---------+----+--------+-----------------------------------------+
+|solexa     |59 to 126|64  |-5 to 62|Not currently implemented.               |
++-----------+---------+----+--------+-----------------------------------------+
+
+.. note:: When writing, Phred quality scores will be truncated to the maximum
+   value in the variant's range and a warning will be issued. This is
+   consistent with the OBF projects.
+
+   When reading, an error will be raised if a decoded quality score is outside
+   the variant's range.
+
+Format Parameters
+-----------------
+The following parameters are available to all FASTQ format readers and writers:
+
+- ``variant``: A string indicating the quality score variant used to
+  decode/encode Phred quality scores. Must be one of ``sanger``,
+  ``illumina1.3``, ``illumina1.8``, or ``solexa``. This parameter is preferred
+  over ``phred_offset`` because additional quality score range checks and
+  conversions can be performed. It is also more explicit.
+
+- ``phred_offset``: An integer indicating the ASCII code offset used to
+  decode/encode Phred quality scores. Must be in the range ``[33, 126]``. All
+  decoded scores will be assumed to be Phred scores (i.e., no additional
+  conversions are performed). Prefer using ``variant`` over this parameter
+  whenever possible.
+
+.. note:: You must provide ``variant`` or ``phred_offset`` when reading or
+   writing a FASTQ file. ``variant`` and ``phred_offset`` cannot both be
+   provided at the same time.
+
+The following additional parameters are the same as in FASTA format
+(:mod:`skbio.io.fasta`):
+
+- ``constructor``: see ``constructor`` parameter in FASTA format
+
+- ``seq_num``: see ``seq_num`` parameter in FASTA format
+
+- ``id_whitespace_replacement``: see ``id_whitespace_replacement`` parameter in
+  FASTA format
+
+- ``description_newline_replacement``: see ``description_newline_replacement``
+  parameter in FASTA format
+
+Examples
+--------
+Suppose we have the following FASTQ file with two DNA sequences::
+
+    @seq1 description 1
+    AACACCAAACTTCTCCACC
+    ACGTGAGCTACAAAAGGGT
+    +seq1 description 1
+    ''''Y^T]']C^CABCACC
+    `^LB^CCYT\T\Y\WF^^^
+    @seq2 description 2
+    TATGTATATATAACATATACATATATACATACATA
+    +
+    ]KZ[PY]_[YY^'''AC^\\'BT''C'\AT''BBB
+
+Note that the first sequence and its quality scores are split across multiple
+lines, while the second sequence and its quality scores are each on a single
+line. Also note that the first sequence has a duplicate ID and description on
+the quality header line, while the second sequence does not.
+
+Let's define this file in-memory as a ``StringIO``, though this could be a real
+file path, file handle, or anything that's supported by scikit-bio's I/O
+registry in practice:
+
+>>> from StringIO import StringIO
+>>> fs = '\n'.join([
+...     r"@seq1 description 1",
+...     r"AACACCAAACTTCTCCACC",
+...     r"ACGTGAGCTACAAAAGGGT",
+...     r"+seq1 description 1",
+...     r"''''Y^T]']C^CABCACC",
+...     r"'^LB^CCYT\T\Y\WF^^^",
+...     r"@seq2 description 2",
+...     r"TATGTATATATAACATATACATATATACATACATA",
+...     r"+",
+...     r"]KZ[PY]_[YY^'''AC^\\'BT''C'\AT''BBB"])
+>>> fh = StringIO(fs)
+
+To load the sequences into a ``SequenceCollection``, we run:
+
+>>> from skbio import SequenceCollection
+>>> sc = SequenceCollection.read(fh, variant='sanger')
+>>> sc
+<SequenceCollection: n=2; mean +/- std length=36.50 +/- 1.50>
+
+Note that quality scores are decoded from Sanger. To load the second sequence
+as a ``DNASequence``:
+
+>>> from skbio import DNASequence
+>>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
+>>> DNASequence.read(fh, variant='sanger', seq_num=2)
+<DNASequence: TATGTATATA... (length: 35)>
+
+To write our ``SequenceCollection`` to a FASTQ file with quality scores encoded
+using the ``illumina1.3`` variant:
+
+>>> new_fh = StringIO()
+>>> sc.write(new_fh, format='fastq', variant='illumina1.3')
+>>> print(new_fh.getvalue())
+@seq1 description 1
+AACACCAAACTTCTCCACCACGTGAGCTACAAAAGGGT
++
+FFFFx}s|F|b}b`ab`bbF}ka}bbxs{s{x{ve}}}
+@seq2 description 2
+TATGTATATATAACATATACATATATACATACATA
++
+|jyzox|~zxx}FFF`b}{{FasFFbF{`sFFaaa
+<BLANKLINE>
+>>> new_fh.close()
+
+Note that the file has been written in normalized format: sequence and quality
+scores each only occur on a single line and the sequence header line is
+not repeated in the quality header line. Note also that the quality scores are
+different because they have been encoded using a different variant.
+
+References
+----------
+.. [1] Peter J. A. Cock, Christopher J. Fields, Naohisa Goto, Michael L. Heuer,
+   and Peter M. Rice. The Sanger FASTQ file format for sequences with quality
+   scores, and the Solexa/Illumina FASTQ variants. Nucl. Acids Res. (2010) 38
+   (6): 1767-1771. first published online December 16, 2009.
+   doi:10.1093/nar/gkp1137
+   http://nar.oxfordjournals.org/content/38/6/1767
+.. [2] http://en.wikipedia.org/wiki/FASTQ_format
+.. [3] http://www.open-bio.org/
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import range, zip
+
+import re
+from skbio.io import (register_reader, register_writer, register_sniffer,
+                      FASTQFormatError)
+from skbio.io._base import (_decode_qual_to_phred, _encode_phred_to_qual,
+                            _get_nth_sequence, _parse_fasta_like_header,
+                            _format_fasta_like_records)
+from skbio.alignment import SequenceCollection, Alignment
+from skbio.sequence import (BiologicalSequence, NucleotideSequence,
+                            DNASequence, RNASequence, ProteinSequence)
+
+_whitespace_regex = re.compile(r'\s')
+
+
@register_sniffer('fastq')
def _fastq_sniffer(fh):
    """Guess whether ``fh`` contains FASTQ data.

    Attempts to parse up to 10 records with a Sanger-style offset. The file
    is considered FASTQ if at least one record parses cleanly; any parse
    error (or out-of-range quality character) means "not FASTQ".
    """
    try:
        saw_record = False
        records = _fastq_to_generator(fh, phred_offset=33)
        for _ in zip(range(10), records):
            saw_record = True
        return saw_record, {}
    except (FASTQFormatError, ValueError):
        return False, {}
+
+
@register_reader('fastq')
def _fastq_to_generator(fh, variant=None, phred_offset=None,
                        constructor=BiologicalSequence):
    # Yield one sequence object per FASTQ record in `fh`. Exactly one of
    # `variant`/`phred_offset` selects how quality characters are decoded
    # (enforced downstream in _decode_qual_to_phred); `constructor` builds
    # each yielded object.
    seq_header = next(_line_generator(fh))
    if not seq_header.startswith('@'):
        raise FASTQFormatError(
            "Expected sequence (@) header line at start of file: %r"
            % seq_header)

    # _parse_quality_scores returns the *next* record's sequence header (or
    # None at EOF), so that state is threaded from one iteration to the next.
    while seq_header is not None:
        id_, desc = _parse_fasta_like_header(seq_header)
        seq, qual_header = _parse_sequence_data(fh)

        # The quality header must be a bare '+' or repeat the sequence
        # header's ID/description exactly.
        if qual_header != '+' and qual_header[1:] != seq_header[1:]:
            raise FASTQFormatError(
                "Sequence (@) and quality (+) header lines do not match: "
                "%r != %r" % (seq_header[1:], qual_header[1:]))

        phred_scores, seq_header = _parse_quality_scores(fh, len(seq), variant,
                                                         phred_offset)
        yield constructor(seq, id=id_, description=desc, quality=phred_scores)
+
+
@register_reader('fastq', BiologicalSequence)
def _fastq_to_biological_sequence(fh, variant=None, phred_offset=None,
                                  seq_num=1):
    """Read the ``seq_num``-th FASTQ record as a ``BiologicalSequence``."""
    records = _fastq_to_generator(fh, variant=variant,
                                  phred_offset=phred_offset,
                                  constructor=BiologicalSequence)
    return _get_nth_sequence(records, seq_num)
+
+
@register_reader('fastq', NucleotideSequence)
def _fastq_to_nucleotide_sequence(fh, variant=None, phred_offset=None,
                                  seq_num=1):
    """Read the ``seq_num``-th FASTQ record as a ``NucleotideSequence``."""
    records = _fastq_to_generator(fh, variant=variant,
                                  phred_offset=phred_offset,
                                  constructor=NucleotideSequence)
    return _get_nth_sequence(records, seq_num)
+
+
@register_reader('fastq', DNASequence)
def _fastq_to_dna_sequence(fh, variant=None, phred_offset=None, seq_num=1):
    """Read the ``seq_num``-th FASTQ record as a ``DNASequence``."""
    records = _fastq_to_generator(fh, variant=variant,
                                  phred_offset=phred_offset,
                                  constructor=DNASequence)
    return _get_nth_sequence(records, seq_num)
+
+
@register_reader('fastq', RNASequence)
def _fastq_to_rna_sequence(fh, variant=None, phred_offset=None, seq_num=1):
    """Read the ``seq_num``-th FASTQ record as an ``RNASequence``."""
    records = _fastq_to_generator(fh, variant=variant,
                                  phred_offset=phred_offset,
                                  constructor=RNASequence)
    return _get_nth_sequence(records, seq_num)
+
+
@register_reader('fastq', ProteinSequence)
def _fastq_to_protein_sequence(fh, variant=None, phred_offset=None, seq_num=1):
    """Read the ``seq_num``-th FASTQ record as a ``ProteinSequence``."""
    records = _fastq_to_generator(fh, variant=variant,
                                  phred_offset=phred_offset,
                                  constructor=ProteinSequence)
    return _get_nth_sequence(records, seq_num)
+
+
@register_reader('fastq', SequenceCollection)
def _fastq_to_sequence_collection(fh, variant=None, phred_offset=None,
                                  constructor=BiologicalSequence):
    """Read every FASTQ record in ``fh`` into a ``SequenceCollection``."""
    records = _fastq_to_generator(fh, variant=variant,
                                  phred_offset=phred_offset,
                                  constructor=constructor)
    return SequenceCollection(list(records))
+
+
@register_reader('fastq', Alignment)
def _fastq_to_alignment(fh, variant=None, phred_offset=None,
                        constructor=BiologicalSequence):
    """Read every FASTQ record in ``fh`` into an ``Alignment``."""
    records = _fastq_to_generator(fh, variant=variant,
                                  phred_offset=phred_offset,
                                  constructor=constructor)
    return Alignment(list(records))
+
+
@register_writer('fastq')
def _generator_to_fastq(obj, fh, variant=None, phred_offset=None,
                        id_whitespace_replacement='_',
                        description_newline_replacement=' '):
    """Write each sequence in ``obj`` to ``fh`` as a four-line FASTQ record.

    Records are written in normalized form: sequence and quality each on a
    single line, and a bare '+' quality header.
    """
    records = _format_fasta_like_records(
        obj, id_whitespace_replacement, description_newline_replacement, True)
    for header, seq_str, qual_scores in records:
        qual_str = _encode_phred_to_qual(qual_scores, variant=variant,
                                         phred_offset=phred_offset)
        fh.write('@%s\n%s\n+\n%s\n' % (header, seq_str, qual_str))
+
+
@register_writer('fastq', BiologicalSequence)
def _biological_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
                                  id_whitespace_replacement='_',
                                  description_newline_replacement=' '):
    """Write a single ``BiologicalSequence`` to ``fh`` in FASTQ format."""
    _sequences_to_fastq(
        [obj], fh, variant, phred_offset, id_whitespace_replacement,
        description_newline_replacement)
+
+
@register_writer('fastq', NucleotideSequence)
def _nucleotide_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
                                  id_whitespace_replacement='_',
                                  description_newline_replacement=' '):
    """Write a single ``NucleotideSequence`` to ``fh`` in FASTQ format."""
    _sequences_to_fastq(
        [obj], fh, variant, phred_offset, id_whitespace_replacement,
        description_newline_replacement)
+
+
@register_writer('fastq', DNASequence)
def _dna_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
                           id_whitespace_replacement='_',
                           description_newline_replacement=' '):
    """Write a single ``DNASequence`` to ``fh`` in FASTQ format."""
    _sequences_to_fastq(
        [obj], fh, variant, phred_offset, id_whitespace_replacement,
        description_newline_replacement)
+
+
@register_writer('fastq', RNASequence)
def _rna_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
                           id_whitespace_replacement='_',
                           description_newline_replacement=' '):
    """Write a single ``RNASequence`` to ``fh`` in FASTQ format."""
    _sequences_to_fastq(
        [obj], fh, variant, phred_offset, id_whitespace_replacement,
        description_newline_replacement)
+
+
@register_writer('fastq', ProteinSequence)
def _protein_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
                               id_whitespace_replacement='_',
                               description_newline_replacement=' '):
    """Write a single ``ProteinSequence`` to ``fh`` in FASTQ format."""
    _sequences_to_fastq(
        [obj], fh, variant, phred_offset, id_whitespace_replacement,
        description_newline_replacement)
+
+
@register_writer('fastq', SequenceCollection)
def _sequence_collection_to_fastq(obj, fh, variant=None, phred_offset=None,
                                  id_whitespace_replacement='_',
                                  description_newline_replacement=' '):
    """Write every sequence in a ``SequenceCollection`` to ``fh`` as FASTQ."""
    _sequences_to_fastq(
        obj, fh, variant, phred_offset, id_whitespace_replacement,
        description_newline_replacement)
+
+
@register_writer('fastq', Alignment)
def _alignment_to_fastq(obj, fh, variant=None, phred_offset=None,
                        id_whitespace_replacement='_',
                        description_newline_replacement=' '):
    """Write every sequence in an ``Alignment`` to ``fh`` as FASTQ."""
    _sequences_to_fastq(
        obj, fh, variant, phred_offset, id_whitespace_replacement,
        description_newline_replacement)
+
+
+def _line_generator(fh):
+    for line in fh:
+        line = line.rstrip('\n')
+        if not line:
+            raise FASTQFormatError("Found blank line in FASTQ-formatted file.")
+        yield line
+
+
def _parse_sequence_data(fh):
    """Read the sequence section of one FASTQ record.

    Consumes lines up to and including the quality ('+') header line and
    returns ``(sequence_string, quality_header_line)``. Raises
    ``FASTQFormatError`` for empty sequences, a missing quality header, a
    truncated record, or whitespace embedded in sequence data.
    """
    seq_chunks = []
    for line in _line_generator(fh):
        if line.startswith('+'):
            # Reached the quality header; sequence data must precede it.
            if not seq_chunks:
                raise FASTQFormatError(
                    "Found FASTQ record without sequence data.")
            return ''.join(seq_chunks), line
        if line.startswith('@'):
            raise FASTQFormatError(
                "Found FASTQ record that is missing a quality (+) header line "
                "after sequence data.")
        if _whitespace_regex.search(line):
            raise FASTQFormatError(
                "Found whitespace in sequence data: %r" % line)
        seq_chunks.append(line)

    raise FASTQFormatError(
        "Found incomplete/truncated FASTQ record at end of file.")
+
+
def _parse_quality_scores(fh, seq_len, variant, phred_offset):
    # Decode the quality section of one FASTQ record. Returns
    # (phred_scores, next_seq_header), where next_seq_header is the
    # following record's '@' line, or None when EOF was reached.
    phred_scores = []
    qual_len = 0
    for chunk in _line_generator(fh):
        # A line starting with '@' is only treated as the next record's
        # header once enough quality characters have been read; before that
        # point it is quality data ('@' is a legal quality character).
        if chunk.startswith('@') and qual_len == seq_len:
            return phred_scores, chunk
        else:
            qual_len += len(chunk)

            if qual_len > seq_len:
                raise FASTQFormatError(
                    "Found more quality score characters than sequence "
                    "characters. Extra quality score characters: %r" %
                    chunk[-(qual_len - seq_len):])

            phred_scores.extend(
                _decode_qual_to_phred(chunk, variant=variant,
                                      phred_offset=phred_offset))

    # EOF: the record is complete only if quality length matches sequence
    # length; anything shorter means the file was truncated mid-record.
    if qual_len != seq_len:
        raise FASTQFormatError(
            "Found incomplete/truncated FASTQ record at end of file.")
    return phred_scores, None
+
+
def _sequences_to_fastq(obj, fh, variant, phred_offset,
                        id_whitespace_replacement,
                        description_newline_replacement):
    """Write an iterable of sequences to ``fh`` via the generator writer.

    Thin adapter: wraps ``obj`` in an iterator and delegates all encoding
    and formatting to ``_generator_to_fastq``.
    """
    _generator_to_fastq(
        iter(obj), fh, variant=variant, phred_offset=phred_offset,
        id_whitespace_replacement=id_whitespace_replacement,
        description_newline_replacement=description_newline_replacement)
diff --git a/skbio/io/lsmat.py b/skbio/io/lsmat.py
new file mode 100644
index 0000000..0f54d4e
--- /dev/null
+++ b/skbio/io/lsmat.py
@@ -0,0 +1,231 @@
+"""
+Labeled square matrix format (:mod:`skbio.io.lsmat`)
+====================================================
+
+.. currentmodule:: skbio.io.lsmat
+
+The labeled square matrix file format (``lsmat``) stores numeric square
+matrix data relating a set of objects along each axis. The format also stores
+identifiers (i.e., unique labels) for the objects. The matrix data and
+identifiers are stored in delimited text format (e.g., TSV or CSV). This format
+supports storing a variety of data types including dissimilarity/distance
+matrices, similarity matrices and amino acid substitution matrices.
+
+Format Support
+--------------
+**Has Sniffer: Yes**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|Yes   |Yes   |:mod:`skbio.stats.distance.DissimilarityMatrix`                |
++------+------+---------------------------------------------------------------+
+|Yes   |Yes   |:mod:`skbio.stats.distance.DistanceMatrix`                     |
++------+------+---------------------------------------------------------------+
+
+Format Specification
+--------------------
+The labeled square matrix and object identifiers are stored as delimited text.
+The first line of the file is the header, which must start with the delimiter,
+followed by the IDs for all objects in the matrix. Each of the following lines
+must contain an object's ID, followed by a numeric (float or integer) vector
+relating the object to all other objects in the matrix. The order of objects is
+determined by the IDs in the header.
+
+For example, assume we have a 2x2 distance matrix with IDs ``'a'`` and ``'b'``.
+When serialized in this format, the distance matrix might look like::
+
+    <del>a<del>b
+    a<del>0.0<del>1.0
+    b<del>1.0<del>0.0
+
+where ``<del>`` is the delimiter between elements.
+
+Lines containing only whitespace may occur anywhere throughout the file and are
+ignored. Lines starting with ``#`` are treated as comments and are ignored.
+Comments may only occur *before* the header.
+
+IDs will have any leading/trailing whitespace removed when they are parsed.
+
+.. note:: This file format is most useful for storing small matrices, or when
+   it is desirable to represent the matrix in a human-readable format, or
+   easily import the file into another program that supports delimited text
+   (e.g., a spreadsheet program). If efficiency is a concern, this format may
+   not be the most appropriate choice.
+
+Format Parameters
+-----------------
+The only supported format parameter is ``delimiter``, which defaults to the tab
+character (``'\\t'``). ``delimiter`` is used to separate elements in the file
+format. ``delimiter`` can be specified as a keyword argument when reading from
+or writing to a file.
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import csv
+
+import numpy as np
+
+from skbio.stats.distance import DissimilarityMatrix, DistanceMatrix
+from skbio.io import (register_reader, register_writer, register_sniffer,
+                      LSMatFormatError)
+
+
@register_sniffer('lsmat')
def _lsmat_sniffer(fh):
    """Guess whether ``fh`` is an lsmat file.

    Sniffs the delimiter from the header line, then confirms that the first
    data row's ID matches the first ID in the header. Returns the sniffed
    delimiter as a format parameter on success.
    """
    header = _find_header(fh)
    if header is None:
        return False, {}

    try:
        delimiter = csv.Sniffer().sniff(header).delimiter
        ids = _parse_header(header, delimiter)
        first_id, _ = next(_parse_data(fh, delimiter), (None, None))
        if first_id is not None and first_id == ids[0]:
            return True, {'delimiter': delimiter}
    except (csv.Error, LSMatFormatError):
        pass

    return False, {}
+
+
@register_reader('lsmat', DissimilarityMatrix)
def _lsmat_to_dissimilarity_matrix(fh, delimiter='\t'):
    """Read an lsmat-formatted file into a ``DissimilarityMatrix``."""
    return _lsmat_to_matrix(DissimilarityMatrix, fh, delimiter)
+
+
@register_reader('lsmat', DistanceMatrix)
def _lsmat_to_distance_matrix(fh, delimiter='\t'):
    """Read an lsmat-formatted file into a ``DistanceMatrix``."""
    return _lsmat_to_matrix(DistanceMatrix, fh, delimiter)
+
+
@register_writer('lsmat', DissimilarityMatrix)
def _dissimilarity_matrix_to_lsmat(obj, fh, delimiter='\t'):
    """Write a ``DissimilarityMatrix`` to ``fh`` in lsmat format."""
    _matrix_to_lsmat(obj, fh, delimiter)
+
+
@register_writer('lsmat', DistanceMatrix)
def _distance_matrix_to_lsmat(obj, fh, delimiter='\t'):
    """Write a ``DistanceMatrix`` to ``fh`` in lsmat format."""
    _matrix_to_lsmat(obj, fh, delimiter)
+
+
def _lsmat_to_matrix(cls, fh, delimiter):
    # Parse an lsmat file into an instance of `cls` (DissimilarityMatrix or
    # DistanceMatrix). Raises LSMatFormatError for a missing header,
    # mismatched row/header IDs, or a wrong number of rows/values.
    #
    # We aren't using np.loadtxt because it uses *way* too much memory
    # (e.g, a 2GB matrix eats up 10GB, which then isn't freed after parsing
    # has finished). See:
    # http://mail.scipy.org/pipermail/numpy-tickets/2012-August/006749.html

    # Strategy:
    #   - find the header
    #   - initialize an empty ndarray
    #   - for each row of data in the input file:
    #     - populate the corresponding row in the ndarray with floats

    header = _find_header(fh)
    if header is None:
        raise LSMatFormatError(
            "Could not find a header line containing IDs in the "
            "dissimilarity matrix file. Please verify that the file is "
            "not empty.")

    ids = _parse_header(header, delimiter)
    num_ids = len(ids)
    data = np.empty((num_ids, num_ids), dtype=np.float64)

    # Pre-initialize so the final row-count check works even when the file
    # contains zero data rows (the loop body never runs).
    row_idx = -1
    for row_idx, (row_id, row_data) in enumerate(_parse_data(fh, delimiter)):
        if row_idx >= num_ids:
            # We've hit a nonempty line after we already filled the data
            # matrix. Raise an error because we shouldn't ignore extra data.
            raise LSMatFormatError(
                "Encountered extra row(s) without corresponding IDs in "
                "the header.")

        num_vals = len(row_data)
        if num_vals != num_ids:
            raise LSMatFormatError(
                "There are %d value(s) in row %d, which is not equal to the "
                "number of ID(s) in the header (%d)." %
                (num_vals, row_idx + 1, num_ids))

        expected_id = ids[row_idx]
        if row_id == expected_id:
            data[row_idx, :] = np.asarray(row_data, dtype=float)
        else:
            raise LSMatFormatError(
                "Encountered mismatched IDs while parsing the "
                "dissimilarity matrix file. Found '%s' but expected "
                "'%s'. Please ensure that the IDs match between the "
                "dissimilarity matrix header (first row) and the row "
                "labels (first column)." % (row_id, expected_id))

    if row_idx != num_ids - 1:
        raise LSMatFormatError("Expected %d row(s) of data, but found %d." %
                               (num_ids, row_idx + 1))

    return cls(data, ids)
+
+
+def _find_header(fh):
+    header = None
+
+    for line in fh:
+        stripped_line = line.strip()
+
+        if stripped_line and not stripped_line.startswith('#'):
+            # Don't strip the header because the first delimiter might be
+            # whitespace (e.g., tab).
+            header = line
+            break
+
+    return header
+
+
+def _parse_header(header, delimiter):
+    tokens = header.rstrip().split(delimiter)
+
+    if tokens[0]:
+        raise LSMatFormatError(
+            "Header must start with delimiter %r." % delimiter)
+
+    return [e.strip() for e in tokens[1:]]
+
+
+def _parse_data(fh, delimiter):
+    for line in fh:
+        stripped_line = line.strip()
+
+        if not stripped_line:
+            continue
+
+        tokens = line.rstrip().split(delimiter)
+        id_ = tokens[0].strip()
+
+        yield id_, tokens[1:]
+
+
def _matrix_to_lsmat(obj, fh, delimiter):
    """Write a dissimilarity/distance matrix ``obj`` to ``fh`` as lsmat.

    The header row lists the IDs preceded by ``delimiter``; each following
    row is the row's ID followed by its delimited values.
    """
    ids = obj.ids
    fh.write(_format_ids(ids, delimiter))
    fh.write('\n')

    for id_, vals in zip(ids, obj.data):
        fh.write(id_)
        fh.write(delimiter)
        # Use the builtin `str` as the dtype rather than `np.str`: that
        # alias was deprecated in NumPy 1.20 and removed in NumPy 1.24, and
        # was always identical to builtin `str`.
        fh.write(delimiter.join(np.asarray(vals, dtype=str)))
        fh.write('\n')
+
+
+def _format_ids(ids, delimiter):
+    return delimiter.join([''] + list(ids))
diff --git a/skbio/io/newick.py b/skbio/io/newick.py
new file mode 100644
index 0000000..9ed0375
--- /dev/null
+++ b/skbio/io/newick.py
@@ -0,0 +1,485 @@
+"""
+Newick format (:mod:`skbio.io.newick`)
+======================================
+
+.. currentmodule:: skbio.io.newick
+
+Newick format (``newick``) stores spanning-trees with weighted edges and node
+names in a minimal file format [1]_. This is useful for representing
+phylogenetic trees and taxonomies. Newick was created as an informal
+specification on June 26, 1986 [2]_.
+
+Format Support
+--------------
+**Has Sniffer: Yes**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|Yes   |Yes   |:mod:`skbio.tree.TreeNode`                                     |
++------+------+---------------------------------------------------------------+
+
+Format Specification
+--------------------
+A Newick file represents a tree using the following grammar. See below for an
+explanation of the format in plain English.
+
+Formal Grammar
+^^^^^^^^^^^^^^
+.. code-block:: none
+
+          NEWICK ==> NODE ;
+            NODE ==> FORMATTING SUBTREE FORMATTING NODE_INFO FORMATTING
+         SUBTREE ==> ( CHILDREN ) | null
+       NODE_INFO ==> LABEL | LENGTH | LABEL FORMATTING LENGTH | null
+      FORMATTING ==> [ COMMENT_CHARS ] | whitespace | null
+        CHILDREN ==> NODE | CHILDREN , NODE
+           LABEL ==> ' ALL_CHARS ' | SAFE_CHARS
+          LENGTH ==> : FORMATTING NUMBER
+   COMMENT_CHARS ==> any
+       ALL_CHARS ==> any
+      SAFE_CHARS ==> any except: ,;:()[] and whitespace
+          NUMBER ==> a decimal or integer
+
+.. note:: The ``_`` character inside of SAFE_CHARS will be converted to a
+   blank space in ``skbio.tree.TreeNode`` and vice versa.
+
+   ``'`` is considered the escape character. To escape ``'`` use a
+   preceding ``'``.
+
+   The implementation of newick in scikit-bio allows nested comments. To
+   escape ``[`` or ``]`` from within COMMENT_CHARS, use a preceding ``'``.
+
+Explanation
+^^^^^^^^^^^
+The Newick format defines a tree by creating a minimal representation of nodes
+and their relationships to each other.
+
+Basic Symbols
+~~~~~~~~~~~~~
+There are several symbols which define nodes, the first of which is the
+semi-colon (``;``). The semi-colon creates a root node to its left. Recall that
+there can only be one root in a tree.
+
+The next symbol is the comma (``,``), which creates a node to its right.
+However, these two alone are not enough. For example imagine the following
+string: ``, , , ;``. It is evident that there is a root, but the other 3 nodes,
+defined by commas, have no relationship. For this reason, it is not a valid
+Newick string to have more than one node at the root level.
+
+To provide these relationships, there is another structure:
+paired parenthesis (``( )``). These are inserted at the location of an existing
+node and give it the ability to have children. Placing ``( )`` in a node's
+location will create a child inside the parenthesis on the left-most
+inner edge.
+
+Application of Rules
+~~~~~~~~~~~~~~~~~~~~
+Adding a comma within the parenthesis will create two children: ``( , )``
+(also known as a bifurcating node). Notice that only one comma is needed
+because the parenthesis have already created a child. Adding more commas will
+create more children who are siblings to each other. For example, writing
+``( , , , )`` will create a multifurcating node with 4 child nodes who are
+siblings to each other.
+
+The notation for a root can be used to create a complete tree. The ``;`` will
+create a root node where parenthesis can be placed: ``( );``. Adding commas
+will create more children: ``( , );``. These rules can be applied recursively
+ad infinitum: ``(( , ), ( , ));``.
+
+Adding Node Information
+~~~~~~~~~~~~~~~~~~~~~~~
+Information about a node can be added to improve the clarity and meaning of a
+tree. Each node may have a label and/or a length (to the parent). Newick always
+places the node information at the right-most edge of a node's position.
+
+Starting with labels, ``(( , ), ( , ));`` would become
+``((D, E)B, (F, G)C)A;``. There is a named root ``A`` and the root's children
+(from left to right) are ``B`` and ``C``. ``B`` has the children ``D`` and
+``E``, and ``C`` has the children ``F`` and ``G``.
+
+Length represents the distance (or weight of the edge) that connects a node to
+its parent. This must be a decimal or integer. As an example, suppose ``D`` is
+rather estranged from ``B``, and ``E`` is very close. That can be written as:
+``((D:10, E:0.5)B, (F, G)C)A;``. Notice that the colon (``:``) separates the
+label from the length. If the length is provided but the label is omitted, a
+colon must still precede the length (``(:0.25,:0.5):0.0;``). Without this, the
+length would be interpreted as a label (which happens to be a number).
+
+.. note:: Internally scikit-bio will cast a length to ``float``, which
+   technically means that even exponent strings (``1e-3``) are supported.
+
+Advanced Label and Length Rules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+More characters can be used to create more descriptive labels. When creating a
+label there are some rules that must be considered due to limitations in the
+Newick format. The following characters are not allowed within a standard
+label: parenthesis, commas, square-brackets, colon, semi-colon, and whitespace.
+These characters are also disallowed from occurring within a length, which has
+a much stricter format: decimal or integer. Many of these characters are
+symbols which define the structure of a Newick tree and are thus disallowed for
+obvious reasons. The symbols not yet mentioned are square-brackets (``[ ]``)
+and whitespace (space, tab, and newline).
+
+What if these characters are needed within a label? In the simple case of
+spaces, an underscore (``_``) will be translated as a space on read and vice
+versa on write.
+
+What if a literal underscore or any of the others mentioned are needed?
+A label can be escaped (meaning that its contents are understood as regular
+text) using single-quotes (``'``). When a label is surrounded by single-quotes,
+any character is permissible. If a single-quote is needed inside of an escaped
+label or anywhere else, it can be escaped with another single-quote.
+For example, ``A_1`` is written ``'A_1'`` and ``'A'_1`` would be ``'''A''_1'``.
+
+Inline Comments
+~~~~~~~~~~~~~~~
+Square-brackets define a comment, which are the least commonly used part of
+the Newick format. Comments are not included in the generated objects and exist
+only as human readable text ignored by the parser. The implementation in
+scikit-bio allows for nested comments (``[comment [nested]]``). Unpaired
+square-brackets can be escaped with a single-quote preceding the bracket when
+inside an existing comment. (This is identical to escaping a single-quote).
+The single-quote has the highest operator precedence, so there is no need to
+worry about starting a comment from within a properly escaped label.
+
+Whitespace
+~~~~~~~~~~
+Whitespace is not allowed within any un-escaped label or in any length, but it
+is permitted anywhere else.
+
+Caveats
+~~~~~~~
+Newick cannot always provide a unique representation of any tree, in other
+words, the same tree can be written multiple ways. For example: ``(A, B);`` is
+isomorphic to ``(B, A);``. The implementation in scikit-bio maintains the given
+sibling order in its object representations.
+
+Newick has no representation of an unrooted tree. Some biological packages make
+the assumption that when a trifurcated root exists in an otherwise bifurcated
+tree that the tree must be unrooted. In scikit-bio, ``skbio.tree.TreeNode``
+will always be rooted at the ``newick`` root (``;``).
+
+Format Parameters
+-----------------
+The only supported format parameter is `convert_underscores`. This is `True` by
+default. When `False`, underscores found in unescaped labels will not be
+converted to spaces. This is useful when reading the output of an external
+program in which the underscores were not escaped. This parameter only affects
+`read` operations. It does not exist for `write` operations; they will always
+properly escape underscores.
+
+Examples
+--------
+This is a simple Newick string.
+
+>>> from StringIO import StringIO
+>>> from skbio import read
+>>> from skbio.tree import TreeNode
+>>> f = StringIO(u"((D, E)B, (F, G)C)A;")
+>>> tree = read(f, format="newick", into=TreeNode)
+>>> f.close()
+>>> print(tree.ascii_art())
+                    /-D
+          /B-------|
+         |          \-E
+-A-------|
+         |          /-F
+          \C-------|
+                    \-G
+
+This is a complex Newick string.
+
+>>> f = StringIO(u"[example](a:0.1, 'b_b''':0.2, (c:0.3, d_d:0.4)e:0.5)f:0.0;")
+>>> tree = read(f, format="newick", into=TreeNode)
+>>> f.close()
+>>> print(tree.ascii_art())
+          /-a
+         |
+-f-------|--b_b'
+         |
+         |          /-c
+          \e-------|
+                    \-d d
+
+Notice that the node originally labeled ``d_d`` became ``d d``. Additionally
+``'b_b'''`` became ``b_b'``. Note that the underscore was preserved in `b_b'`.
+
+References
+----------
+.. [1] http://evolution.genetics.washington.edu/phylip/newick_doc.html
+.. [2] http://evolution.genetics.washington.edu/phylip/newicktree.html
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from future.builtins import zip, range
+
+from skbio.io import (register_reader, register_writer, register_sniffer,
+                      NewickFormatError)
+from skbio.tree import TreeNode
+
+
@register_sniffer("newick")
def _newick_sniffer(fh):
    """Guess whether `fh` contains newick data.

    Returns ``(True, {})`` when the first tokens are consistent with the
    newick grammar, otherwise ``(False, {})``. Empty files never sniff
    as newick.
    """
    # Strategy:
    #   The following conditions preclude a file from being newick:
    #       * It is an empty file.
    #       * There is whitespace inside of a label (handled by tokenizer)
    #       * : is followed by anything that is an operator
    #       * ( is not preceded immediately by , or another (
    #       * The parens are unbalanced when ; is found.
    #   If 100 tokens (or less if EOF occurs earlier) then it is probably
    #   newick, or at least we can't prove it isn't.
    operators = set(",;:()")
    empty = True
    last_token = ','
    indent = 0  # current parenthesis nesting depth
    try:
        # 100 tokens ought to be enough for anybody.
        for token, _ in zip(_tokenize_newick(fh), range(100)):
            if token not in operators:
                # Labels and lengths are acceptable anywhere the tokenizer
                # yields them; only operator sequences are constrained.
                pass
            elif token == ',' and last_token != ':' and indent > 0:
                pass
            elif token == ':' and last_token != ':':
                pass
            elif token == ';' and last_token != ':' and indent == 0:
                pass
            elif token == ')' and last_token != ':':
                indent -= 1
            elif token == '(' and (last_token == '(' or last_token == ','):
                indent += 1
            else:
                # Any other operator sequence violates the grammar.
                raise NewickFormatError()

            last_token = token
            empty = False

    except NewickFormatError:
        return False, {}
    return not empty, {}
+
+
@register_reader('newick', TreeNode)
def _newick_to_tree_node(fh, convert_underscores=True):
    """Parse the newick data in `fh` into a ``TreeNode``.

    Parameters
    ----------
    fh : iterable of str
        File handle (or any iterable of lines) containing a newick string.
    convert_underscores : bool, optional
        When True (default), underscores in unescaped labels are converted
        to spaces by the tokenizer.

    Returns
    -------
    TreeNode
        The root of the parsed tree.

    Raises
    ------
    NewickFormatError
        If the stream cannot be parsed as newick (unbalanced parentheses,
        quotes, or comments, non-numeric length, or missing root).
    """
    # Stack of (node, depth) pairs for nodes that have not yet been
    # attached to a parent.
    tree_stack = []
    current_depth = 0
    last_token = ''
    next_is_distance = False
    root = TreeNode()
    tree_stack.append((root, current_depth))
    for token in _tokenize_newick(fh, convert_underscores=convert_underscores):
        # Check for a label
        if last_token not in '(,):':
            if not next_is_distance:
                tree_stack[-1][0].name = last_token if last_token else None
            else:
                next_is_distance = False
        # Check for a distance
        if token == ':':
            next_is_distance = True
        elif last_token == ':':
            try:
                tree_stack[-1][0].length = float(token)
            except ValueError:
                raise NewickFormatError("Could not read length as numeric type"
                                        ": %s." % token)

        elif token == '(':
            current_depth += 1
            tree_stack.append((TreeNode(), current_depth))
        elif token == ',':
            tree_stack.append((TreeNode(), current_depth))
        elif token == ')':
            if len(tree_stack) < 2:
                raise NewickFormatError("Could not parse file as newick."
                                        " Parenthesis are unbalanced.")
            children = []
            # Pop all nodes at this depth as they belong to the remaining
            # node on the top of the stack as children.
            while current_depth == tree_stack[-1][1]:
                node, _ = tree_stack.pop()
                children.insert(0, node)
            parent = tree_stack[-1][0]
            if parent.children:
                raise NewickFormatError("Could not parse file as newick."
                                        " Contains unnested children.")
            # This is much faster than TreeNode.extend
            for child in children:
                child.parent = parent
            parent.children = children
            current_depth -= 1
        elif token == ';':
            # A well-formed tree leaves only the root on the stack.
            if len(tree_stack) == 1:
                return root
            break

        last_token = token

    raise NewickFormatError("Could not parse file as newick."
                            " `(Parenthesis)`, `'single-quotes'`,"
                            " `[comments]` may be unbalanced, or tree may be"
                            " missing its root.")
+
+
@register_writer("newick", TreeNode)
def _tree_node_to_newick(obj, fh):
    """Write the tree rooted at `obj` to `fh` in newick format.

    Traversal uses an explicit stack rather than recursion, so very deep
    trees do not exhaust Python's call stack. Labels containing special
    characters are single-quoted; spaces in unquoted labels become
    underscores.
    """
    # Characters that force a label to be written quoted.
    operators = set(",:_;()[]")
    current_depth = 0
    # Stack of (node, depth) pairs still to be emitted.
    nodes_left = [(obj, 0)]
    while len(nodes_left) > 0:
        entry = nodes_left.pop()
        node, node_depth = entry
        if node.children and node_depth >= current_depth:
            # First visit to an internal node: open its subtree and re-push
            # the node so its own label/length are emitted after its
            # children (children pushed reversed to preserve sibling order).
            fh.write('(')
            nodes_left.append(entry)
            nodes_left += ((child, node_depth + 1) for child in
                           reversed(node.children))
            current_depth = node_depth + 1
        else:
            if node_depth < current_depth:
                # Returning to a shallower node closes its subtree.
                fh.write(')')
                current_depth -= 1

            # Note we don't check for None because there is no way to represent
            # an empty string as a label in Newick. Therefore, both None and ''
            # are considered to be the absence of a label.
            if node.name:
                escaped = node.name.replace("'", "''")
                if any(t in operators for t in node.name):
                    fh.write("'")
                    fh.write(escaped)
                    fh.write("'")
                else:
                    # Unquoted labels encode spaces as underscores.
                    fh.write(escaped.replace(" ", "_"))
            if node.length is not None:
                fh.write(':')
                fh.write(str(node.length))
            if nodes_left and nodes_left[-1][1] == current_depth:
                # More siblings remain at this depth.
                fh.write(',')

    fh.write(';\n')
+
+
def _tokenize_newick(fh, convert_underscores=True):
    """Yield newick tokens from the lines of `fh`.

    Tokens are either one of the structure characters ``( ) , ; :`` or a
    label/length string. Comments (``[...]``, possibly nested) are consumed
    and never yielded. When `convert_underscores` is True, underscores in
    unquoted labels are converted to spaces.

    Raises
    ------
    NewickFormatError
        If unescaped whitespace occurs inside a label.
    """
    structure_tokens = set('(),;:')
    not_escaped = True      # False while inside a '...' quoted literal
    label_start = False     # True once characters have been buffered
    last_non_ws_char = ''
    last_char = ''
    comment_depth = 0       # nesting level of [...] comments
    metadata_buffer = []    # characters of the label/length being built
    # Strategy:
    # We will iterate by character.
    # Comments in newick are defined as:
    # [This is a comment]
    # Nested comments are allowed.
    #
    # The following characters indicate structure:
    #      ( ) , ; :
    #
    # Whitespace is never allowed in a newick label, so an exception will be
    # thrown.
    #
    # We use ' to indicate a literal string. It has the highest precedence of
    # any operator.
    for line in fh:
        for character in line:
            # We will start by handling the comment case.
            # This code branch will probably never execute in practice.
            # Using a comment_depth we can handle nested comments.
            # Additionally if we are inside an escaped literal string, then
            # we don't want to consider it a comment.
            if character == "[" and not_escaped:
                # Sometimes we might not want to nest a comment, so we will use
                # our escape character. This is not explicitly mentioned in
                # any format specification, but seems like what a reasonable
                # person might do.
                if last_non_ws_char != "'" or comment_depth == 0:
                    # Once again, only advance our depth if [ has not been
                    # escaped inside our comment.
                    comment_depth += 1
            if comment_depth > 0:
                # Same as above, but in reverse
                if character == "]" and last_non_ws_char != "'":
                    comment_depth -= 1
                last_non_ws_char = character
                continue
            # We are not in a comment block if we are below here.

            # If we are inside of an escaped string literal, then ( ) , ; are
            # meaningless to the structure.
            # Otherwise, we are ready to submit our metadata token.
            if not_escaped and character in structure_tokens:
                label_start = False
                metadata = ''.join(metadata_buffer)
                # If the following condition is True, then we must have just
                # closed a literal. We know this because last_non_ws_char is
                # either None or the last non-whitespace character.
                # last_non_ws_char is None when we have just escaped an escape
                # and at the first iteration.
                if last_non_ws_char == "'" or not convert_underscores:
                    # Make no modifications.
                    yield metadata
                elif metadata:
                    # Underscores are considered to be spaces when not in an
                    # escaped literal string.
                    yield metadata.replace('_', ' ')
                # Clear our buffer for the next metadata token and yield our
                # current structure token.
                metadata_buffer = []
                yield character
            # We will now handle escaped string literals.
            # They are inconvenient because any character inside of them is
            # valid, especially whitespace.
            # We also need to allow ' to be escaped by '. e.g. '' -> '
            elif character == "'":
                not_escaped = not not_escaped
                label_start = True
                if last_non_ws_char == "'":
                    # We are escaping our escape, so it should be added to our
                    # metadata_buffer which will represent some future token.
                    metadata_buffer.append(character)
                    # We do not want a running chain of overcounts, so we need
                    # to clear the last character and continue iteration from
                    # the top. Without this, the following would happen:
                    # ''' ' -> '' <open literal>
                    # What we want is:
                    # ''' ' -> '<open literal> <close literal>
                    last_non_ws_char = ''
                    last_char = ''
                    continue

            elif not character.isspace() or not not_escaped:
                if label_start and last_char.isspace() and not_escaped:
                    raise NewickFormatError("Newick files cannot have"
                                            " unescaped whitespace in their"
                                            " labels.")
                metadata_buffer.append(character)
                label_start = True

            # This is equivalent to an `else` however it prevents coverage from
            # mis-identifying the `continue` as uncalled because cpython will
            # optimize it to a jump that is slightly different from the normal
            # jump it would have done anyways.
            elif True:
                # Skip the last statement
                last_char = character
                continue

            last_char = character
            # This line is skipped in the following cases:
            #    * comment_depth > 0, i.e. we are in a comment.
            #    * We have just processed the sequence '' and we don't want
            #      the sequence ''' to result in ''.
            #    * We have encountered whitespace that is not properly escaped.
            last_non_ws_char = character
diff --git a/skbio/io/ordination.py b/skbio/io/ordination.py
new file mode 100644
index 0000000..28c5743
--- /dev/null
+++ b/skbio/io/ordination.py
@@ -0,0 +1,416 @@
+r"""
+Ordination results format (:mod:`skbio.io.ordination`)
+======================================================
+
+.. currentmodule:: skbio.io.ordination
+
+The ordination results file format (``ordination``) stores the results of an
+ordination method in a human-readable, text-based format. The format supports
+storing the results of various ordination methods available in scikit-bio,
+including (but not necessarily limited to) PCoA, CA, RDA, and CCA.
+
+Format Support
+--------------
+**Has Sniffer: Yes**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|Yes   |Yes   |:mod:`skbio.stats.ordination.OrdinationResults`                |
++------+------+---------------------------------------------------------------+
+
+Format Specification
+--------------------
+The format is text-based, consisting of six attributes that describe the
+ordination results:
+
+- ``Eigvals``: 1-D
+- ``Proportion explained``: 1-D
+- ``Species``: 2-D
+- ``Site``: 2-D
+- ``Biplot``: 2-D
+- ``Site constraints``: 2-D
+
+The attributes in the file *must* be in this order.
+
+Each attribute is defined in its own section of the file, where sections are
+separated by a blank (or whitespace-only) line. Each attribute begins with a
+header line, which contains the attribute's name (as listed above), followed by
+a tab character, followed by one or more tab-separated dimensions (integers)
+that describe the shape of the attribute's data.
+
+The attribute's data follows its header line, and is stored in tab-separated
+format. ``Species``, ``Site``, and ``Site constraints`` store species and site
+IDs, respectively, as the first column, followed by the 2-D data array.
+
+An example of this file format might look like::
+
+    Eigvals<tab>4
+    0.36<tab>0.18<tab>0.07<tab>0.08
+
+    Proportion explained<tab>4
+    0.46<tab>0.23<tab>0.10<tab>0.10
+
+    Species<tab>9<tab>4
+    Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
+    Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
+    Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
+    Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
+    Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
+    Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
+    Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
+    Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
+    Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
+
+    Site<tab>10<tab>4
+    Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
+    Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
+    Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
+    Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
+    Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
+    Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
+    Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
+    Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
+    Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
+    Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
+
+    Biplot<tab>3<tab>3
+    -0.16<tab>0.63<tab>0.76
+    -0.99<tab>0.06<tab>-0.04
+    0.18<tab>-0.97<tab>0.03
+
+    Site constraints<tab>10<tab>4
+    Site0<tab>0.69<tab>-3.08<tab>-0.32<tab>-1.24
+    Site1<tab>0.66<tab>-3.06<tab>0.23<tab>2.69
+    Site2<tab>0.63<tab>-3.04<tab>0.78<tab>-3.11
+    Site3<tab>1.10<tab>0.50<tab>-1.55<tab>0.66
+    Site4<tab>-0.97<tab>0.06<tab>-1.12<tab>-0.61
+    Site5<tab>1.05<tab>0.53<tab>-0.43<tab>0.28
+    Site6<tab>-1.02<tab>0.10<tab>-0.00<tab>-0.42
+    Site7<tab>0.99<tab>0.57<tab>0.67<tab>-0.00
+    Site8<tab>-1.08<tab>0.13<tab>1.11<tab>1.17
+    Site9<tab>0.94<tab>0.61<tab>1.79<tab>-1.28
+
+
+If a given result attribute is not present (e.g. ``Biplot``), it should still
+be defined and declare its dimensions as 0. For example::
+
+    Biplot<tab>0<tab>0
+
+All attributes are optional except for ``Eigvals``.
+
+Examples
+--------
+Assume we have the following tab-delimited text file storing the
+ordination results in ``ordination`` format::
+
+    Eigvals<tab>4
+    0.36<tab>0.18<tab>0.07<tab>0.08
+
+    Proportion explained<tab>4
+    0.46<tab>0.23<tab>0.10<tab>0.10
+
+    Species<tab>9<tab>4
+    Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
+    Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
+    Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
+    Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
+    Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
+    Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
+    Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
+    Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
+    Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
+
+    Site<tab>10<tab>4
+    Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
+    Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
+    Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
+    Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
+    Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
+    Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
+    Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
+    Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
+    Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
+    Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
+
+    Biplot<tab>0<tab>0
+
+    Site constraints<tab>0<tab>0
+
+Load the ordination results from the file:
+
+>>> from StringIO import StringIO
+>>> from skbio.stats.ordination import OrdinationResults
+>>> or_f = StringIO(
+...  "Eigvals\t4\n"
+...  "0.36\t0.18\t0.07\t0.08\n"
+...  "\n"
+...  "Proportion explained\t4\n"
+...  "0.46\t0.23\t0.10\t0.10\n"
+...  "\n"
+...  "Species\t9\t4\n"
+...  "Species0\t0.11\t0.28\t-0.20\t-0.00\n"
+...  "Species1\t0.14\t0.30\t0.39\t-0.14\n"
+...  "Species2\t-1.01\t0.09\t-0.19\t-0.10\n"
+...  "Species3\t-1.03\t0.10\t0.22\t0.22\n"
+...  "Species4\t1.05\t0.53\t-0.43\t0.22\n"
+...  "Species5\t0.99\t0.57\t0.67\t-0.38\n"
+...  "Species6\t0.25\t-0.17\t-0.20\t0.43\n"
+...  "Species7\t0.14\t-0.85\t-0.01\t0.05\n"
+...  "Species8\t0.41\t-0.70\t0.21\t-0.69\n"
+...  "\n"
+...  "Site\t10\t4\n"
+...  "Site0\t0.71\t-3.08\t0.21\t-1.24\n"
+...  "Site1\t0.58\t-3.00\t-0.94\t2.69\n"
+...  "Site2\t0.76\t-3.15\t2.13\t-3.11\n"
+...  "Site3\t1.11\t1.07\t-1.87\t0.66\n"
+...  "Site4\t-0.97\t-0.06\t-0.69\t-0.61\n"
+...  "Site5\t1.04\t0.45\t-0.63\t0.28\n"
+...  "Site6\t-0.95\t-0.08\t0.13\t-0.42\n"
+...  "Site7\t0.94\t-0.10\t0.52\t-0.00\n"
+...  "Site8\t-1.14\t0.49\t0.47\t1.17\n"
+...  "Site9\t1.03\t1.03\t2.74\t-1.28\n"
+...  "\n"
+...  "Biplot\t0\t0\n"
+...  "\n"
+...  "Site constraints\t0\t0\n")
+>>> ord_res = OrdinationResults.read(or_f)
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import zip
+
+import numpy as np
+
+from skbio.stats.ordination import OrdinationResults
+from skbio.io import (register_reader, register_writer, register_sniffer,
+                      OrdinationFormatError)
+
+
@register_sniffer('ordination')
def _ordination_sniffer(fh):
    """Guess whether `fh` is an ordination results file.

    Returns ``(True, {})`` when the beginning of the file matches the
    expected section layout, otherwise ``(False, {})``.
    """
    # Smells an ordination file if *all* of the following lines are present
    # *from the beginning* of the file:
    #   - eigvals header (minimally parsed)
    #   - another line (contents ignored)
    #   - a whitespace-only line
    #   - proportion explained header (minimally parsed)
    try:
        _parse_header(fh, 'Eigvals', 1)
        next_line = next(fh, None)

        if next_line is not None:
            _check_empty_line(fh)
            _parse_header(fh, 'Proportion explained', 1)
            return True, {}
    except OrdinationFormatError:
        # Any structural mismatch simply means "not this format".
        pass

    return False, {}
+
+
@register_reader('ordination', OrdinationResults)
def _ordination_to_ordination_results(fh):
    """Read an ordination-format file `fh` into an ``OrdinationResults``.

    Sections must appear in the fixed order Eigvals, Proportion explained,
    Species, Site, Biplot, Site constraints, separated by blank lines.

    Raises
    ------
    OrdinationFormatError
        If a section is missing, malformed, inconsistent in length with
        the eigvals, or if site-constraint IDs disagree with site IDs.
    """
    eigvals = _parse_vector_section(fh, 'Eigvals')
    if eigvals is None:
        raise OrdinationFormatError("At least one eigval must be present.")
    _check_empty_line(fh)

    prop_expl = _parse_vector_section(fh, 'Proportion explained')
    _check_length_against_eigvals(prop_expl, eigvals,
                                  'proportion explained values')
    _check_empty_line(fh)

    species, species_ids = _parse_array_section(fh, 'Species')
    _check_length_against_eigvals(species, eigvals,
                                  'coordinates per species')
    _check_empty_line(fh)

    site, site_ids = _parse_array_section(fh, 'Site')
    _check_length_against_eigvals(site, eigvals,
                                  'coordinates per site')
    _check_empty_line(fh)

    # biplot does not have ids to parse (the other arrays do)
    biplot, _ = _parse_array_section(fh, 'Biplot', has_ids=False)
    _check_empty_line(fh)

    cons, cons_ids = _parse_array_section(fh, 'Site constraints')

    # When both sections are present, their row IDs must agree exactly.
    if cons_ids is not None and site_ids is not None:
        if cons_ids != site_ids:
            raise OrdinationFormatError(
                "Site constraints ids and site ids must be equal: %s != %s" %
                (cons_ids, site_ids))

    return OrdinationResults(
        eigvals=eigvals, species=species, site=site, biplot=biplot,
        site_constraints=cons, proportion_explained=prop_expl,
        species_ids=species_ids, site_ids=site_ids)
+
+
+def _parse_header(fh, header_id, num_dimensions):
+    line = next(fh, None)
+    if line is None:
+        raise OrdinationFormatError(
+            "Reached end of file while looking for %s header." % header_id)
+
+    header = line.strip().split('\t')
+    # +1 for the header ID
+    if len(header) != num_dimensions + 1 or header[0] != header_id:
+        raise OrdinationFormatError("%s header not found." % header_id)
+    return header
+
+
+def _check_empty_line(fh):
+    """Check that the next line in `fh` is empty or whitespace-only."""
+    line = next(fh, None)
+    if line is None:
+        raise OrdinationFormatError(
+            "Reached end of file while looking for blank line separating "
+            "sections.")
+
+    if line.strip():
+        raise OrdinationFormatError("Expected an empty line.")
+
+
+def _check_length_against_eigvals(data, eigvals, label):
+    if data is not None:
+        num_vals = data.shape[-1]
+        num_eigvals = eigvals.shape[-1]
+
+        if num_vals != num_eigvals:
+            raise OrdinationFormatError(
+                "There should be as many %s as eigvals: %d != %d" %
+                (label, num_vals, num_eigvals))
+
+
def _parse_vector_section(fh, header_id):
    """Parse a 1-D section (e.g. ``Eigvals``) of `fh`.

    Returns a float64 array of the declared length, or ``None`` when the
    header declares 0 values (the ordination method did not produce this
    vector). Raises ``OrdinationFormatError`` on truncation or a length
    mismatch.
    """
    header = _parse_header(fh, header_id, 1)

    # The single dimension in the header tells us how many values follow.
    num_vals = int(header[1])
    if num_vals == 0:
        return None

    # The values live on the next line, tab-separated.
    line = next(fh, None)
    if line is None:
        raise OrdinationFormatError(
            "Reached end of file while looking for line containing values "
            "for %s section." % header_id)

    vals = np.asarray(line.strip().split('\t'), dtype=np.float64)
    if len(vals) != num_vals:
        raise OrdinationFormatError(
            "Expected %d values in %s section, but found %d." %
            (num_vals, header_id, len(vals)))
    return vals
+
+
def _parse_array_section(fh, header_id, has_ids=True):
    """Parse an array section of `fh` identified by `header_id`.

    Returns a ``(data, ids)`` tuple: ``data`` is a 2-D float64 array (or
    None when the header declares a 0 x 0 array, i.e. the section is
    absent) and ``ids`` is the list of row identifiers (or None when
    `has_ids` is False or the array is absent).
    """
    header = _parse_header(fh, header_id, 2)

    # The header declares the array dimensions: rows x cols.
    rows = int(header[1])
    cols = int(header[2])

    if rows == 0 and cols == 0:
        # The ordination method didn't generate the array data for this
        # section.
        return None, None
    if rows == 0 or cols == 0:
        # Either both dimensions are zero (absent array) or neither is.
        raise OrdinationFormatError("One dimension of %s is 0: %d x %d" %
                                    (header_id, rows, cols))

    data = np.empty((rows, cols), dtype=np.float64)
    ids = [] if has_ids else None

    for row_idx in range(rows):
        line = next(fh, None)
        if line is None:
            raise OrdinationFormatError(
                "Reached end of file while looking for row %d in %s "
                "section." % (row_idx + 1, header_id))
        fields = line.strip().split('\t')

        if has_ids:
            # The first field of each row is the row's identifier.
            ids.append(fields[0])
            fields = fields[1:]

        if len(fields) != cols:
            raise OrdinationFormatError(
                "Expected %d values, but found %d in row %d." %
                (cols, len(fields), row_idx + 1))
        data[row_idx, :] = np.asarray(fields, dtype=np.float64)
    return data, ids
+
+
@register_writer('ordination', OrdinationResults)
def _ordination_results_to_ordination(obj, fh):
    """Write an ``OrdinationResults`` object to `fh` in ordination format."""
    # The two vector sections are written first.
    for header, vector in (('Eigvals', obj.eigvals),
                           ('Proportion explained', obj.proportion_explained)):
        _write_vector_section(fh, header, vector)

    # Array sections follow. 'Biplot' carries no row identifiers, and the
    # final section omits the trailing blank-line separator.
    _write_array_section(fh, 'Species', obj.species, obj.species_ids)
    _write_array_section(fh, 'Site', obj.site, obj.site_ids)
    _write_array_section(fh, 'Biplot', obj.biplot)
    _write_array_section(fh, 'Site constraints', obj.site_constraints,
                         obj.site_ids, include_section_separator=False)
+
+
+def _write_vector_section(fh, header_id, vector):
+    if vector is None:
+        shape = 0
+    else:
+        shape = vector.shape[0]
+    fh.write("%s\t%d\n" % (header_id, shape))
+
+    if vector is not None:
+        fh.write(_format_vector(vector))
+    fh.write("\n")
+
+
+def _write_array_section(fh, header_id, data, ids=None,
+                         include_section_separator=True):
+    # write section header
+    if data is None:
+        shape = (0, 0)
+    else:
+        shape = data.shape
+    fh.write("%s\t%d\t%d\n" % (header_id, shape[0], shape[1]))
+
+    # write section data
+    if data is not None:
+        if ids is None:
+            for vals in data:
+                fh.write(_format_vector(vals))
+        else:
+            for id_, vals in zip(ids, data):
+                fh.write(_format_vector(vals, id_))
+
+    if include_section_separator:
+        fh.write("\n")
+
+
+def _format_vector(vector, id_=None):
+    formatted_vector = '\t'.join(np.asarray(vector, dtype=np.str))
+
+    if id_ is None:
+        return "%s\n" % formatted_vector
+    else:
+        return "%s\t%s\n" % (id_, formatted_vector)
diff --git a/skbio/io/phylip.py b/skbio/io/phylip.py
new file mode 100644
index 0000000..3954cc6
--- /dev/null
+++ b/skbio/io/phylip.py
@@ -0,0 +1,245 @@
+"""
+PHYLIP multiple sequence alignment format (:mod:`skbio.io.phylip`)
+==================================================================
+
+.. currentmodule:: skbio.io.phylip
+
+The PHYLIP file format stores a multiple sequence alignment. The format was
+originally defined and used in Joe Felsenstein's PHYLIP package [1]_, and has
+since been supported by several other bioinformatics tools (e.g., RAxML [2]_).
+See [3]_ for the original format description, and [4]_ and [5]_ for additional
+descriptions.
+
+An example PHYLIP-formatted file taken from [3]_::
+
+          5    42
+    Turkey    AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
+    Salmo gairAAGCCTTGGC AGTGCAGGGT GAGCCGTGGC CGGGCACGGT AT
+    H. SapiensACCGGTTGGC CGTTCAGGGT ACAGGTTGGC CGTTCAGGGT AA
+    Chimp     AAACCCTTGC CGTTACGCTT AAACCGAGGC CGGGACACTC AT
+    Gorilla   AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
+
+.. note:: Original copyright notice for the above PHYLIP file:
+
+   *(c) Copyright 1986-2008 by The University of Washington. Written by Joseph
+   Felsenstein. Permission is granted to copy this document provided that no
+   fee is charged for it and that this copyright notice is not removed.*
+
+Format Support
+--------------
+**Has Sniffer: No**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|No    |Yes   |:mod:`skbio.alignment.Alignment`                               |
++------+------+---------------------------------------------------------------+
+
+Format Specification
+--------------------
+PHYLIP format is a plain text format containing exactly two sections: a header
+describing the dimensions of the alignment, followed by the multiple sequence
+alignment itself.
+
+The format described here is "strict" PHYLIP, as described in [4]_. Strict
+PHYLIP requires that each sequence identifier is exactly 10 characters long
+(padded with spaces as necessary). Other bioinformatics tools (e.g., RAxML) may
+relax this rule to allow for longer sequence identifiers. See the
+**Alignment Section** below for more details.
+
+The format described here is "sequential" format. The original PHYLIP format
+specification [3]_ describes both sequential and interleaved formats.
+
+.. note:: scikit-bio currently only supports writing strict, sequential
+   PHYLIP-formatted files from an ``skbio.alignment.Alignment``. It does not
+   yet support reading PHYLIP-formatted files, nor does it support relaxed or
+   interleaved PHYLIP formats.
+
+Header Section
+^^^^^^^^^^^^^^
+The header consists of a single line describing the dimensions of the
+alignment. It **must** be the first line in the file. The header consists of
+optional spaces, followed by two positive integers (``n`` and ``m``) separated
+by one or more spaces. The first integer (``n``) specifies the number of
+sequences (i.e., the number of rows) in the alignment. The second integer
+(``m``) specifies the length of the sequences (i.e., the number of columns) in
+the alignment. The smallest supported alignment dimensions are 1x1.
+
+.. note:: scikit-bio will write the PHYLIP format header *without* preceding
+   spaces, and with only a single space between ``n`` and ``m``.
+
+   PHYLIP format *does not* support blank line(s) between the header and the
+   alignment.
+
+Alignment Section
+^^^^^^^^^^^^^^^^^
+The alignment section immediately follows the header. It consists of ``n``
+lines (rows), one for each sequence in the alignment. Each row consists of a
+sequence identifier (ID) and characters in the sequence, in fixed width format.
+
+The sequence ID can be up to 10 characters long. IDs less than 10 characters
+must have spaces appended to them to reach the 10 character fixed width. Within
+an ID, all characters except newlines are supported, including spaces,
+underscores, and numbers.
+
+.. note:: While not explicitly stated in the original PHYLIP format
+   description, scikit-bio only supports writing unique sequence identifiers
+   (i.e., duplicates are not allowed). Uniqueness is required because an
+   ``skbio.alignment.Alignment`` cannot be created with duplicate IDs.
+
+   scikit-bio supports the empty string (``''``) as a valid sequence ID. An
+   empty ID will be padded with 10 spaces.
+
+Sequence characters immediately follow the sequence ID. They *must* start at
+the 11th character in the line, as the first 10 characters are reserved for the
+sequence ID. While PHYLIP format does not explicitly restrict the set of
+supported characters that may be used to represent a sequence, the original
+format description [3]_ specifies the IUPAC nucleic acid lexicon for DNA or RNA
+sequences, and the IUPAC protein lexicon for protein sequences. The original
+PHYLIP specification uses ``-`` as a gap character, though older versions also
+supported ``.``. The sequence characters may contain optional spaces (e.g., to
+improve readability), and both upper and lower case characters are supported.
+
+.. note:: scikit-bio will write a PHYLIP-formatted file even if the alignment's
+   sequence characters are not valid IUPAC characters. This differs from the
+   PHYLIP specification, which states that a PHYLIP-formatted file can only
+   contain valid IUPAC characters. To check whether all characters are valid
+   before writing, the user can call ``Alignment.is_valid()``.
+
+   Since scikit-bio supports both ``-`` and ``.`` as gap characters (e.g., in
+   ``skbio.alignment.Alignment``), both are supported when writing a
+   PHYLIP-formatted file.
+
+   When writing a PHYLIP-formatted file, scikit-bio will split up each sequence
+   into chunks that are 10 characters long. Each chunk will be separated by a
+   single space. The sequence will always appear on a single line (sequential
+   format). It will *not* be wrapped across multiple lines. Sequences are
+   chunked in this manner for improved readability, and because most example
+   PHYLIP files are chunked in a similar way (e.g., see the example file
+   above). Note that this chunking is not required by the PHYLIP format.
+
+Examples
+--------
+Let's create an alignment with three DNA sequences of equal length:
+
+>>> from skbio import Alignment, DNA
+>>> seqs = [DNA('ACCGTTGTA-GTAGCT', id='seq1'),
+...         DNA('A--GTCGAA-GTACCT', id='sequence-2'),
+...         DNA('AGAGTTGAAGGTATCT', id='3')]
+>>> aln = Alignment(seqs)
+>>> aln
+<Alignment: n=3; mean +/- std length=16.00 +/- 0.00>
+
+Now let's write the alignment to file in PHYLIP format, and take a look at the
+output:
+
+>>> from StringIO import StringIO
+>>> fh = StringIO()
+>>> aln.write(fh, format='phylip')
+>>> print(fh.getvalue())
+3 16
+seq1      ACCGTTGTA- GTAGCT
+sequence-2A--GTCGAA- GTACCT
+3         AGAGTTGAAG GTATCT
+<BLANKLINE>
+>>> fh.close()
+
+Notice that the 16-character sequences were split into two chunks, and that
+each sequence appears on a single line (sequential format). Also note that each
+sequence ID is padded with spaces to 10 characters in order to produce a fixed
+width column.
+
+If the sequence IDs in an alignment surpass the 10-character limit, an error
+will be raised when we try to write a PHYLIP file:
+
+>>> long_id_seqs = [DNA('ACCGT', id='seq1'),
+...                 DNA('A--GT', id='long-sequence-2'),
+...                 DNA('AGAGT', id='seq3')]
+>>> long_id_aln = Alignment(long_id_seqs)
+>>> fh = StringIO()
+>>> long_id_aln.write(fh, format='phylip')
+Traceback (most recent call last):
+    ...
+PhylipFormatError: Alignment can only be written in PHYLIP format if all \
+sequence IDs have 10 or fewer characters. Found sequence with ID \
+'long-sequence-2' that exceeds this limit. Use Alignment.update_ids to assign \
+shorter IDs.
+>>> fh.close()
+
+One way to work around this is to update the IDs to be shorter. The recommended
+way of accomplishing this is via ``Alignment.update_ids``, which provides a
+flexible way of creating a new ``Alignment`` with updated IDs. For example, to
+remap each of the IDs to integer-based IDs:
+
+>>> short_id_aln, _ = long_id_aln.update_ids()
+>>> short_id_aln.ids()
+['1', '2', '3']
+
+We can now write the new alignment in PHYLIP format:
+
+>>> fh = StringIO()
+>>> short_id_aln.write(fh, format='phylip')
+>>> print(fh.getvalue())
+3 5
+1         ACCGT
+2         A--GT
+3         AGAGT
+<BLANKLINE>
+>>> fh.close()
+
+References
+----------
+.. [1] http://evolution.genetics.washington.edu/phylip.html
+.. [2] "RAxML Version 8: A tool for Phylogenetic Analysis and
+   Post-Analysis of Large Phylogenies". In Bioinformatics, 2014
+.. [3] http://evolution.genetics.washington.edu/phylip/doc/sequence.html
+.. [4] http://www.phylo.org/tools/obsolete/phylip.html
+.. [5] http://www.bioperl.org/wiki/PHYLIP_multiple_alignment_format
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from skbio.alignment import Alignment
+from skbio.io import register_writer, PhylipFormatError
+from skbio.io._base import _chunk_str
+
+
@register_writer('phylip', Alignment)
def _alignment_to_phylip(obj, fh):
    """Write an ``Alignment`` to `fh` in strict, sequential PHYLIP format.

    Raises ``PhylipFormatError`` when the alignment is empty, has zero
    positions, or contains a sequence ID longer than 10 characters.
    """
    if obj.is_empty():
        raise PhylipFormatError(
            "Alignment can only be written in PHYLIP format if there is at "
            "least one sequence in the alignment.")

    sequence_length = obj.sequence_length()
    if sequence_length == 0:
        raise PhylipFormatError(
            "Alignment can only be written in PHYLIP format if there is at "
            "least one position in the alignment.")

    # Strict PHYLIP reserves a fixed-width, 10-character column for IDs;
    # the same width is reused below for chunking the sequence characters.
    chunk_size = 10
    for seq_id in obj.ids():
        if len(seq_id) > chunk_size:
            raise PhylipFormatError(
                "Alignment can only be written in PHYLIP format if all "
                "sequence IDs have %d or fewer characters. Found sequence "
                "with ID '%s' that exceeds this limit. Use "
                "Alignment.update_ids to assign shorter IDs." %
                (chunk_size, seq_id))

    # Header line: number of sequences, then alignment length.
    fh.write('{0:d} {1:d}\n'.format(obj.sequence_count(), sequence_length))

    # One line per sequence: ID left-justified in the fixed-width column,
    # followed by the sequence split into space-separated chunks.
    row_fmt = '{0:%d}{1}\n' % chunk_size
    for seq in obj:
        fh.write(row_fmt.format(seq.id, _chunk_str(str(seq), chunk_size, ' ')))
diff --git a/skbio/io/qseq.py b/skbio/io/qseq.py
new file mode 100644
index 0000000..e8a67c0
--- /dev/null
+++ b/skbio/io/qseq.py
@@ -0,0 +1,253 @@
+r"""
+QSeq format (:mod:`skbio.io.qseq`)
+==================================
+
+.. currentmodule:: skbio.io.qseq
+
+The QSeq format (`qseq`) is a record-based, plain text output format produced
+by some DNA sequencers for storing biological sequence data, quality scores,
+per-sequence filtering information, and run-specific metadata.
+
+Format Support
+--------------
+**Has Sniffer: Yes**
+
++------+------+---------------------------------------------------------------+
+|Reader|Writer|                          Object Class                         |
++======+======+===============================================================+
+|Yes   |No    |generator of :mod:`skbio.sequence.BiologicalSequence` objects  |
++------+------+---------------------------------------------------------------+
+|Yes   |No    |:mod:`skbio.alignment.SequenceCollection`                      |
++------+------+---------------------------------------------------------------+
+|Yes   |No    |:mod:`skbio.sequence.BiologicalSequence`                       |
++------+------+---------------------------------------------------------------+
+|Yes   |No    |:mod:`skbio.sequence.NucleotideSequence`                       |
++------+------+---------------------------------------------------------------+
+|Yes   |No    |:mod:`skbio.sequence.DNASequence`                              |
++------+------+---------------------------------------------------------------+
+|Yes   |No    |:mod:`skbio.sequence.RNASequence`                              |
++------+------+---------------------------------------------------------------+
+|Yes   |No    |:mod:`skbio.sequence.ProteinSequence`                          |
++------+------+---------------------------------------------------------------+
+
+Format Specification
+--------------------
+A QSeq file is composed of single-line records, delimited by tabs. There are
+11 fields in a record:
+
+- Machine name
+- Run number
+- Lane number (positive int)
+- Tile number (positive int)
+- X coordinate (integer)
+- Y coordinate (integer)
+- Index
+- Read number (1-3)
+- Sequence data (typically IUPAC characters)
+- Quality scores (quality scores encoded as printable ASCII)
+- Filter boolean (1 if sequence has passed CASAVA's filter, 0 otherwise)
+
+For more details please refer to the CASAVA documentation [1]_.
+
+.. note:: scikit-bio allows for the filter field to be omitted, but it is not
+   clear if this is part of the original format specification.
+
+Format Parameters
+-----------------
+The following parameters are the same as in FASTQ format
+(:mod:`skbio.io.fastq`):
+
+- ``variant``: see ``variant`` parameter in FASTQ format
+- ``phred_offset``: see ``phred_offset`` parameter in FASTQ format
+
+The following additional parameters are the same as in FASTA format
+(:mod:`skbio.io.fasta`):
+
+- ``constructor``: see ``constructor`` parameter in FASTA format
+- ``seq_num``: see ``seq_num`` parameter in FASTA format
+
+SequenceCollection and Generators Only
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+- ``filter``: If `True`, excludes sequences that did not pass filtering
+  (i.e., filter field is 0). Default is `True`.
+
+Examples
+--------
+Suppose we have the following QSeq file::
+
+    illumina	1	3	34	-30	30	0	1	ACG....ACGTAC	ruBBBBrBCEFGH	1
+    illumina	1	3	34	30	-30	0	1	CGGGCATTGCA	CGGGCasdGCA	0
+    illumina	1	3	35	-30	30	0	2	ACGTA.AATAAAC	geTaAafhwqAAf	1
+    illumina	1	3	35	30	-30	0	3	CATTTAGGA.TGCA	tjflkAFnkKghvM	0
+
+Let's define this file in-memory as a ``StringIO``, though this could be a real
+file path, file handle, or anything that's supported by scikit-bio's I/O
+registry in practice:
+
+>>> from StringIO import StringIO
+>>> fs = '\n'.join([
+...     'illumina\t1\t3\t34\t-30\t30\t0\t1\tACG....ACGTAC\truBBBBrBCEFGH\t1',
+...     'illumina\t1\t3\t34\t30\t-30\t0\t1\tCGGGCATTGCA\tCGGGCasdGCA\t0',
+...     'illumina\t1\t3\t35\t-30\t30\t0\t2\tACGTA.AATAAAC\tgeTaAafhwqAAf\t1',
+...     'illumina\t1\t3\t35\t30\t-30\t0\t3\tCATTTAGGA.TGCA\ttjflkAFnkKghvM\t0'
+... ])
+>>> fh = StringIO(fs)
+
+To load the sequences into a ``SequenceCollection``, we run:
+
+>>> from skbio import SequenceCollection
+>>> sc = SequenceCollection.read(fh, variant='illumina1.3')
+>>> sc
+<SequenceCollection: n=2; mean +/- std length=13.00 +/- 0.00>
+
+Note that only two sequences were loaded because the QSeq reader filters out
+sequences whose filter field is 0 (unless ``filter=False`` is supplied).
+
+References
+----------
+.. [1] http://biowulf.nih.gov/apps/CASAVA_UG_15011196B.pdf
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from future.builtins import zip, range
+
+from skbio.io import register_reader, register_sniffer, QSeqFormatError
+from skbio.io._base import _decode_qual_to_phred, _get_nth_sequence
+from skbio.alignment import SequenceCollection
+from skbio.sequence import (BiologicalSequence, NucleotideSequence,
+                            DNASequence, RNASequence, ProteinSequence)
+
+_default_phred_offset = None
+_default_variant = None
+_will_filter = True
+
+
@register_sniffer('qseq')
def _qseq_sniffer(fh):
    """Guess whether `fh` is in QSeq format.

    Up to the first 10 lines are parsed; the file sniffs positive when at
    least one line parses as a QSeq record and none of them fail.
    """
    saw_record = False
    try:
        for _, line in zip(range(10), fh):
            # _record_parser raises QSeqFormatError on a malformed line.
            _record_parser(line)
            saw_record = True
        return saw_record, {}
    except QSeqFormatError:
        return False, {}
+
+
@register_reader('qseq')
def _qseq_to_generator(fh, constructor=BiologicalSequence, filter=_will_filter,
                       phred_offset=_default_phred_offset,
                       variant=_default_variant):
    """Yield one sequence object per QSeq record in `fh`.

    When `filter` is True, records that failed the machine's quality
    filter (filter field of 0) are skipped.
    """
    for line in fh:
        (machine_name, run, lane, tile, x, y, index, read, seq, raw_qual,
         filtered) = _record_parser(line)
        if filter and filtered:
            # Record failed CASAVA filtering and filtering is enabled.
            continue
        phred = _decode_qual_to_phred(raw_qual, variant, phred_offset)
        # Sequence ID mirrors the Illumina read-identifier convention.
        seq_id = '%s_%s:%s:%s:%s:%s#%s/%s' % (
            machine_name, run, lane, tile, x, y, index, read)
        yield constructor(seq, quality=phred, id=seq_id)
+
+
@register_reader('qseq', SequenceCollection)
def _qseq_to_sequence_collection(fh, constructor=BiologicalSequence,
                                 filter=_will_filter,
                                 phred_offset=_default_phred_offset,
                                 variant=_default_variant):
    """Read every QSeq record in `fh` into a ``SequenceCollection``."""
    seqs = _qseq_to_generator(fh, constructor=constructor, filter=filter,
                              phred_offset=phred_offset, variant=variant)
    return SequenceCollection(list(seqs))
+
+
@register_reader('qseq', BiologicalSequence)
def _qseq_to_biological_sequence(fh, seq_num=1,
                                 phred_offset=_default_phred_offset,
                                 variant=_default_variant):
    """Read the `seq_num`-th QSeq record in `fh` as a ``BiologicalSequence``.

    Filtering is disabled so record numbering matches the file's lines.
    """
    records = _qseq_to_generator(fh, filter=False,
                                 constructor=BiologicalSequence,
                                 phred_offset=phred_offset, variant=variant)
    return _get_nth_sequence(records, seq_num)
+
+
@register_reader('qseq', NucleotideSequence)
def _qseq_to_nucleotide_sequence(fh, seq_num=1,
                                 phred_offset=_default_phred_offset,
                                 variant=_default_variant):
    """Read the `seq_num`-th QSeq record in `fh` as a ``NucleotideSequence``.

    Filtering is disabled so record numbering matches the file's lines.
    """
    records = _qseq_to_generator(fh, filter=False,
                                 constructor=NucleotideSequence,
                                 phred_offset=phred_offset, variant=variant)
    return _get_nth_sequence(records, seq_num)
+
+
@register_reader('qseq', DNASequence)
def _qseq_to_dna_sequence(fh, seq_num=1,
                          phred_offset=_default_phred_offset,
                          variant=_default_variant):
    """Read the `seq_num`-th QSeq record in `fh` as a ``DNASequence``.

    Filtering is disabled so record numbering matches the file's lines.
    """
    records = _qseq_to_generator(fh, filter=False, constructor=DNASequence,
                                 phred_offset=phred_offset, variant=variant)
    return _get_nth_sequence(records, seq_num)
+
+
@register_reader('qseq', RNASequence)
def _qseq_to_rna_sequence(fh, seq_num=1,
                          phred_offset=_default_phred_offset,
                          variant=_default_variant):
    """Read the `seq_num`-th QSeq record in `fh` as an ``RNASequence``.

    Filtering is disabled so record numbering matches the file's lines.
    """
    records = _qseq_to_generator(fh, filter=False, constructor=RNASequence,
                                 phred_offset=phred_offset, variant=variant)
    return _get_nth_sequence(records, seq_num)
+
+
@register_reader('qseq', ProteinSequence)
def _qseq_to_protein_sequence(fh, seq_num=1,
                              phred_offset=_default_phred_offset,
                              variant=_default_variant):
    """Read the `seq_num`-th QSeq record in `fh` as a ``ProteinSequence``.

    Filtering is disabled so record numbering matches the file's lines.
    """
    records = _qseq_to_generator(fh, filter=False,
                                 constructor=ProteinSequence,
                                 phred_offset=phred_offset, variant=variant)
    return _get_nth_sequence(records, seq_num)
+
+
def _record_parser(line):
    """Parse one QSeq record line into its component fields.

    Returns a tuple of the 10 raw string fields (machine, run, lane, tile,
    x, y, index, read, sequence, quality) followed by a boolean that is
    True when the record *failed* CASAVA filtering (filter field == '0').

    Raises ``QSeqFormatError`` when the line is blank, has the wrong
    number of fields, or any field fails validation.
    """
    stripped = line.rstrip('\n')
    if not stripped:
        raise QSeqFormatError('Found blank line.')
    fields = stripped.split('\t')

    f_len = len(fields)
    if not (10 <= f_len <= 11):
        raise QSeqFormatError('Expected 10 or 11 fields, found %d.' % f_len)
    # If the filter field was omitted, assume that the record passed
    # filtering:
    if f_len == 10:
        fields.append('1')

    (machine, run, lane, tile, x, y, index, read, seq, raw_qual,
     filter_field) = fields

    # Use tuple membership instead of the original substring tests:
    # ``'' in '01'`` and ``'12' in '123'`` are both True, so the substring
    # form wrongly accepted empty and multi-character filter/read fields.
    _test_fields([('filter', filter_field)], lambda v: v in ('0', '1'),
                 "0 or 1")

    _test_fields([('read', read)], lambda v: v in ('1', '2', '3'),
                 "in the range [1, 3]")

    # int() raises ValueError (caught by _test_fields) on non-integers.
    _test_fields([('x', x), ('y', y)], lambda v: int(v) is not None,
                 "an integer")

    _test_fields([('lane', lane), ('tile', tile)], lambda v: int(v) >= 0,
                 "a positive integer")

    return (machine, run, lane, tile, x, y, index, read, seq, raw_qual,
            filter_field == '0')
+
+
+def _test_fields(iterkv, test, efrag):
+    try:
+        for k, v in iterkv:
+            if not test(v):
+                raise ValueError()
+    except ValueError:
+        raise QSeqFormatError('Field %r is not %s.' % (k, efrag))
diff --git a/skbio/io/tests/__init__.py b/skbio/io/tests/__init__.py
new file mode 100644
index 0000000..0bf0c55
--- /dev/null
+++ b/skbio/io/tests/__init__.py
@@ -0,0 +1,7 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/io/tests/data/empty b/skbio/io/tests/data/empty
new file mode 100644
index 0000000..e69de29
diff --git a/skbio/io/tests/data/error_diff_ids.fastq b/skbio/io/tests/data/error_diff_ids.fastq
new file mode 100644
index 0000000..7e1fa03
--- /dev/null
+++ b/skbio/io/tests/data/error_diff_ids.fastq
@@ -0,0 +1,20 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_124
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYYWYYYYWYWWUWWWQQ
diff --git a/skbio/io/tests/data/error_double_qual.fastq b/skbio/io/tests/data/error_double_qual.fastq
new file mode 100644
index 0000000..6c7af1b
--- /dev/null
+++ b/skbio/io/tests/data/error_double_qual.fastq
@@ -0,0 +1,22 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYYWYYYYWYWWUWWWQQ
diff --git a/skbio/io/tests/data/error_double_seq.fastq b/skbio/io/tests/data/error_double_seq.fastq
new file mode 100644
index 0000000..c116e35
--- /dev/null
+++ b/skbio/io/tests/data/error_double_seq.fastq
@@ -0,0 +1,22 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYYWYYYYWYWWUWWWQQ
diff --git a/skbio/io/tests/data/error_long_qual.fastq b/skbio/io/tests/data/error_long_qual.fastq
new file mode 100644
index 0000000..90933c4
--- /dev/null
+++ b/skbio/io/tests/data/error_long_qual.fastq
@@ -0,0 +1,20 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWYY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYYWYYYYWYWWUWWWQQ
diff --git a/skbio/io/tests/data/error_no_qual.fastq b/skbio/io/tests/data/error_no_qual.fastq
new file mode 100644
index 0000000..99e7f46
--- /dev/null
+++ b/skbio/io/tests/data/error_no_qual.fastq
@@ -0,0 +1,20 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGC
++SLXA-B3_649_FC8437_R1_1_1_850_123
+
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+
diff --git a/skbio/io/tests/data/error_qual_del.fastq b/skbio/io/tests/data/error_qual_del.fastq
new file mode 100644
index 0000000..3e24a25
--- /dev/null
+++ b/skbio/io/tests/data/error_qual_del.fastq
@@ -0,0 +1,20 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYYWYYYYWYWWUWWWQQ
diff --git a/skbio/io/tests/data/error_qual_escape.fastq b/skbio/io/tests/data/error_qual_escape.fastq
new file mode 100644
index 0000000..1bc95ed
--- /dev/null
+++ b/skbio/io/tests/data/error_qual_escape.fastq
@@ -0,0 +1,20 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYWYYYYWYWWUWWWQQ
diff --git a/skbio/io/tests/data/error_qual_null.fastq b/skbio/io/tests/data/error_qual_null.fastq
new file mode 100644
index 0000000..fcb03de
Binary files /dev/null and b/skbio/io/tests/data/error_qual_null.fastq differ
diff --git a/skbio/io/tests/data/error_qual_space.fastq b/skbio/io/tests/data/error_qual_space.fastq
new file mode 100644
index 0000000..b98aa65
--- /dev/null
+++ b/skbio/io/tests/data/error_qual_space.fastq
@@ -0,0 +1,21 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYY WWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYYWYYYYWYWWUWWWQQ
+
diff --git a/skbio/io/tests/data/error_qual_tab.fastq b/skbio/io/tests/data/error_qual_tab.fastq
new file mode 100644
index 0000000..ce2ab3a
--- /dev/null
+++ b/skbio/io/tests/data/error_qual_tab.fastq
@@ -0,0 +1,21 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYY	YYYYWYWWUWWWQQ
+
diff --git a/skbio/io/tests/data/error_qual_unit_sep.fastq b/skbio/io/tests/data/error_qual_unit_sep.fastq
new file mode 100644
index 0000000..63d3cd0
--- /dev/null
+++ b/skbio/io/tests/data/error_qual_unit_sep.fastq
@@ -0,0 +1,20 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYYWYYYYWYWWUWWWQQ
diff --git a/skbio/io/tests/data/error_qual_vtab.fastq b/skbio/io/tests/data/error_qual_vtab.fastq
new file mode 100644
index 0000000..74b3482
--- /dev/null
+++ b/skbio/io/tests/data/error_qual_vtab.fastq
@@ -0,0 +1,20 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYY
YYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYYWYYYYWYWWUWWWQQ
diff --git a/skbio/io/tests/data/error_short_qual.fastq b/skbio/io/tests/data/error_short_qual.fastq
new file mode 100644
index 0000000..9ce682f
--- /dev/null
+++ b/skbio/io/tests/data/error_short_qual.fastq
@@ -0,0 +1,20 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYS
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYYWYYYYWYWWUWWWQQ
diff --git a/skbio/io/tests/data/error_spaces.fastq b/skbio/io/tests/data/error_spaces.fastq
new file mode 100644
index 0000000..9430a45
--- /dev/null
+++ b/skbio/io/tests/data/error_spaces.fastq
@@ -0,0 +1,20 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAA TACCTTTGTA GAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYY YYYYYYYYYW YWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGA AAGAGAAATG AGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYY WYYYYWWYYY WYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTT GATCATGATG ATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYY YYYYWYYWYY SYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAA GTTTTTCTCA ACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYY YYYYYYYYYW WWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTT AATGGCATAC ACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYY YWYYYYWYWW UWWWQQ
diff --git a/skbio/io/tests/data/error_tabs.fastq b/skbio/io/tests/data/error_tabs.fastq
new file mode 100644
index 0000000..8572a23
--- /dev/null
+++ b/skbio/io/tests/data/error_tabs.fastq
@@ -0,0 +1,21 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAA	TACCTTTGTA	GAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYY	YYYYYYYYYW	YWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGA	AAGAGAAATG	AGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYY	WYYYYWWYYY	WYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTT	GATCATGATG	ATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYY	YYYYWYYWYY	SYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAA	GTTTTTCTCA	ACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYY	YYYYYYYYYW	WWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTT	AATGGCATAC ACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYY	YWYYYYWYWW	UWWWQQ
+
diff --git a/skbio/io/tests/data/error_trunc_at_plus.fastq b/skbio/io/tests/data/error_trunc_at_plus.fastq
new file mode 100644
index 0000000..0398a58
--- /dev/null
+++ b/skbio/io/tests/data/error_trunc_at_plus.fastq
@@ -0,0 +1,19 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
+
diff --git a/skbio/io/tests/data/error_trunc_at_qual.fastq b/skbio/io/tests/data/error_trunc_at_qual.fastq
new file mode 100644
index 0000000..55b2f85
--- /dev/null
+++ b/skbio/io/tests/data/error_trunc_at_qual.fastq
@@ -0,0 +1,19 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
diff --git a/skbio/io/tests/data/error_trunc_at_seq.fastq b/skbio/io/tests/data/error_trunc_at_seq.fastq
new file mode 100644
index 0000000..5d760c4
--- /dev/null
+++ b/skbio/io/tests/data/error_trunc_at_seq.fastq
@@ -0,0 +1,18 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+
diff --git a/skbio/io/tests/data/error_trunc_in_plus.fastq b/skbio/io/tests/data/error_trunc_in_plus.fastq
new file mode 100644
index 0000000..703b8a7
--- /dev/null
+++ b/skbio/io/tests/data/error_trunc_in_plus.fastq
@@ -0,0 +1,19 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC
\ No newline at end of file
diff --git a/skbio/io/tests/data/error_trunc_in_qual.fastq b/skbio/io/tests/data/error_trunc_in_qual.fastq
new file mode 100644
index 0000000..3b6f1e1
--- /dev/null
+++ b/skbio/io/tests/data/error_trunc_in_qual.fastq
@@ -0,0 +1,20 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGGCATACACTCAA
++SLXA-B3_649_FC8437_R1_1_1_183_714
+YYYYYYYYYYWYYYYWYWWUWWWQ
diff --git a/skbio/io/tests/data/error_trunc_in_seq.fastq b/skbio/io/tests/data/error_trunc_in_seq.fastq
new file mode 100644
index 0000000..8c81722
--- /dev/null
+++ b/skbio/io/tests/data/error_trunc_in_seq.fastq
@@ -0,0 +1,18 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_183_714
+GTATTATTTAATGG
\ No newline at end of file
diff --git a/skbio/io/tests/data/error_trunc_in_title.fastq b/skbio/io/tests/data/error_trunc_in_title.fastq
new file mode 100644
index 0000000..dfbb443
--- /dev/null
+++ b/skbio/io/tests/data/error_trunc_in_title.fastq
@@ -0,0 +1,17 @@
+ at SLXA-B3_649_FC8437_R1_1_1_610_79
+GATGTGCAATACCTTTGTAGAGGAA
++SLXA-B3_649_FC8437_R1_1_1_610_79
+YYYYYYYYYYYYYYYYYYWYWYYSU
+ at SLXA-B3_649_FC8437_R1_1_1_397_389
+GGTTTGAGAAAGAGAAATGAGATAA
++SLXA-B3_649_FC8437_R1_1_1_397_389
+YYYYYYYYYWYYYYWWYYYWYWYWW
+ at SLXA-B3_649_FC8437_R1_1_1_850_123
+GAGGGTGTTGATCATGATGATGGCG
++SLXA-B3_649_FC8437_R1_1_1_850_123
+YYYYYYYYYYYYYWYYWYYSYYYSY
+ at SLXA-B3_649_FC8437_R1_1_1_362_549
+GGAAACAAAGTTTTTCTCAACATAG
++SLXA-B3_649_FC8437_R1_1_1_362_549
+YYYYYYYYYYYYYYYYYYWWWWYWY
+ at SLXA-B3_649_FC8437_R1_1_1_
\ No newline at end of file
diff --git a/skbio/io/tests/data/fasta_10_seqs b/skbio/io/tests/data/fasta_10_seqs
new file mode 100644
index 0000000..35049e6
--- /dev/null
+++ b/skbio/io/tests/data/fasta_10_seqs
@@ -0,0 +1,24 @@
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+A
+C
+GT
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+AC
+G
+T
diff --git a/skbio/io/tests/data/fasta_3_seqs_defaults b/skbio/io/tests/data/fasta_3_seqs_defaults
new file mode 100644
index 0000000..dcdd3e0
--- /dev/null
+++ b/skbio/io/tests/data/fasta_3_seqs_defaults
@@ -0,0 +1,6 @@
+>s_e_q_1 desc 1
+UUUU
+>s_e_q_2 desc 2
+CATC
+>s_e_q_3 desc 3
+sits
diff --git a/skbio/io/tests/data/fasta_3_seqs_non_defaults b/skbio/io/tests/data/fasta_3_seqs_non_defaults
new file mode 100644
index 0000000..536ca6b
--- /dev/null
+++ b/skbio/io/tests/data/fasta_3_seqs_non_defaults
@@ -0,0 +1,9 @@
+>s*e*q*1 desc+1
+UUU
+U
+>s*e*q*2 desc+2
+CAT
+C
+>s*e*q*3 desc+3
+sit
+s
diff --git a/skbio/io/tests/data/fasta_description_newline_replacement_empty_str b/skbio/io/tests/data/fasta_description_newline_replacement_empty_str
new file mode 100644
index 0000000..661724c
--- /dev/null
+++ b/skbio/io/tests/data/fasta_description_newline_replacement_empty_str
@@ -0,0 +1,4 @@
+>proteinseq detaileddescription 		with  newlines
+pQqqqPPQQQ
+>foo
+AGGAGAATA
diff --git a/skbio/io/tests/data/fasta_description_newline_replacement_multi_char b/skbio/io/tests/data/fasta_description_newline_replacement_multi_char
new file mode 100644
index 0000000..726fe41
--- /dev/null
+++ b/skbio/io/tests/data/fasta_description_newline_replacement_multi_char
@@ -0,0 +1,4 @@
+>proteinseq :-)detailed:-)description 		with  new:-):-)lines:-):-):-)
+pQqqqPPQQQ
+>foo :-):-):-):-)
+AGGAGAATA
diff --git a/skbio/io/tests/data/fasta_description_newline_replacement_none b/skbio/io/tests/data/fasta_description_newline_replacement_none
new file mode 100644
index 0000000..050dc00
--- /dev/null
+++ b/skbio/io/tests/data/fasta_description_newline_replacement_none
@@ -0,0 +1,15 @@
+>proteinseq 
+detailed
+description 		with  new
+
+lines
+
+
+
+pQqqqPPQQQ
+>foo 
+
+
+
+
+AGGAGAATA
diff --git a/skbio/io/tests/data/fasta_id_whitespace_replacement_empty_str b/skbio/io/tests/data/fasta_id_whitespace_replacement_empty_str
new file mode 100644
index 0000000..d22dda2
--- /dev/null
+++ b/skbio/io/tests/data/fasta_id_whitespace_replacement_empty_str
@@ -0,0 +1,4 @@
+>seq2
+A
+> a b
+UA
diff --git a/skbio/io/tests/data/fasta_id_whitespace_replacement_multi_char b/skbio/io/tests/data/fasta_id_whitespace_replacement_multi_char
new file mode 100644
index 0000000..68382ff
--- /dev/null
+++ b/skbio/io/tests/data/fasta_id_whitespace_replacement_multi_char
@@ -0,0 +1,4 @@
+>>:o>:o>:o>:o>:oseq>:o>:o2>:o
+A
+>>:o>:o>:o>:o a b
+UA
diff --git a/skbio/io/tests/data/fasta_id_whitespace_replacement_none b/skbio/io/tests/data/fasta_id_whitespace_replacement_none
new file mode 100644
index 0000000..dd5a7ef
--- /dev/null
+++ b/skbio/io/tests/data/fasta_id_whitespace_replacement_none
@@ -0,0 +1,7 @@
+> 
+  
+seq 	2 
+A
+>
+	 	 a b
+UA
diff --git a/skbio/io/tests/data/fasta_invalid_after_10_seqs b/skbio/io/tests/data/fasta_invalid_after_10_seqs
new file mode 100644
index 0000000..b3e366f
--- /dev/null
+++ b/skbio/io/tests/data/fasta_invalid_after_10_seqs
@@ -0,0 +1,25 @@
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+A
+C
+GT
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+ACGT
+>seq1 desc1
+AC
+G
+T
+>seq1 desc1
diff --git a/skbio/io/tests/data/fasta_invalid_blank_line b/skbio/io/tests/data/fasta_invalid_blank_line
new file mode 100644
index 0000000..dfa357d
--- /dev/null
+++ b/skbio/io/tests/data/fasta_invalid_blank_line
@@ -0,0 +1,7 @@
+>seq1 desc1
+ACGT
+>seq2 desc2
+AAAAA
+
+>seq3 desc3
+CCC
diff --git a/skbio/io/tests/data/fasta_invalid_legacy_format b/skbio/io/tests/data/fasta_invalid_legacy_format
new file mode 100644
index 0000000..63a6fce
--- /dev/null
+++ b/skbio/io/tests/data/fasta_invalid_legacy_format
@@ -0,0 +1,2 @@
+; legacy-seq-id legacy description
+ACGT
diff --git a/skbio/io/tests/data/fasta_invalid_missing_header b/skbio/io/tests/data/fasta_invalid_missing_header
new file mode 100644
index 0000000..5fbaa4e
--- /dev/null
+++ b/skbio/io/tests/data/fasta_invalid_missing_header
@@ -0,0 +1,2 @@
+seq1 desc1
+ACGT
diff --git a/skbio/io/tests/data/fasta_invalid_missing_seq_data_first b/skbio/io/tests/data/fasta_invalid_missing_seq_data_first
new file mode 100644
index 0000000..1fec845
--- /dev/null
+++ b/skbio/io/tests/data/fasta_invalid_missing_seq_data_first
@@ -0,0 +1,5 @@
+>seq1 desc1
+>seq2 desc2
+AAAAA
+>seq3 desc3
+CCC
diff --git a/skbio/io/tests/data/fasta_invalid_missing_seq_data_last b/skbio/io/tests/data/fasta_invalid_missing_seq_data_last
new file mode 100644
index 0000000..96f62ea
--- /dev/null
+++ b/skbio/io/tests/data/fasta_invalid_missing_seq_data_last
@@ -0,0 +1,5 @@
+>seq1 desc1
+ACGT
+>seq2 desc2
+AAAAA
+>seq3 desc3
diff --git a/skbio/io/tests/data/fasta_invalid_missing_seq_data_middle b/skbio/io/tests/data/fasta_invalid_missing_seq_data_middle
new file mode 100644
index 0000000..746cfe1
--- /dev/null
+++ b/skbio/io/tests/data/fasta_invalid_missing_seq_data_middle
@@ -0,0 +1,5 @@
+>seq1 desc1
+ACGT
+>seq2 desc2
+>seq3 desc3
+CCC
diff --git a/skbio/io/tests/data/fasta_invalid_whitespace_only_line b/skbio/io/tests/data/fasta_invalid_whitespace_only_line
new file mode 100644
index 0000000..ed70c42
--- /dev/null
+++ b/skbio/io/tests/data/fasta_invalid_whitespace_only_line
@@ -0,0 +1,7 @@
+>seq1 desc1
+ACGT
+>seq2 desc2
+AAAAA
+		     	   
+>seq3 desc3
+CCC
diff --git a/skbio/io/tests/data/fasta_max_width_1 b/skbio/io/tests/data/fasta_max_width_1
new file mode 100644
index 0000000..e685f7d
--- /dev/null
+++ b/skbio/io/tests/data/fasta_max_width_1
@@ -0,0 +1,11 @@
+>seq1 desc1
+A
+C
+G
+T
+-
+a
+c
+g
+t
+.
diff --git a/skbio/io/tests/data/fasta_max_width_5 b/skbio/io/tests/data/fasta_max_width_5
new file mode 100644
index 0000000..f2dad0d
--- /dev/null
+++ b/skbio/io/tests/data/fasta_max_width_5
@@ -0,0 +1,20 @@
+>seq1 desc1
+ACGT-
+acgt.
+>_____seq__2_
+A
+> desc3
+AACGG
+uA
+>
+AcGtU
+Tu
+>
+ACGTT
+GCAcc
+GG
+>
+ACGUU
+>proteinseq  detailed description 		with  new  lines   
+pQqqq
+PPQQQ
diff --git a/skbio/io/tests/data/fasta_mixed_qual_scores b/skbio/io/tests/data/fasta_mixed_qual_scores
new file mode 100644
index 0000000..5b86e53
--- /dev/null
+++ b/skbio/io/tests/data/fasta_mixed_qual_scores
@@ -0,0 +1,4 @@
+>seq1 desc1
+ACGT-acgt.
+>da,dadadada 10 hours
+AAAAT
diff --git a/skbio/io/tests/data/fasta_multi_seq b/skbio/io/tests/data/fasta_multi_seq
new file mode 100644
index 0000000..906b765
--- /dev/null
+++ b/skbio/io/tests/data/fasta_multi_seq
@@ -0,0 +1,14 @@
+>seq1 desc1
+ACGT-acgt.
+>_____seq__2_
+A
+> desc3
+AACGGuA
+>
+AcGtUTu
+>
+ACGTTGCAccGG
+>
+ACGUU
+>proteinseq  detailed description 		with  new  lines   
+pQqqqPPQQQ
diff --git a/skbio/io/tests/data/fasta_multi_seq_roundtrip b/skbio/io/tests/data/fasta_multi_seq_roundtrip
new file mode 100644
index 0000000..c01fece
--- /dev/null
+++ b/skbio/io/tests/data/fasta_multi_seq_roundtrip
@@ -0,0 +1,6 @@
+>seq-a a's description
+ACATAGGTA
+>seq-b b's description
+TAGATAGATAGA
+>seq-c c's description
+CATCATCATCATCATCATCAT
diff --git a/skbio/io/tests/data/fasta_prot_seqs_odd_labels b/skbio/io/tests/data/fasta_prot_seqs_odd_labels
new file mode 100644
index 0000000..30a19f4
--- /dev/null
+++ b/skbio/io/tests/data/fasta_prot_seqs_odd_labels
@@ -0,0 +1,8 @@
+>	  		     
+  	DEFQ			   	
+ 	 fp 			  
+>  	   skbio       			   
+S
+ K
+  B 
+   I
diff --git a/skbio/io/tests/data/fasta_sequence_collection_different_type b/skbio/io/tests/data/fasta_sequence_collection_different_type
new file mode 100644
index 0000000..5fb090f
--- /dev/null
+++ b/skbio/io/tests/data/fasta_sequence_collection_different_type
@@ -0,0 +1,6 @@
+> 
+AUG
+>rnaseq-1 rnaseq desc 1  
+AUC
+>rnaseq-2        rnaseq desc 2
+AUG
diff --git a/skbio/io/tests/data/fasta_single_bio_seq_defaults b/skbio/io/tests/data/fasta_single_bio_seq_defaults
new file mode 100644
index 0000000..fd6d562
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_bio_seq_defaults
@@ -0,0 +1,2 @@
+>f_o_o b a r
+ACGT
diff --git a/skbio/io/tests/data/fasta_single_bio_seq_non_defaults b/skbio/io/tests/data/fasta_single_bio_seq_non_defaults
new file mode 100644
index 0000000..aed57ca
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_bio_seq_non_defaults
@@ -0,0 +1,5 @@
+>f-o-o b_a_r
+A
+C
+G
+T
diff --git a/skbio/io/tests/data/fasta_single_dna_seq_defaults b/skbio/io/tests/data/fasta_single_dna_seq_defaults
new file mode 100644
index 0000000..7381787
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_dna_seq_defaults
@@ -0,0 +1,2 @@
+>f_o_o b a r
+TACG
diff --git a/skbio/io/tests/data/fasta_single_dna_seq_non_defaults b/skbio/io/tests/data/fasta_single_dna_seq_non_defaults
new file mode 100644
index 0000000..3743565
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_dna_seq_non_defaults
@@ -0,0 +1,5 @@
+>f-o-o b_a_r
+T
+A
+C
+G
diff --git a/skbio/io/tests/data/fasta_single_nuc_seq_defaults b/skbio/io/tests/data/fasta_single_nuc_seq_defaults
new file mode 100644
index 0000000..05481e6
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_nuc_seq_defaults
@@ -0,0 +1,2 @@
+>f_o_o b a r
+ACGTU
diff --git a/skbio/io/tests/data/fasta_single_nuc_seq_non_defaults b/skbio/io/tests/data/fasta_single_nuc_seq_non_defaults
new file mode 100644
index 0000000..e8a4072
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_nuc_seq_non_defaults
@@ -0,0 +1,6 @@
+>f-o-o b_a_r
+A
+C
+G
+T
+U
diff --git a/skbio/io/tests/data/fasta_single_prot_seq_defaults b/skbio/io/tests/data/fasta_single_prot_seq_defaults
new file mode 100644
index 0000000..6ef6b37
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_prot_seq_defaults
@@ -0,0 +1,2 @@
+>f_o_o b a r
+PQQ
diff --git a/skbio/io/tests/data/fasta_single_prot_seq_non_defaults b/skbio/io/tests/data/fasta_single_prot_seq_non_defaults
new file mode 100644
index 0000000..a031313
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_prot_seq_non_defaults
@@ -0,0 +1,4 @@
+>f-o-o b_a_r
+P
+Q
+Q
diff --git a/skbio/io/tests/data/fasta_single_rna_seq_defaults b/skbio/io/tests/data/fasta_single_rna_seq_defaults
new file mode 100644
index 0000000..738358c
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_rna_seq_defaults
@@ -0,0 +1,2 @@
+>f_o_o b a r
+UACG
diff --git a/skbio/io/tests/data/fasta_single_rna_seq_non_defaults b/skbio/io/tests/data/fasta_single_rna_seq_non_defaults
new file mode 100644
index 0000000..a43fd53
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_rna_seq_non_defaults
@@ -0,0 +1,5 @@
+>f-o-o b_a_r
+U
+A
+C
+G
diff --git a/skbio/io/tests/data/fasta_single_seq b/skbio/io/tests/data/fasta_single_seq
new file mode 100644
index 0000000..fd55b67
--- /dev/null
+++ b/skbio/io/tests/data/fasta_single_seq
@@ -0,0 +1,2 @@
+>seq1 desc1
+ACGT-acgt.
diff --git a/skbio/io/tests/data/fastq_invalid_missing_header b/skbio/io/tests/data/fastq_invalid_missing_header
new file mode 100644
index 0000000..6541afc
--- /dev/null
+++ b/skbio/io/tests/data/fastq_invalid_missing_header
@@ -0,0 +1,4 @@
+seq1 desc1
+ACGT
++
+1234
diff --git a/skbio/io/tests/data/fastq_invalid_missing_seq_data b/skbio/io/tests/data/fastq_invalid_missing_seq_data
new file mode 100644
index 0000000..3edb1a3
--- /dev/null
+++ b/skbio/io/tests/data/fastq_invalid_missing_seq_data
@@ -0,0 +1,13 @@
+ at seq-1 first sequence
+ACG
+T
++
+[[[[
+ at seq-2 second sequence
+T
+GCAC
++
+[[[[[
+ at seq-3 third sequence
++
+[[[[[
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/tests/data/fastq_multi_seq_sanger
new file mode 100644
index 0000000..a2b5187
--- /dev/null
+++ b/skbio/io/tests/data/fastq_multi_seq_sanger
@@ -0,0 +1,12 @@
+ at foo bar baz
+AACCGG
++
+123456
+ at bar baz foo
+TTGGCC
++
+876543
+ at baz foo bar
+GATTTC
++
+567893
diff --git a/skbio/io/tests/data/fastq_single_seq_illumina1.3 b/skbio/io/tests/data/fastq_single_seq_illumina1.3
new file mode 100644
index 0000000..f5de950
--- /dev/null
+++ b/skbio/io/tests/data/fastq_single_seq_illumina1.3
@@ -0,0 +1,10 @@
+@ 	 bar	 baz  
+A
+C
+G
+T
++
+a
+b
+c
+d
diff --git a/skbio/io/tests/data/fastq_wrapping_as_illumina_no_description b/skbio/io/tests/data/fastq_wrapping_as_illumina_no_description
new file mode 100644
index 0000000..cb10fbd
--- /dev/null
+++ b/skbio/io/tests/data/fastq_wrapping_as_illumina_no_description
@@ -0,0 +1,12 @@
+ at SRR014849.50939
+GAAATTTCAGGGCCACCTTTTTTTTGATAGAATAATGGAGAAAATTAAAAGCTGTACATATACCAATGAACAATAAATCAATACATAAAAAAGGAGAAGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTCGG
++
+Zb^Ld`N\[d`NaZ[aZc]UOKHDA[\YT[_W[aZ\aZ[Zd`SF_WeaUI[Y\[[\\\[\Z\aY`X[[aZ\aZ\d`OY[aY[[\[[e`WPJC^UZ[`X\[R]T_V_W[`[Ga\I`\H[[Q^TVa\Ia\Ic^LY\S
+ at SRR014849.110027
+CTTCAAATGATTCCGGGACTGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTTCGGTTCCAACTCGCCGTCCGAATAATCCGTTCAAAATCTTGGCCTGTCAAAACGACTTTACGACCAGAACGATCCG
++
+\aYY_[FY\T`X^Vd`OY\[[^U_V[R^T[_ZDc^La\HYYO\S[c^Ld`Nc_QAZaZaYaY`XZZ\[aZZ[aZ[aZ[aZY`Z[`ZWeaVJ\[aZaY`X[PY\eaUG[\[[d`OXTUZ[Q\\`W\\\Y_W\
+ at SRR014849.203935
+AACCCGTCCCATCAAAGATTTTGGTTGGAACCCGAAAGGGTTTTGAATTCAAACCCCTTTCGGTTCCAACTATTCAATTGTTTAACTTTTTTTAAATTGATGGTCTGTTGGACCATTTGTAATAATCCCCATCGGAATTTCTTT
++
+`Z_ZDVT^YB[[Xd`PZ\d`RDaZaZ`ZaZ_ZDXd`Pd`Pd`RD[aZ`ZWd`Oc_RCd`P\aZ`ZaZaZY\YaZYaY`XYd`O`X[e`WPJEAc^LaZS[YYN[Z\Y`XWLT^U\b]JW[[RZ\SYc`RD[Z\WLXM`\HYa\I
diff --git a/skbio/io/tests/data/fastq_wrapping_as_sanger_no_description b/skbio/io/tests/data/fastq_wrapping_as_sanger_no_description
new file mode 100644
index 0000000..8b63c3c
--- /dev/null
+++ b/skbio/io/tests/data/fastq_wrapping_as_sanger_no_description
@@ -0,0 +1,12 @@
+ at SRR014849.50939
+GAAATTTCAGGGCCACCTTTTTTTTGATAGAATAATGGAGAAAATTAAAAGCTGTACATATACCAATGAACAATAAATCAATACATAAAAAAGGAGAAGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTCGG
++
+;C?-EA/=<EA/B;<B;D>60,)%"<=:5<@8<B;=B;<;EA4'@8FB6*<:=<<===<=;=B:A9<<B;=B;=EA0:<B:<<=<<FA81+$?6;<A9=<3>5 at 7@8<A<(B=*A=)<<2?57B=*B=*D?-:=4
+ at SRR014849.110027
+CTTCAAATGATTCCGGGACTGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTTCGGTTCCAACTCGCCGTCCGAATAATCCGTTCAAAATCTTGGCCTGTCAAAACGACTTTACGACCAGAACGATCCG
++
+=B::@<':=5A9?7EA0:=<<?6 at 7<3?5<@;%D?-B=)::0=4<D?-EA/D at 2";B;B:B:A9;;=<B;;<B;<B;<B;:A;<A;8FB7+=<B;B:A9<1:=FB6(<=<<EA0956;<2==A8===:@8=
+ at SRR014849.203935
+AACCCGTCCCATCAAAGATTTTGGTTGGAACCCGAAAGGGTTTTGAATTCAAACCCCTTTCGGTTCCAACTATTCAATTGTTTAACTTTTTTTAAATTGATGGTCTGTTGGACCATTTGTAATAATCCCCATCGGAATTTCTTT
++
+A;@;%75?:#<<9EA1;=EA3%B;B;A;B;@;%9EA1EA1EA3%<B;A;8EA0D at 3$EA1=B;A;B;B;:=:B;:B:A9:EA0A9<FA81+&"D?-B;4<::/<;=:A98-5?6=C>+8<<3;=4:DA3%<;=8-9.A=):B=*
diff --git a/skbio/io/tests/data/fastq_wrapping_original_sanger_no_description b/skbio/io/tests/data/fastq_wrapping_original_sanger_no_description
new file mode 100644
index 0000000..39bd677
--- /dev/null
+++ b/skbio/io/tests/data/fastq_wrapping_original_sanger_no_description
@@ -0,0 +1,24 @@
+ at SRR014849.50939
+GAAATTTCAGGGCCACCTTTTTTTTGATAGAATAATGGAGAAAATTAAAAGCTGTACATATACCAATGAACAATAAATCAATACATAAAAAAGGAGAAGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTCGG
++
+;C?-EA/=<EA/B;<B;D>60,)%"<=:5<
+ at 8<B;=B;<;EA4'@8FB6*<:=<<===<=
+;=B:A9<<B;=B;=EA0:<B:<<=<<FA81
++$?6;<A9=<3>5 at 7@8<A<(B=*A=)<<2
+?57B=*B=*D?-:=4
+ at SRR014849.110027 
+CTTCAAATGATTCCGGGACTGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTTCGGTTCCAACTCGCCGTCCGAATAATCCGTTCAAAATCTTGGCCTGTCAAAACGACTTTACGACCAGAACGATCCG
++
+=B::@<':=5A9?7EA0:=<<?6 at 7<3?5<
+@;%D?-B=)::0=4<D?-EA/D at 2";B;B:
+B:A9;;=<B;;<B;<B;<B;:A;<A;8FB7
++=<B;B:A9<1:=FB6(<=<<EA0956;<2
+==A8===:@8=
+ at SRR014849.203935
+AACCCGTCCCATCAAAGATTTTGGTTGGAACCCGAAAGGGTTTTGAATTCAAACCCCTTTCGGTTCCAACTATTCAATTGTTTAACTTTTTTTAAATTGATGGTCTGTTGGACCATTTGTAATAATCCCCATCGGAATTTCTTT
++
+A;@;%75?:#<<9EA1;=EA3%B;B;A;B;
+@;%9EA1EA1EA3%<B;A;8EA0D at 3$EA1
+=B;A;B;B;:=:B;:B:A9:EA0A9<FA81
++&"D?-B;4<::/<;=:A98-5?6=C>+8<
+<3;=4:DA3%<;=8-9.A=):B=*
diff --git a/skbio/io/tests/data/fastq_writer_illumina1.3_defaults b/skbio/io/tests/data/fastq_writer_illumina1.3_defaults
new file mode 100644
index 0000000..445cf69
--- /dev/null
+++ b/skbio/io/tests/data/fastq_writer_illumina1.3_defaults
@@ -0,0 +1,12 @@
+ at f_o__o bar  baz
+AACCGG
++
+PQRSTU
+ at bar baz foo
+TTGGCC
++
+WVUTSR
+ at ba___z foo bar
+GATTTC
++
+TUVWXR
diff --git a/skbio/io/tests/data/fastq_writer_sanger_defaults b/skbio/io/tests/data/fastq_writer_sanger_defaults
new file mode 100644
index 0000000..14cc74b
--- /dev/null
+++ b/skbio/io/tests/data/fastq_writer_sanger_defaults
@@ -0,0 +1,12 @@
+ at f_o__o bar  baz
+AACCGG
++
+123456
+ at bar baz foo
+TTGGCC
++
+876543
+ at ba___z foo bar
+GATTTC
++
+567893
diff --git a/skbio/io/tests/data/fastq_writer_sanger_non_defaults b/skbio/io/tests/data/fastq_writer_sanger_non_defaults
new file mode 100644
index 0000000..0bb9718
--- /dev/null
+++ b/skbio/io/tests/data/fastq_writer_sanger_non_defaults
@@ -0,0 +1,12 @@
+ at f%o%%o bar^^baz
+AACCGG
++
+123456
+ at bar baz foo
+TTGGCC
++
+876543
+ at ba%%%z foo bar
+GATTTC
++
+567893
diff --git a/skbio/io/tests/data/illumina_full_range_as_illumina.fastq b/skbio/io/tests/data/illumina_full_range_as_illumina.fastq
new file mode 100644
index 0000000..48745c0
--- /dev/null
+++ b/skbio/io/tests/data/illumina_full_range_as_illumina.fastq
@@ -0,0 +1,8 @@
+ at FAKE0005 Original version has PHRED scores from 0 to 62 inclusive (in that order)
+ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACG
++
+ at ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+ at FAKE0006 Original version has PHRED scores from 62 to 0 inclusive (in that order)
+GCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCA
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@
diff --git a/skbio/io/tests/data/illumina_full_range_as_sanger.fastq b/skbio/io/tests/data/illumina_full_range_as_sanger.fastq
new file mode 100644
index 0000000..6dac209
--- /dev/null
+++ b/skbio/io/tests/data/illumina_full_range_as_sanger.fastq
@@ -0,0 +1,8 @@
+ at FAKE0005 Original version has PHRED scores from 0 to 62 inclusive (in that order)
+ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACG
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
+ at FAKE0006 Original version has PHRED scores from 62 to 0 inclusive (in that order)
+GCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCA
++
+_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#"!
diff --git a/skbio/io/tests/data/illumina_full_range_original_illumina.fastq b/skbio/io/tests/data/illumina_full_range_original_illumina.fastq
new file mode 100644
index 0000000..48745c0
--- /dev/null
+++ b/skbio/io/tests/data/illumina_full_range_original_illumina.fastq
@@ -0,0 +1,8 @@
+ at FAKE0005 Original version has PHRED scores from 0 to 62 inclusive (in that order)
+ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACG
++
+ at ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+ at FAKE0006 Original version has PHRED scores from 62 to 0 inclusive (in that order)
+GCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCA
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@
diff --git a/skbio/io/tests/data/longreads_as_illumina.fastq b/skbio/io/tests/data/longreads_as_illumina.fastq
new file mode 100644
index 0000000..c2c51b0
--- /dev/null
+++ b/skbio/io/tests/data/longreads_as_illumina.fastq
@@ -0,0 +1,40 @@
+ at FSRRS4401BE7HA [length=395] [gc=36.46] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=95]
+tcagTTAAGATGGGATAATATCCTCAGATTGCGTGATGAACTTTGTTCTGGTGGAGGAGAAGGAAGTGCATTCGACGTATGCCCGTTTGTCGATATTTGtatttaaagtaatccgtcacaaatcagtgacataaatattatttagatttcgggagcaactttatttattccacaagcaggtttaaattttaaatttaaattattgcagaagactttaaattaacctcgttgtcggagtcatttgttcggttattggtcgaaagtaaccncgggaagtgccgaaaactaacaaacaaaagaagatagtgaaattttaattaaaanaaatagccaaacgtaactaactaaaacggacccgtcgaggaactgccaacggacgacacagggagtagnnn
++
+eeeccccccc`UUU^UWWeegffhhhhhhhhhhhhhhhhhhggghhhhhhhhhfgfeeeee\\\\ceeeeeeeeeeeeeec^^^YRPOSNVU\YTMMMSMRKKKRUUNNNNS[`aa```\bbeccccccccYUUUbceeee\[`a`\ZYRRRPPP[\\\XXZaWWXeeeeeeccacaccc\WWSSQRPMMKKKLKKKKKKKKPPRRMMLLLPVPPPKKKKKQQTTTPRPPQPMLLMKRRRPPKMKKRLLKKMKKLLKRTPPPQRMMLL at KKKKLLKLLLLXKKKKW\KKLKKKLKKKKLLLQUYXYTLMMPKKKKPPNNKKKK at KKPXPVLLKKKKLRMKLLKKPVKKKKLLLJPPPPRMOROOOOKKKOSSSOOORUZXUUUQMNNZV][Z@@@
+ at FSRRS4401BRRTC [length=145] [gc=38.62] [flows=800] [phred_min=0] [phred_max=38] [trimmed_length=74]
+tcagCCAGCAATTCCGACTTAATTGTTCTTCTTCCATCATTCATCTCGACTAACAGTTCTACGATTAATGAGTTTGGCtttaatttgttgttcattattgtcacaattacactactgagactgccaaggcacncagggataggnn
++
+eeeeeeeeecccceeeefecccca`````\[SSSS__a\TTTYaaaaa__^WYW[^[WXWXW[WSSSQZ\\RKKKTPSKKKPPKKKMKKQPVVVTTTTPRKMMLLPPPTVTWMNNRSSWW][[ZZZZXXSSN at NSKKKTVWTT@@
+ at FSRRS4401B64ST [length=382] [gc=40.58] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=346]
+tcagTTTTCTTAAATTACTTGAATCTGTTGAAGTGGATGTCCACTTTTGTATGCCAAATATGCCCAGCGTATACGATCTTGGCCACATCTCCACATAATCATCAGTCGGATGCAAAAAGCGATTAAACTAAAAATGAATGCGTTTTTAGATGAGTAAATAGGTAATACTTTGTTTAAATAATAAATGTCACAAACAGAACGCGGATTACAGTACCTGAAAATAGTTGTACTGTATCTGTGCCGGCACTTCCTCGGCCCTGAGAAGTTGTCCCGTTGTTTCCATTCGCACCATCCAATGGCCAAAGTTTGCGAAGAATCTGTTCCGTTCCATTACCAATTGTTTTTCCATGctgagactgccaaggcacacaggggataggnn
++
+hhhhbbbbh^^UUUhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhUUUUh`hhhhh^^^hhhhbbbhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhUURRRdhbdYYRRW\NLLLLKW\]]^^YQLNNNNV]bddhdhggghhhhhhhhhdZZXXPPPXXa^^^habghhhhhhggghhggghhhhhhhhhhhhhhhhhhaabbhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhfffhhhhhhhhhc^\\\chhhggghhhhhhhhhggghhhhhhhhhhggghggghhhhhhhhhhhhhhhhhhhhhh^]ZXXWW\\TLLLLM__`dfhhhhhhhhhgg^^^^dhhheeXXXZdhhaa@@
+ at FSRRS4401EJ0YH [length=381] [gc=48.29] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=343]
+tcagTTTTTGGAGAATTCCGTCAGGGACGGCATGGCATATTTGTGGGTTCGGCACGGCGTCCTGGCCAAGAAGAAGAAGACGAATTAGCCCGTTAATTTAATGACACCTTCCCCAATTTTGCAGCAATGATTGGTTCATTCTTGGCGGTGCGTTTTTGTGCTTCGTCGAATTGTTGGCCATTTTGGTCCACCGGCCATCATCTTTACGCTATCCGACTGATTGGAAATCACCGCCTAGCATTTTGCCGAAGATTGTTGCGTTGTACGGCCATGTGCTGATTGTTTACATTGGCATTCTTGGCAATTTGTCCTTGGTCGGCTTTGACGGCAAATTTGCGGTGTTAAGTctgagactgccaaggcacacagggggatagggnn
++
+hhhh^^^^^hhhhhhhhhhhhhhggghhhhhhhhhhhhhggghhggghhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhggghhhhhggghhhhhhhhhhh````hh]]]]hhhhhhhhhhhhhhhhhhhhhhhhhhddhddZRRRRRcVhhhhhhhhhhhhhhhhhhhhhbb__gghhhhhhhhhhhhhhhhggghhhhhhhhhhhhhhhhhhhggghhhhhhhhhhhhhaaaahgbcbghhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhggghhhggbbchhhhhhggghhbbbg\bbhhhhhhhhhfffhhhhhhgggggghhhhhhhhhhhhhhhggghhggd^^]]^dedd^NNNNNZYWOLL@@
+ at FSRRS4401BK0IB [length=507] [gc=49.31] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=208]
+tcagTTGACCGGCGTTGTGTAACAATAATTCATTATTCTGAGACGATGCCAATGTAATCGACGGTTTATGCCCAATTATTCCCATCTATGCTTAACTGATCAAATACTATTTGCATTACGTCACGAAATTGCGCGAACACCGCCGGCCGACAATAATTTATACCGGACATACCGGAGTTGATGGTAATCGGTAAAGAGTTTTATTTAATTATntattatcnctattaattattgttancaacaatgtgcacgctntgccgcccgccgccgccgtgtcggtaggaccccggacggacccggacccggttcgggtacccgttttcgggttcccggaaccgtttttcgggtacccggttttttcggggggccccccggtaaaaaaccggggaaccccctaaaacgggtaaacgtaccgtaagggaccccctaaacgggggccccgaaaaaccgggacccaaaccggggggaaacggttaaaggggggggaagtag [...]
++
+eee`__eeeeeeeeeeggaYYY_aeeeeffghghhhhhhhhhhhhhhhhhhhhhhheeeeeeeee^\a`_PPPWWOPP[[WWZ^``accb^^^cc````c`UUUc^ccc\\\\\``]^]][[[\[PPPWW[[^^^``^XTTT\`aaa__^\]]^__PPPSQYYcc`^^^ceeeeeeeeeeeeea````[[OOOOMQQ\NNNNWKLLPPPPPP at QRLLNQS@RVYUUUU[ZWQQNMMS at SUTQPPVVTTRMLMQR@QRPPQPPPQKKLKKQPP\\TLLLLLLKPQKKKKKKLKKKLPKKKKLKKPTTLLKKKKPRPPPMKKKKKKKKJJPPPMMPPMMPKKKKKKKKKJRKKKKKLLQQLLLLLNNLLLLTTNNIINLLQQLLIIKKKKIIKKKKKKMPMKIKKKKIIIKKKKKKKKKKKKKKKKKKKKKKKHKKLKKKKKKHKKKKKIINNMHKKKNNNKKKKKKKKKKKMHHRRLLLKKKKKKKKKKGOKK [...]
+ at FSRRS4401ARCCB [length=258] [gc=46.90] [flows=800] [phred_min=0] [phred_max=38] [trimmed_length=193]
+tcagTTATTGCAGTCGTTCCGCGCCATCGCCGGTAACCGTCCGCGTGTTATTCTGTGTATCGGCCAACCTTCGTATAACTTCGTATAATGTATGCTATACGAAGTTATTACGATCTATACCGGCGAAACTCAGCCGAAAGGTCTCGCGGTAGAGCCTATGAGCTGCCCGACCGATGCATTTAAATTTCCGGGGATCGtcgctgatctgagactgccaaaggcacactagggggataggnnnnnnnnnnnnnnnnnnnn
++
+eee[WYY_ceeeeeeeffecb`````a__OOOOSU[ZUURQQRUUVUQQSRRSW[[\^^SSSTYY]`a```_[[\\a\YTTTYaac^^\acccceeebbbbbbbeebccceeeeeca``\\WWWWOOOS][[[XXUWWZWWX[WWX^aaaa`\^^^ccaaa__^^WWWWXLLLQRVVVPKKKKKKKKLLPPTQ[[OOPTW`_][[[[[SRQVVVPPKKKLLRV\\\VTKLLLLRSUUU@@@@@@@@@@@@@@@@@@@@
+ at FSRRS4401CM938 [length=453] [gc=44.15] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=418]
+tcagGTTTTAAATCGCTTTCCAAGGAATTTGAGTCTAAATCCGGTGGATCCCATCAGTACAAATGCGGCGACAAGGCCGTGAAAACACTGCTTAATTCTTTGCACTTTTTGGCCACCTTTTTGGAAATGTTGTTTTGTGTTCTCAAAATTTTCCATCTCAGAACAAACATTCCATCGGGCTGATGTTGTGGCTTTTGGCGCGCGAAGTGCTGCTACTGCGCGGCAAAATCAGTCGCCAGACCGGTTTTGTTGTGGACGACAAAGTGATCATGCCTGACTTGTACTTCTACCGCGATCCGCAAGCGCGAATTGGTCACATAGTTATAGAATTTTTGAGCCTTTTTCTTGACATAAAAAGTGTGGTTTTAAAAATTTCCTGGCAGGACCCACGCCAACGTTCAGGAATAATATCTTTTAAAAAGctgagactgccaaggcacacaggggataggn
++
+hhhhhbb]]UUUhhhhbbbhhhhhhhhggghhhhhfUUUhhhhhhhhhhggghhhhhhhhbbbhhhhhhhhhhhhhhhhhh____hhhhhhhhhhhhhggghhhh^^^\ZhhddhYYNNNNNVTSSY^haaVQQSSdWddbdab\_______gghhhhhhhhhhaaahhhhhhhhhggghhhhhhhhhhhhhbbbbhhhhhhhhhhhhhhhhhhhhhhhhhhhhUUUUcdhhgda^^c_VVVVVQQQQcWXddhhhhhhhggghhhhhhhhggghhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhggghhhhhhhhhhhhhhh\\^\\hhhhh^^^\ZhURcccWQLLKKKRW\\YYLLLLKKLLLJJJRROUUZ_URWOOOWNYWWX[Yafhhhhhhhhhed[^eTTOOLLLLLTYZZZY]^_b[[VXXXdddddd____ddddd@
+ at FSRRS4401EQLIK [length=411] [gc=34.31] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=374]
+tcagTTTAATTTGGTGCTTCCTTTCAATTCCTTAGTTTAAACTTGGCACTGAAGTCTCGCATTTATAACTAGAGCCCGGATTTTAGAGGCTAAAAAGTTTTCCAGATTTCAAAATTTATTTCGAAACTATTTTTCTGATTGTGATGTGACGGATTTCTAAATTAAATCGAAATGATGTGTATTGAACTTAACAAGTGATTTTTATCAGATTTTGTCAATGAATAAATTTTAATTTAAATCTCTTTCTAACACTTTCATGATTAAAATCTAACAAAGCGCGACCAGTATGTGAGAAGAGCAAAAACAACAAAAAGTGCTAGCACTAAAGAAGGTTCGAACCCAACACATAACGTAAGAGTTACCGGGAAGAAAACCACTctgagactgccaaggcacacagggggataggnn
++
+hhh^UUU^^ggghhhhhhhhhfffhhhhhhhhhhhfffggghhhhhhhhhhhhhhhhhhhhfffhhhhhhhhhhggghhh____hhhhdhdPPPPOOLLLLQQ^\WLLLYLLLLLLLKKKKRRLLLTYRKLLLLYPaadddghhhhhhhhhhha^^`PQQOOOMMMY^\OQSfhhhhhhhhhhhhhhhhhhdbbgga\NNLKKQP^^[TLOOQ\Ueaa^YX[\PPNSSSSNNLNNVV^^fdhddgh`bbhhhggghhhhhhhbbb`hhhgggggghhhhhhhhhhhhhhhhhhhhhhddPNNLLWQQZLLLLMVVV_dhhhhhh^^^hhhhhhhhhhhggghhhhhhhhhhhhhhhhhhhhXXSQQVVVTTTT`dZhdddddhhhhh^^XVTT]_\\YRKKKKKRRRRU@@
+ at FSRRS4401AOV6A [length=309] [gc=22.98] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=273]
+tcagTTTTCAAATTTTCCGAAATTTGCTGTTTGGTAGAAGGCAAATTATTTGATTGAATTTTGTATTTATTTAAAACAATTTATTTTAAAATAATAATTTTCCATTGACTTTTTACATTTAATTGATTTTATTATGCATTTTATATTTGTTTTCTAAATATTCGTTTGCAAACTCACGTTGAAATTGTATTAAACTCGAAATTAGAGTTTTTGAAATTAATTTTTATGTAGCATAATATTTTAAACATATTGGAATTTTATAAAACATTATATTTTTctgagactgccaaggcacacagggggataggn
++
+hhhhbbbbhZZZbbbbhhh^^^ggghhhhggghhhhhhhhhhggghhhggghhhhhhh____hehbbbhb``ZZZZdc^a__cUUSSTTTT[[[fhh]]``hhhhhhhhZZZYYhhh^^^bbbhhhZZZZheehhhhhbbbahahddcbSSSS^Saaad^dhhhbgghhZZZghhhhhhggZZZgghhhhhZZZhhhhggghhhhhh]]^^]hddaffYYPPPPNSUeaeaa^\Z\`^XVVVPPPXYd```ccacVVVV\NPPPPQQc`__aUWZZZhWgghhhhhZZZZ^]hdbbbaNNNNNZVST\@
+ at FSRRS4401EG0ZW [length=424] [gc=23.82] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=389]
+tcagTTTTGATCTTTTAATAATGAATTTTAATGTGTTAAAATGATTGCATTGATGGCATAACCGCATTTAAATTAATTACATGAAGTGTAAGTATGAAATTTTCCTTTCCAAATTGCAAAAACTAAAATTTAAAATTTATCGTAAAAATTAACATATATTTTAAACGATTTTAAGAAACATTTGTAAATTATATTTTTGTGAAGCGTTCAAACAAAAATAAACAATAAAATATTTTTCTATTTAATAGCAAAACATTTGACGATGAAAAGGAAAATGCGGGTTTGAAAATGGGCTTTGCCATGCTATTTTCATAATAACATATTTTTATTATGAATAATAAATTTACATACAATATATACAGTCTTAAATTTATTCATAATATTTTTGAGAATctgagactgccaaggcacacaggggataggn
++
+hh`XSSSTddhh\\\]hhhhhhhhhbbbbhhghhhbbZZZZhhhhhhhhhhhhhhhhhhhhhhhhheZZUUUcchhhhhhhhhhhhhhhhhhhddXSSSQQSS__UUUbb[[acc`\LLLLLQ[KKKKUTXNNOO\\\WbhhhZ]]\\ggZZhhhhhhbb__^^^hhh____hb^UUUghccbh^a^^bb[ddPPPPPaSaccbaZ\_aVVV]NNNNL\RQR^SQRKKKN\PKKKKLYSdZ^^dhhhhhbbbbh]ZZZhhhhhhh[[__^\NNNNV\`XXXWW[[SSTThdddhhhhhhhhhhhhh[XXXghhhhhhhhhhh^^^^^hhhhhhhhhhhb`bZTTTRXdhhhhhhhhhhhhhhhhggXXXgggh`\`ddee_\MMMMM`c___ccddddehhhZZZXVVeebbb_QSSSX^ecc@
diff --git a/skbio/io/tests/data/longreads_as_sanger.fastq b/skbio/io/tests/data/longreads_as_sanger.fastq
new file mode 100644
index 0000000..7a3ec93
--- /dev/null
+++ b/skbio/io/tests/data/longreads_as_sanger.fastq
@@ -0,0 +1,40 @@
+ at FSRRS4401BE7HA [length=395] [gc=36.46] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=95]
+tcagTTAAGATGGGATAATATCCTCAGATTGCGTGATGAACTTTGTTCTGGTGGAGGAGAAGGAAGTGCATTCGACGTATGCCCGTTTGTCGATATTTGtatttaaagtaatccgtcacaaatcagtgacataaatattatttagatttcgggagcaactttatttattccacaagcaggtttaaattttaaatttaaattattgcagaagactttaaattaacctcgttgtcggagtcatttgttcggttattggtcgaaagtaaccncgggaagtgccgaaaactaacaaacaaaagaagatagtgaaattttaattaaaanaaatagccaaacgtaactaactaaaacggacccgtcgaggaactgccaacggacgacacagggagtagnnn
++
+FFFDDDDDDDA666?688FFHGGIIIIIIIIIIIIIIIIIIHHHIIIIIIIIIGHGFFFFF====DFFFFFFFFFFFFFFD???:3104/76=:5...4.3,,,366////4<ABBAAA=CCFDDDDDDDD:666CDFFFF=<ABA=;:333111<===99;B889FFFFFFDDBDBDDD=8844231..,,,-,,,,,,,,1133..---17111,,,,,22555131121.--.,33311,.,,3--,,.,,--,3511123..--!,,,,--,----9,,,,8=,,-,,,-,,,,---26:9:5-..1,,,,11//,,,,!,,1917--,,,,-3.,--,,17,,,,---+11113.030000,,,044400036;96662.//;7><;!!!
+ at FSRRS4401BRRTC [length=145] [gc=38.62] [flows=800] [phred_min=0] [phred_max=38] [trimmed_length=74]
+tcagCCAGCAATTCCGACTTAATTGTTCTTCTTCCATCATTCATCTCGACTAACAGTTCTACGATTAATGAGTTTGGCtttaatttgttgttcattattgtcacaattacactactgagactgccaaggcacncagggataggnn
++
+FFFFFFFFFDDDDFFFFGFDDDDBAAAAA=<4444@@B=555:BBBBB@@?8:8<?<89898<84442;==3,,,514,,,11,,,.,,21777555513,..--1115758.//34488><<;;;;9944/!/4,,,57855!!
+ at FSRRS4401B64ST [length=382] [gc=40.58] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=346]
+tcagTTTTCTTAAATTACTTGAATCTGTTGAAGTGGATGTCCACTTTTGTATGCCAAATATGCCCAGCGTATACGATCTTGGCCACATCTCCACATAATCATCAGTCGGATGCAAAAAGCGATTAAACTAAAAATGAATGCGTTTTTAGATGAGTAAATAGGTAATACTTTGTTTAAATAATAAATGTCACAAACAGAACGCGGATTACAGTACCTGAAAATAGTTGTACTGTATCTGTGCCGGCACTTCCTCGGCCCTGAGAAGTTGTCCCGTTGTTTCCATTCGCACCATCCAATGGCCAAAGTTTGCGAAGAATCTGTTCCGTTCCATTACCAATTGTTTTTCCATGctgagactgccaaggcacacaggggataggnn
++
+IIIICCCCI??666IIIIIIIIIIIIIIIIIIIIIIIIIIIIII6666IAIIIII???IIIICCCIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII66333EICE::338=/----,8=>>??:2-////7>CEEIEIHHHIIIIIIIIIE;;9911199B???IBCHIIIIIIHHHIIHHHIIIIIIIIIIIIIIIIIIBBCCIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIGGGIIIIIIIIID?===DIIIHHHIIIIIIIIIHHHIIIIIIIIIIHHHIHHHIIIIIIIIIIIIIIIIIIIIII?>;9988==5----.@@AEGIIIIIIIIIHH????EIIIFF999;EIIBB!!
+ at FSRRS4401EJ0YH [length=381] [gc=48.29] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=343]
+tcagTTTTTGGAGAATTCCGTCAGGGACGGCATGGCATATTTGTGGGTTCGGCACGGCGTCCTGGCCAAGAAGAAGAAGACGAATTAGCCCGTTAATTTAATGACACCTTCCCCAATTTTGCAGCAATGATTGGTTCATTCTTGGCGGTGCGTTTTTGTGCTTCGTCGAATTGTTGGCCATTTTGGTCCACCGGCCATCATCTTTACGCTATCCGACTGATTGGAAATCACCGCCTAGCATTTTGCCGAAGATTGTTGCGTTGTACGGCCATGTGCTGATTGTTTACATTGGCATTCTTGGCAATTTGTCCTTGGTCGGCTTTGACGGCAAATTTGCGGTGTTAAGTctgagactgccaaggcacacagggggatagggnn
++
+IIII?????IIIIIIIIIIIIIIHHHIIIIIIIIIIIIIHHHIIHHHIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIHHHIIIIIHHHIIIIIIIIIIIAAAAII>>>>IIIIIIIIIIIIIIIIIIIIIIIIIIEEIEE;33333D7IIIIIIIIIIIIIIIIIIIIICC@@HHIIIIIIIIIIIIIIIIHHHIIIIIIIIIIIIIIIIIIIHHHIIIIIIIIIIIIIBBBBIHCDCHIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIHHHIIIHHCCDIIIIIIHHHIICCCH=CCIIIIIIIIIGGGIIIIIIHHHHHHIIIIIIIIIIIIIIIHHHIIHHE??>>?EFEE?/////;:80--!!
+ at FSRRS4401BK0IB [length=507] [gc=49.31] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=208]
+tcagTTGACCGGCGTTGTGTAACAATAATTCATTATTCTGAGACGATGCCAATGTAATCGACGGTTTATGCCCAATTATTCCCATCTATGCTTAACTGATCAAATACTATTTGCATTACGTCACGAAATTGCGCGAACACCGCCGGCCGACAATAATTTATACCGGACATACCGGAGTTGATGGTAATCGGTAAAGAGTTTTATTTAATTATntattatcnctattaattattgttancaacaatgtgcacgctntgccgcccgccgccgccgtgtcggtaggaccccggacggacccggacccggttcgggtacccgttttcgggttcccggaaccgtttttcgggtacccggttttttcggggggccccccggtaaaaaaccggggaaccccctaaaacgggtaaacgtaccgtaagggaccccctaaacgggggccccgaaaaaccgggacccaaaccggggggaaacggttaaaggggggggaagtag [...]
++
+FFFA@@FFFFFFFFFFHHB:::@BFFFFGGHIHIIIIIIIIIIIIIIIIIIIIIIIFFFFFFFFF?=BA at 11188011<<88;?AABDDC???DDAAAADA666D?DDD=====AA>?>><<<=<11188<<???AA?9555=ABBB@@?=>>?@@11142::DDA???DFFFFFFFFFFFFFBAAAA<<0000.22=////8,--111111!23--/24!37:6666<;822/..4!46521177553.-.23!231121112,,-,,211==5------,12,,,,,,-,,,-1,,,,-,,155--,,,,13111.,,,,,,,,++111..11..1,,,,,,,,,+3,,,,,--22-----//----55//**/--22--**,,,,**,,,,,,.1.,*,,,,***,,,,,,,,,,,,,,,,,,,,,,,),,-,,,,,,),,,,,**//.),,,///,,,,,,,,,,,.))33---,,,,,,,,,,(0,, [...]
+ at FSRRS4401ARCCB [length=258] [gc=46.90] [flows=800] [phred_min=0] [phred_max=38] [trimmed_length=193]
+tcagTTATTGCAGTCGTTCCGCGCCATCGCCGGTAACCGTCCGCGTGTTATTCTGTGTATCGGCCAACCTTCGTATAACTTCGTATAATGTATGCTATACGAAGTTATTACGATCTATACCGGCGAAACTCAGCCGAAAGGTCTCGCGGTAGAGCCTATGAGCTGCCCGACCGATGCATTTAAATTTCCGGGGATCGtcgctgatctgagactgccaaaggcacactagggggataggnnnnnnnnnnnnnnnnnnnn
++
+FFF<8::@DFFFFFFFGGFDCAAAAAB@@000046<;66322366762243348<<=??4445::>ABAAA@<<==B=:555:BBD??=BDDDDFFFCCCCCCCFFCDDDFFFFFDBAA==88880004><<<99688;889<889?BBBBA=???DDBBB@@??88889---237771,,,,,,,,--1152<<00158A@><<<<<43277711,,,--37===75,----34666!!!!!!!!!!!!!!!!!!!!
+ at FSRRS4401CM938 [length=453] [gc=44.15] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=418]
+tcagGTTTTAAATCGCTTTCCAAGGAATTTGAGTCTAAATCCGGTGGATCCCATCAGTACAAATGCGGCGACAAGGCCGTGAAAACACTGCTTAATTCTTTGCACTTTTTGGCCACCTTTTTGGAAATGTTGTTTTGTGTTCTCAAAATTTTCCATCTCAGAACAAACATTCCATCGGGCTGATGTTGTGGCTTTTGGCGCGCGAAGTGCTGCTACTGCGCGGCAAAATCAGTCGCCAGACCGGTTTTGTTGTGGACGACAAAGTGATCATGCCTGACTTGTACTTCTACCGCGATCCGCAAGCGCGAATTGGTCACATAGTTATAGAATTTTTGAGCCTTTTTCTTGACATAAAAAGTGTGGTTTTAAAAATTTCCTGGCAGGACCCACGCCAACGTTCAGGAATAATATCTTTTAAAAAGctgagactgccaaggcacacaggggataggn
++
+IIIIICC>>666IIIICCCIIIIIIIIHHHIIIIIG666IIIIIIIIIIHHHIIIIIIIICCCIIIIIIIIIIIIIIIIII@@@@IIIIIIIIIIIIIHHHIIII???=;IIEEI:://///7544:?IBB72244E8EECEBC=@@@@@@@HHIIIIIIIIIIBBBIIIIIIIIIHHHIIIIIIIIIIIIICCCCIIIIIIIIIIIIIIIIIIIIIIIIIIII6666DEIIHEB??D at 777772222D89EEIIIIIIIHHHIIIIIIIIHHHIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIHHHIIIIIIIIIIIIIII==?==IIIII???=;I63DDD82--,,,38==::----,,---+++33066;@6380008/:889<:BGIIIIIIIIIFE<?F5500-----5:;;;:>?@C<<7999EEEEEE@@@@EEEEE!
+ at FSRRS4401EQLIK [length=411] [gc=34.31] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=374]
+tcagTTTAATTTGGTGCTTCCTTTCAATTCCTTAGTTTAAACTTGGCACTGAAGTCTCGCATTTATAACTAGAGCCCGGATTTTAGAGGCTAAAAAGTTTTCCAGATTTCAAAATTTATTTCGAAACTATTTTTCTGATTGTGATGTGACGGATTTCTAAATTAAATCGAAATGATGTGTATTGAACTTAACAAGTGATTTTTATCAGATTTTGTCAATGAATAAATTTTAATTTAAATCTCTTTCTAACACTTTCATGATTAAAATCTAACAAAGCGCGACCAGTATGTGAGAAGAGCAAAAACAACAAAAAGTGCTAGCACTAAAGAAGGTTCGAACCCAACACATAACGTAAGAGTTACCGGGAAGAAAACCACTctgagactgccaaggcacacagggggataggnn
++
+III?666??HHHIIIIIIIIIGGGIIIIIIIIIIIGGGHHHIIIIIIIIIIIIIIIIIIIIGGGIIIIIIIIIIHHHIII@@@@IIIIEIE111100----22?=8---:-------,,,,33---5:3,----:1BBEEEHIIIIIIIIIIIB??A122000...:?=024GIIIIIIIIIIIIIIIIIIECCHHB=//-,,21??<5-002=6FBB?:9<=11/4444//-//77??GEIEEHIACCIIIHHHIIIIIIICCCAIIIHHHHHHIIIIIIIIIIIIIIIIIIIIIIEE1//--822;----.777 at EIIIIII???IIIIIIIIIIIHHHIIIIIIIIIIIIIIIIIIII994227775555AE;IEEEEEIIIII??9755>@==:3,,,,,33336!!
+ at FSRRS4401AOV6A [length=309] [gc=22.98] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=273]
+tcagTTTTCAAATTTTCCGAAATTTGCTGTTTGGTAGAAGGCAAATTATTTGATTGAATTTTGTATTTATTTAAAACAATTTATTTTAAAATAATAATTTTCCATTGACTTTTTACATTTAATTGATTTTATTATGCATTTTATATTTGTTTTCTAAATATTCGTTTGCAAACTCACGTTGAAATTGTATTAAACTCGAAATTAGAGTTTTTGAAATTAATTTTTATGTAGCATAATATTTTAAACATATTGGAATTTTATAAAACATTATATTTTTctgagactgccaaggcacacagggggataggn
++
+IIIICCCCI;;;CCCCIII???HHHIIIIHHHIIIIIIIIIIHHHIIIHHHIIIIIII@@@@IFICCCICAA;;;;ED?B@@D66445555<<<GII>>AAIIIIIIII;;;::III???CCCIII;;;;IFFIIIIICCCBIBIEEDC4444?4BBBE?EIIICHHII;;;HIIIIIIHH;;;HHIIIII;;;IIIIHHHIIIIII>>??>IEEBGG::1111/46FBFBB?=;=A?97771119:EAAADDBD7777=/111122DA@@B68;;;I8HHIIIII;;;;?>IECCCB/////;745=!
+ at FSRRS4401EG0ZW [length=424] [gc=23.82] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=389]
+tcagTTTTGATCTTTTAATAATGAATTTTAATGTGTTAAAATGATTGCATTGATGGCATAACCGCATTTAAATTAATTACATGAAGTGTAAGTATGAAATTTTCCTTTCCAAATTGCAAAAACTAAAATTTAAAATTTATCGTAAAAATTAACATATATTTTAAACGATTTTAAGAAACATTTGTAAATTATATTTTTGTGAAGCGTTCAAACAAAAATAAACAATAAAATATTTTTCTATTTAATAGCAAAACATTTGACGATGAAAAGGAAAATGCGGGTTTGAAAATGGGCTTTGCCATGCTATTTTCATAATAACATATTTTTATTATGAATAATAAATTTACATACAATATATACAGTCTTAAATTTATTCATAATATTTTTGAGAATctgagactgccaaggcacacaggggataggn
++
+IIA94445EEII===>IIIIIIIIICCCCIIHIIICC;;;;IIIIIIIIIIIIIIIIIIIIIIIIIF;;666DDIIIIIIIIIIIIIIIIIIIEE94442244@@666CC<<BDDA=-----2<,,,,659//00===8CIII;>>==HH;;IIIIIICC@@???III@@@@IC?666HIDDCI?B??CC<EE11111B4BDDCB;=@B777>////-=323?423,,,/=1,,,,-:4E;??EIIIIICCCCI>;;;IIIIIII<<@@?=////7=A99988<<4455IEEEIIIIIIIIIIIII<999HIIIIIIIIIII?????IIIIIIIIIIICAC;55539EIIIIIIIIIIIIIIIIHH999HHHIA=AEEFF@=.....AD@@@DDEEEEFIII;;;977FFCCC at 24449?FDD!
diff --git a/skbio/io/tests/data/longreads_original_sanger.fastq b/skbio/io/tests/data/longreads_original_sanger.fastq
new file mode 100644
index 0000000..32bab86
--- /dev/null
+++ b/skbio/io/tests/data/longreads_original_sanger.fastq
@@ -0,0 +1,120 @@
+ at FSRRS4401BE7HA [length=395] [gc=36.46] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=95]
+tcagTTAAGATGGGATAATATCCTCAGATTGCGTGATGAACTTTGTTCTGGTGGAGGAGAAGGAAGTGCATTCGACGTAT
+GCCCGTTTGTCGATATTTGtatttaaagtaatccgtcacaaatcagtgacataaatattatttagatttcgggagcaact
+ttatttattccacaagcaggtttaaattttaaatttaaattattgcagaagactttaaattaacctcgttgtcggagtca
+tttgttcggttattggtcgaaagtaaccncgggaagtgccgaaaactaacaaacaaaagaagatagtgaaattttaatta
+aaanaaatagccaaacgtaactaactaaaacggacccgtcgaggaactgccaacggacgacacagggagtagnnn
++FSRRS4401BE7HA [length=395] [gc=36.46] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=95]
+FFFDDDDDDDA666?688FFHGGIIIIIIIIIIIIIIIIIIHHHIIIIIIIIIGHGFFFFF====DFFFFFFFFFFFFFF
+D???:3104/76=:5...4.3,,,366////4<ABBAAA=CCFDDDDDDDD:666CDFFFF=<ABA=;:333111<===9
+9;B889FFFFFFDDBDBDDD=8844231..,,,-,,,,,,,,1133..---17111,,,,,22555131121.--.,333
+11,.,,3--,,.,,--,3511123..--!,,,,--,----9,,,,8=,,-,,,-,,,,---26:9:5-..1,,,,11//,
+,,,!,,1917--,,,,-3.,--,,17,,,,---+11113.030000,,,044400036;96662.//;7><;!!!
+ at FSRRS4401BRRTC [length=145] [gc=38.62] [flows=800] [phred_min=0] [phred_max=38] [trimmed_length=74]
+tcagCCAGCAATTCCGACTTAATTGTTCTTCTTCCATCATTCATCTCGACTAACAGTTCTACGATTAATGAGTTTGGCtt
+taatttgttgttcattattgtcacaattacactactgagactgccaaggcacncagggataggnn
++FSRRS4401BRRTC [length=145] [gc=38.62] [flows=800] [phred_min=0] [phred_max=38] [trimmed_length=74]
+FFFFFFFFFDDDDFFFFGFDDDDBAAAAA=<4444@@B=555:BBBBB@@?8:8<?<89898<84442;==3,,,514,,
+,11,,,.,,21777555513,..--1115758.//34488><<;;;;9944/!/4,,,57855!!
+ at FSRRS4401B64ST [length=382] [gc=40.58] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=346]
+tcagTTTTCTTAAATTACTTGAATCTGTTGAAGTGGATGTCCACTTTTGTATGCCAAATATGCCCAGCGTATACGATCTT
+GGCCACATCTCCACATAATCATCAGTCGGATGCAAAAAGCGATTAAACTAAAAATGAATGCGTTTTTAGATGAGTAAATA
+GGTAATACTTTGTTTAAATAATAAATGTCACAAACAGAACGCGGATTACAGTACCTGAAAATAGTTGTACTGTATCTGTG
+CCGGCACTTCCTCGGCCCTGAGAAGTTGTCCCGTTGTTTCCATTCGCACCATCCAATGGCCAAAGTTTGCGAAGAATCTG
+TTCCGTTCCATTACCAATTGTTTTTCCATGctgagactgccaaggcacacaggggataggnn
++FSRRS4401B64ST [length=382] [gc=40.58] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=346]
+IIIICCCCI??666IIIIIIIIIIIIIIIIIIIIIIIIIIIIII6666IAIIIII???IIIICCCIIIIIIIIIIIIIII
+IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII66333EICE::338=/----,8=>>??:2-////7>CEEIEIHHHII
+IIIIIIIE;;9911199B???IBCHIIIIIIHHHIIHHHIIIIIIIIIIIIIIIIIIBBCCIIIIIIIIIIIIIIIIIII
+IIIIIIIIIIIIIIIGGGIIIIIIIIID?===DIIIHHHIIIIIIIIIHHHIIIIIIIIIIHHHIHHHIIIIIIIIIIII
+IIIIIIIIII?>;9988==5----.@@AEGIIIIIIIIIHH????EIIIFF999;EIIBB!!
+ at FSRRS4401EJ0YH [length=381] [gc=48.29] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=343]
+tcagTTTTTGGAGAATTCCGTCAGGGACGGCATGGCATATTTGTGGGTTCGGCACGGCGTCCTGGCCAAGAAGAAGAAGA
+CGAATTAGCCCGTTAATTTAATGACACCTTCCCCAATTTTGCAGCAATGATTGGTTCATTCTTGGCGGTGCGTTTTTGTG
+CTTCGTCGAATTGTTGGCCATTTTGGTCCACCGGCCATCATCTTTACGCTATCCGACTGATTGGAAATCACCGCCTAGCA
+TTTTGCCGAAGATTGTTGCGTTGTACGGCCATGTGCTGATTGTTTACATTGGCATTCTTGGCAATTTGTCCTTGGTCGGC
+TTTGACGGCAAATTTGCGGTGTTAAGTctgagactgccaaggcacacagggggatagggnn
++FSRRS4401EJ0YH [length=381] [gc=48.29] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=343]
+IIII?????IIIIIIIIIIIIIIHHHIIIIIIIIIIIIIHHHIIHHHIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
+IIIIIIIIHHHIIIIIHHHIIIIIIIIIIIAAAAII>>>>IIIIIIIIIIIIIIIIIIIIIIIIIIEEIEE;33333D7I
+IIIIIIIIIIIIIIIIIIIICC@@HHIIIIIIIIIIIIIIIIHHHIIIIIIIIIIIIIIIIIIIHHHIIIIIIIIIIIII
+BBBBIHCDCHIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIHHHIIIHHCCDIIIIIIHHHIICCCH=CCIIIIIIIII
+GGGIIIIIIHHHHHHIIIIIIIIIIIIIIIHHHIIHHE??>>?EFEE?/////;:80--!!
+ at FSRRS4401BK0IB [length=507] [gc=49.31] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=208]
+tcagTTGACCGGCGTTGTGTAACAATAATTCATTATTCTGAGACGATGCCAATGTAATCGACGGTTTATGCCCAATTATT
+CCCATCTATGCTTAACTGATCAAATACTATTTGCATTACGTCACGAAATTGCGCGAACACCGCCGGCCGACAATAATTTA
+TACCGGACATACCGGAGTTGATGGTAATCGGTAAAGAGTTTTATTTAATTATntattatcnctattaattattgttanca
+acaatgtgcacgctntgccgcccgccgccgccgtgtcggtaggaccccggacggacccggacccggttcgggtacccgtt
+ttcgggttcccggaaccgtttttcgggtacccggttttttcggggggccccccggtaaaaaaccggggaaccccctaaaa
+cgggtaaacgtaccgtaagggaccccctaaacgggggccccgaaaaaccgggacccaaaccggggggaaacggttaaagg
+ggggggaagtaggngnnnnnnnnnnnn
++FSRRS4401BK0IB [length=507] [gc=49.31] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=208]
+FFFA@@FFFFFFFFFFHHB:::@BFFFFGGHIHIIIIIIIIIIIIIIIIIIIIIIIFFFFFFFFF?=BA at 11188011<<
+88;?AABDDC???DDAAAADA666D?DDD=====AA>?>><<<=<11188<<???AA?9555=ABBB@@?=>>?@@1114
+2::DDA???DFFFFFFFFFFFFFBAAAA<<0000.22=////8,--111111!23--/24!37:6666<;822/..4!46
+521177553.-.23!231121112,,-,,211==5------,12,,,,,,-,,,-1,,,,-,,155--,,,,13111.,,
+,,,,,,++111..11..1,,,,,,,,,+3,,,,,--22-----//----55//**/--22--**,,,,**,,,,,,.1.,
+*,,,,***,,,,,,,,,,,,,,,,,,,,,,,),,-,,,,,,),,,,,**//.),,,///,,,,,,,,,,,.))33---,,
+,,,,,,,,(0,,,!.!!!!!!!!!!!!
+ at FSRRS4401ARCCB [length=258] [gc=46.90] [flows=800] [phred_min=0] [phred_max=38] [trimmed_length=193]
+tcagTTATTGCAGTCGTTCCGCGCCATCGCCGGTAACCGTCCGCGTGTTATTCTGTGTATCGGCCAACCTTCGTATAACT
+TCGTATAATGTATGCTATACGAAGTTATTACGATCTATACCGGCGAAACTCAGCCGAAAGGTCTCGCGGTAGAGCCTATG
+AGCTGCCCGACCGATGCATTTAAATTTCCGGGGATCGtcgctgatctgagactgccaaaggcacactagggggataggnn
+nnnnnnnnnnnnnnnnnn
++FSRRS4401ARCCB [length=258] [gc=46.90] [flows=800] [phred_min=0] [phred_max=38] [trimmed_length=193]
+FFF<8::@DFFFFFFFGGFDCAAAAAB@@000046<;66322366762243348<<=??4445::>ABAAA@<<==B=:5
+55:BBD??=BDDDDFFFCCCCCCCFFCDDDFFFFFDBAA==88880004><<<99688;889<889?BBBBA=???DDBB
+B@@??88889---237771,,,,,,,,--1152<<00158A@><<<<<43277711,,,--37===75,----34666!!
+!!!!!!!!!!!!!!!!!!
+ at FSRRS4401CM938 [length=453] [gc=44.15] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=418]
+tcagGTTTTAAATCGCTTTCCAAGGAATTTGAGTCTAAATCCGGTGGATCCCATCAGTACAAATGCGGCGACAAGGCCGT
+GAAAACACTGCTTAATTCTTTGCACTTTTTGGCCACCTTTTTGGAAATGTTGTTTTGTGTTCTCAAAATTTTCCATCTCA
+GAACAAACATTCCATCGGGCTGATGTTGTGGCTTTTGGCGCGCGAAGTGCTGCTACTGCGCGGCAAAATCAGTCGCCAGA
+CCGGTTTTGTTGTGGACGACAAAGTGATCATGCCTGACTTGTACTTCTACCGCGATCCGCAAGCGCGAATTGGTCACATA
+GTTATAGAATTTTTGAGCCTTTTTCTTGACATAAAAAGTGTGGTTTTAAAAATTTCCTGGCAGGACCCACGCCAACGTTC
+AGGAATAATATCTTTTAAAAAGctgagactgccaaggcacacaggggataggn
++FSRRS4401CM938 [length=453] [gc=44.15] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=418]
+IIIIICC>>666IIIICCCIIIIIIIIHHHIIIIIG666IIIIIIIIIIHHHIIIIIIIICCCIIIIIIIIIIIIIIIII
+I@@@@IIIIIIIIIIIIIHHHIIII???=;IIEEI:://///7544:?IBB72244E8EECEBC=@@@@@@@HHIIIIII
+IIIIBBBIIIIIIIIIHHHIIIIIIIIIIIIICCCCIIIIIIIIIIIIIIIIIIIIIIIIIIII6666DEIIHEB??D at 7
+77772222D89EEIIIIIIIHHHIIIIIIIIHHHIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIHHHIIIIII
+IIIIIIIII==?==IIIII???=;I63DDD82--,,,38==::----,,---+++33066;@6380008/:889<:BGII
+IIIIIIIFE<?F5500-----5:;;;:>?@C<<7999EEEEEE@@@@EEEEE!
+ at FSRRS4401EQLIK [length=411] [gc=34.31] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=374]
+tcagTTTAATTTGGTGCTTCCTTTCAATTCCTTAGTTTAAACTTGGCACTGAAGTCTCGCATTTATAACTAGAGCCCGGA
+TTTTAGAGGCTAAAAAGTTTTCCAGATTTCAAAATTTATTTCGAAACTATTTTTCTGATTGTGATGTGACGGATTTCTAA
+ATTAAATCGAAATGATGTGTATTGAACTTAACAAGTGATTTTTATCAGATTTTGTCAATGAATAAATTTTAATTTAAATC
+TCTTTCTAACACTTTCATGATTAAAATCTAACAAAGCGCGACCAGTATGTGAGAAGAGCAAAAACAACAAAAAGTGCTAG
+CACTAAAGAAGGTTCGAACCCAACACATAACGTAAGAGTTACCGGGAAGAAAACCACTctgagactgccaaggcacacag
+ggggataggnn
++FSRRS4401EQLIK [length=411] [gc=34.31] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=374]
+III?666??HHHIIIIIIIIIGGGIIIIIIIIIIIGGGHHHIIIIIIIIIIIIIIIIIIIIGGGIIIIIIIIIIHHHIII
+@@@@IIIIEIE111100----22?=8---:-------,,,,33---5:3,----:1BBEEEHIIIIIIIIIIIB??A122
+000...:?=024GIIIIIIIIIIIIIIIIIIECCHHB=//-,,21??<5-002=6FBB?:9<=11/4444//-//77??G
+EIEEHIACCIIIHHHIIIIIIICCCAIIIHHHHHHIIIIIIIIIIIIIIIIIIIIIIEE1//--822;----.777 at EII
+IIII???IIIIIIIIIIIHHHIIIIIIIIIIIIIIIIIIII994227775555AE;IEEEEEIIIII??9755>@==:3,
+,,,,33336!!
+ at FSRRS4401AOV6A [length=309] [gc=22.98] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=273]
+tcagTTTTCAAATTTTCCGAAATTTGCTGTTTGGTAGAAGGCAAATTATTTGATTGAATTTTGTATTTATTTAAAACAAT
+TTATTTTAAAATAATAATTTTCCATTGACTTTTTACATTTAATTGATTTTATTATGCATTTTATATTTGTTTTCTAAATA
+TTCGTTTGCAAACTCACGTTGAAATTGTATTAAACTCGAAATTAGAGTTTTTGAAATTAATTTTTATGTAGCATAATATT
+TTAAACATATTGGAATTTTATAAAACATTATATTTTTctgagactgccaaggcacacagggggataggn
++FSRRS4401AOV6A [length=309] [gc=22.98] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=273]
+IIIICCCCI;;;CCCCIII???HHHIIIIHHHIIIIIIIIIIHHHIIIHHHIIIIIII@@@@IFICCCICAA;;;;ED?B
+@@D66445555<<<GII>>AAIIIIIIII;;;::III???CCCIII;;;;IFFIIIIICCCBIBIEEDC4444?4BBBE?
+EIIICHHII;;;HIIIIIIHH;;;HHIIIII;;;IIIIHHHIIIIII>>??>IEEBGG::1111/46FBFBB?=;=A?97
+771119:EAAADDBD7777=/111122DA@@B68;;;I8HHIIIII;;;;?>IECCCB/////;745=!
+ at FSRRS4401EG0ZW [length=424] [gc=23.82] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=389]
+tcagTTTTGATCTTTTAATAATGAATTTTAATGTGTTAAAATGATTGCATTGATGGCATAACCGCATTTAAATTAATTAC
+ATGAAGTGTAAGTATGAAATTTTCCTTTCCAAATTGCAAAAACTAAAATTTAAAATTTATCGTAAAAATTAACATATATT
+TTAAACGATTTTAAGAAACATTTGTAAATTATATTTTTGTGAAGCGTTCAAACAAAAATAAACAATAAAATATTTTTCTA
+TTTAATAGCAAAACATTTGACGATGAAAAGGAAAATGCGGGTTTGAAAATGGGCTTTGCCATGCTATTTTCATAATAACA
+TATTTTTATTATGAATAATAAATTTACATACAATATATACAGTCTTAAATTTATTCATAATATTTTTGAGAATctgagac
+tgccaaggcacacaggggataggn
++FSRRS4401EG0ZW [length=424] [gc=23.82] [flows=800] [phred_min=0] [phred_max=40] [trimmed_length=389]
+IIA94445EEII===>IIIIIIIIICCCCIIHIIICC;;;;IIIIIIIIIIIIIIIIIIIIIIIIIF;;666DDIIIIII
+IIIIIIIIIIIIIEE94442244@@666CC<<BDDA=-----2<,,,,659//00===8CIII;>>==HH;;IIIIIICC
+@@???III@@@@IC?666HIDDCI?B??CC<EE11111B4BDDCB;=@B777>////-=323?423,,,/=1,,,,-:4E
+;??EIIIIICCCCI>;;;IIIIIII<<@@?=////7=A99988<<4455IEEEIIIIIIIIIIIII<999HIIIIIIIII
+II?????IIIIIIIIIIICAC;55539EIIIIIIIIIIIIIIIIHH999HHHIA=AEEFF@=.....AD@@@DDEEEEFI
+II;;;977FFCCC at 24449?FDD!
diff --git a/skbio/io/tests/data/misc_dna_as_illumina.fastq b/skbio/io/tests/data/misc_dna_as_illumina.fastq
new file mode 100644
index 0000000..66458bd
--- /dev/null
+++ b/skbio/io/tests/data/misc_dna_as_illumina.fastq
@@ -0,0 +1,16 @@
+ at FAKE0007 Original version has lower case unambiguous DNA with PHRED scores from 0 to 40 inclusive (in that order)
+ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTA
++
+ at ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
+ at FAKE0008 Original version has mixed case unambiguous DNA with PHRED scores from 0 to 40 inclusive (in that order)
+gTcatAGcgTcatAGcgTcatAGcgTcatAGcgTcatAGcg
++
+ at ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
+ at FAKE0009 Original version has lower case unambiguous DNA with PHRED scores from 0 to 40 inclusive (in that order)
+tcagtcagtcagtcagtcagtcagtcagtcagtcagtcagt
++
+ at ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
+ at FAKE0010 Original version has mixed case ambiguous DNA and PHRED scores of 40, 30, 20, 10 (cycled)
+gatcrywsmkhbvdnGATCRYWSMKHBVDN
++
+h^TJh^TJh^TJh^TJh^TJh^TJh^TJh^
diff --git a/skbio/io/tests/data/misc_dna_as_sanger.fastq b/skbio/io/tests/data/misc_dna_as_sanger.fastq
new file mode 100644
index 0000000..9518688
--- /dev/null
+++ b/skbio/io/tests/data/misc_dna_as_sanger.fastq
@@ -0,0 +1,16 @@
+ at FAKE0007 Original version has lower case unambiguous DNA with PHRED scores from 0 to 40 inclusive (in that order)
+ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTA
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0008 Original version has mixed case unambiguous DNA with PHRED scores from 0 to 40 inclusive (in that order)
+gTcatAGcgTcatAGcgTcatAGcgTcatAGcgTcatAGcg
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0009 Original version has lower case unambiguous DNA with PHRED scores from 0 to 40 inclusive (in that order)
+tcagtcagtcagtcagtcagtcagtcagtcagtcagtcagt
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0010 Original version has mixed case ambiguous DNA and PHRED scores of 40, 30, 20, 10 (cycled)
+gatcrywsmkhbvdnGATCRYWSMKHBVDN
++
+I?5+I?5+I?5+I?5+I?5+I?5+I?5+I?
diff --git a/skbio/io/tests/data/misc_dna_original_sanger.fastq b/skbio/io/tests/data/misc_dna_original_sanger.fastq
new file mode 100644
index 0000000..9518688
--- /dev/null
+++ b/skbio/io/tests/data/misc_dna_original_sanger.fastq
@@ -0,0 +1,16 @@
+ at FAKE0007 Original version has lower case unambiguous DNA with PHRED scores from 0 to 40 inclusive (in that order)
+ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTA
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0008 Original version has mixed case unambiguous DNA with PHRED scores from 0 to 40 inclusive (in that order)
+gTcatAGcgTcatAGcgTcatAGcgTcatAGcgTcatAGcg
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0009 Original version has lower case unambiguous DNA with PHRED scores from 0 to 40 inclusive (in that order)
+tcagtcagtcagtcagtcagtcagtcagtcagtcagtcagt
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0010 Original version has mixed case ambiguous DNA and PHRED scores of 40, 30, 20, 10 (cycled)
+gatcrywsmkhbvdnGATCRYWSMKHBVDN
++
+I?5+I?5+I?5+I?5+I?5+I?5+I?5+I?
diff --git a/skbio/io/tests/data/misc_rna_as_illumina.fastq b/skbio/io/tests/data/misc_rna_as_illumina.fastq
new file mode 100644
index 0000000..3429ad9
--- /dev/null
+++ b/skbio/io/tests/data/misc_rna_as_illumina.fastq
@@ -0,0 +1,16 @@
+ at FAKE0011 Original version has lower case unambiguous RNA with PHRED scores from 0 to 40 inclusive (in that order)
+ACGUACGUACGUACGUACGUACGUACGUACGUACGUACGUA
++
+ at ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
+ at FAKE0012 Original version has mixed case unambiguous RNA with PHRED scores from 0 to 40 inclusive (in that order)
+gUcauAGcgUcauAGcgUcauAGcgUcauAGcgUcauAGcg
++
+ at ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
+ at FAKE0013 Original version has lower case unambiguous RNA with PHRED scores from 0 to 40 inclusive (in that order)
+ucagucagucagucagucagucagucagucagucagucagu
++
+ at ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
+ at FAKE0014 Original version has mixed case ambiguous RNA with PHRED scores from 35 to 40 inclusive (cycled)
+gaucrywsmkhbvdnGAUCRYWSMKHBVDN
++
+cdefghcdefghcdefghcdefghcdefgh
diff --git a/skbio/io/tests/data/misc_rna_as_sanger.fastq b/skbio/io/tests/data/misc_rna_as_sanger.fastq
new file mode 100644
index 0000000..bfff87b
--- /dev/null
+++ b/skbio/io/tests/data/misc_rna_as_sanger.fastq
@@ -0,0 +1,16 @@
+ at FAKE0011 Original version has lower case unambiguous RNA with PHRED scores from 0 to 40 inclusive (in that order)
+ACGUACGUACGUACGUACGUACGUACGUACGUACGUACGUA
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0012 Original version has mixed case unambiguous RNA with PHRED scores from 0 to 40 inclusive (in that order)
+gUcauAGcgUcauAGcgUcauAGcgUcauAGcgUcauAGcg
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0013 Original version has lower case unambiguous RNA with PHRED scores from 0 to 40 inclusive (in that order)
+ucagucagucagucagucagucagucagucagucagucagu
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0014 Original version has mixed case ambiguous RNA with PHRED scores from 35 to 40 inclusive (cycled)
+gaucrywsmkhbvdnGAUCRYWSMKHBVDN
++
+DEFGHIDEFGHIDEFGHIDEFGHIDEFGHI
diff --git a/skbio/io/tests/data/misc_rna_original_sanger.fastq b/skbio/io/tests/data/misc_rna_original_sanger.fastq
new file mode 100644
index 0000000..bfff87b
--- /dev/null
+++ b/skbio/io/tests/data/misc_rna_original_sanger.fastq
@@ -0,0 +1,16 @@
+ at FAKE0011 Original version has lower case unambiguous RNA with PHRED scores from 0 to 40 inclusive (in that order)
+ACGUACGUACGUACGUACGUACGUACGUACGUACGUACGUA
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0012 Original version has mixed case unambiguous RNA with PHRED scores from 0 to 40 inclusive (in that order)
+gUcauAGcgUcauAGcgUcauAGcgUcauAGcgUcauAGcg
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0013 Original version has lower case unambiguous RNA with PHRED scores from 0 to 40 inclusive (in that order)
+ucagucagucagucagucagucagucagucagucagucagu
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI
+ at FAKE0014 Original version has mixed case ambiguous RNA with PHRED scores from 35 to 40 inclusive (cycled)
+gaucrywsmkhbvdnGAUCRYWSMKHBVDN
++
+DEFGHIDEFGHIDEFGHIDEFGHIDEFGHI
diff --git a/skbio/io/tests/data/ordination_L&L_CA_data_scores b/skbio/io/tests/data/ordination_L&L_CA_data_scores
new file mode 100644
index 0000000..3f6da49
--- /dev/null
+++ b/skbio/io/tests/data/ordination_L&L_CA_data_scores
@@ -0,0 +1,18 @@
+Eigvals	2
+0.0961330159181	0.0409418140138
+
+Proportion explained	0
+
+Species	3	2
+Species1	0.408869425742	0.0695518116298
+Species2	-0.1153860437	-0.299767683538
+Species3	-0.309967102571	0.187391917117
+
+Site	3	2
+Site1	-0.848956053187	0.882764759014
+Site2	-0.220458650578	-1.34482000302
+Site3	1.66697179591	0.470324389808
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_PCoA_sample_data_3_scores b/skbio/io/tests/data/ordination_PCoA_sample_data_3_scores
new file mode 100644
index 0000000..d428dc3
--- /dev/null
+++ b/skbio/io/tests/data/ordination_PCoA_sample_data_3_scores
@@ -0,0 +1,22 @@
+Eigvals	9
+0.512367260461	0.300719094427	0.267912066004	0.208988681078	0.19169895326	0.16054234528	0.15017695712	0.122457748167	0.0
+
+Proportion explained	9
+0.267573832777	0.15704469605	0.139911863774	0.109140272454	0.100111048503	0.0838401161912	0.0784269939011	0.0639511763509	0.0
+
+Species	0	0
+
+Site	9	9
+PC.636	-0.258465461183	0.173999546883	0.0382875792552	-0.19447750562	0.0831176020844	0.262430333201	-0.0231636392235	-0.0184794039581	0.0
+PC.635	-0.271001135391	-0.0185951319063	-0.0864841926349	0.118064245315	-0.198808358437	-0.0211723599535	-0.191024027565	0.155646592377	0.0
+PC.356	0.235077898175	0.0962519254489	-0.345792726714	-0.00320862577619	-0.0963777675519	0.0457025386953	0.185472813286	0.0404093971793	0.0
+PC.481	0.0261407664325	-0.0111459676533	0.147660603015	0.29087660853	0.203945472801	0.0619712384758	0.101641328709	0.105690998719	0.0
+PC.354	0.285007552283	-0.0192549888483	0.0623263375385	0.138126799852	-0.104798602423	0.0951720730628	-0.129636097542	-0.220687170372	0.0
+PC.593	0.204636326241	-0.139361150932	0.291513819623	-0.181566786821	-0.159580132715	-0.0246412130162	0.0866252404441	0.0996221476871	0.0
+PC.355	0.233482403212	0.225257974068	-0.0188623096268	-0.107729981831	0.177108999572	-0.192905835151	-0.149819471408	0.0383549037465	0.0
+PC.607	-0.0949631911323	-0.420974802495	-0.154869454869	-0.0898427509281	0.152618194488	-0.0334232691501	-0.0251224777303	-0.0508988536409	0.0
+PC.634	-0.359915158638	0.113822595435	0.0662203444138	0.0297579972788	-0.0572254078183	-0.193133506163	0.145026331031	-0.149658611738	0.0
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_error1 b/skbio/io/tests/data/ordination_error1
new file mode 100644
index 0000000..d0d1d47
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error1
@@ -0,0 +1,43 @@
+0.366135830393	0.186887643052	0.0788466514249	0.082287840501	0.0351348475787	0.0233265839374	0.0099048981912	0.00122461669234	0.000417454724117
+
+Proportion explained	0
+
+Species	9	9
+Species0	0.110350890177	0.282399990052	-0.203028976154	-0.00192462284409	-0.082232863384	0.0857314258364	-0.0122038907184	-0.0425198793666	0.00466719926338
+Species1	0.141359038961	0.303495645402	0.395441211576	-0.14126625534	-0.0268859204718	0.143253061936	0.0430260301697	0.0476377655759	-0.00228172378295
+Species2	-1.01552204222	0.0958317865043	-0.198262718034	-0.104801030067	0.130025239749	0.0244045261332	0.0464701211285	0.0269279200532	0.0350103177576
+Species3	-1.03620650502	0.109624974112	0.220984718362	0.223640072997	-0.243745876054	-0.0259064859794	-0.0534088909011	-0.0315611195993	0.0256448427552
+Species4	1.05371722248	0.537178749104	-0.438075060322	0.223480553581	-0.323948461806	0.124644870822	-0.119275907223	0.0416254660329	-0.0381955235096
+Species5	0.998558655	0.573960582723	0.679918103399	-0.389963380717	0.299077945999	0.328451006171	0.21215881857	-0.0829871883001	-0.0439653996462
+Species6	0.255245719879	-0.178168259149	-0.204127155429	0.433397565801	0.0707099230629	-0.18817306522	0.126908756045	0.0044937289123	-0.0122511718244
+Species7	0.146555872394	-0.857362497037	-0.0152499051659	0.0527604990862	0.354475793915	-0.0416813697787	-0.199011239586	-0.00213723187073	-0.00782946141667
+Species8	0.413705102117	-0.707948964322	0.21569736034	-0.690314241725	-0.148431001217	-0.334251868558	-0.00628707445028	-0.00364123416731	-0.0122722164511
+
+Site	10	9
+Site0	0.710587311248	-3.08166800613	0.219651379947	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.584771352278	-3.00669301091	-0.947448656768	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.762734278287	-3.15258603503	2.13924426714	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.11230735331	1.07150585141	-1.87527740873	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.979116769996	-0.0603144289026	-0.696277367656	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.04322560423	0.459426970165	-0.639802790578	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-0.954490118162	-0.0847021660539	0.132509124411	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.947268764751	-0.108370567311	0.526107182587	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.14808173207	0.490449274267	0.478353666755	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	1.03291557934	1.0350490304	2.74691777314	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
+
+Biplot	3	3
+-0.169746767979	0.63069090084	0.760769036049
+-0.994016563505	0.0609533148724	-0.0449369418179
+0.184352565909	-0.974867543612	0.0309865007541
+
+Site constraints	10	9
+Site0	0.692138797603	-3.08053663489	-0.328747278055	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.664559513865	-3.06214571808	0.230249303805	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.636980230127	-3.04375480127	0.789245885666	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.10887578995	0.500396915484	-1.55606822404	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.970016224052	0.0654867737684	-1.1206070781	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.05371722248	0.537178749104	-0.438075060322	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-1.02517479153	0.102268607388	-0.00261391438256	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.998558655	0.573960582723	0.679918103399	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.080333359	0.139050441007	1.11537924934	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	0.943400087524	0.610742416342	1.79791126712	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
diff --git a/skbio/io/tests/data/ordination_error10 b/skbio/io/tests/data/ordination_error10
new file mode 100644
index 0000000..2a38f2a
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error10
@@ -0,0 +1,18 @@
+Eigvals	2
+0.0961330159181	0.0409418140138
+
+Proportion explained	0
+
+Species	3	2
+Species1	0.408869425742	0.0695518116298
+Species2	-0.1153860437	-0.299767683538
+Species3	-0.309967102571	0.187391917117
+
+Site	3	2
+Site1	-0.848956053187
+Site2	-0.220458650578
+Site3	1.66697179591
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_error11 b/skbio/io/tests/data/ordination_error11
new file mode 100644
index 0000000..a533e3f
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error11
@@ -0,0 +1,44 @@
+Eigvals	9
+0.366135830393	0.186887643052	0.0788466514249	0.082287840501	0.0351348475787	0.0233265839374	0.0099048981912	0.00122461669234	0.000417454724117
+
+Proportion explained	0
+
+Species	9	9
+Species0	0.110350890177	0.282399990052	-0.203028976154	-0.00192462284409	-0.082232863384	0.0857314258364	-0.0122038907184	-0.0425198793666	0.00466719926338
+Species1	0.141359038961	0.303495645402	0.395441211576	-0.14126625534	-0.0268859204718	0.143253061936	0.0430260301697	0.0476377655759	-0.00228172378295
+Species2	-1.01552204222	0.0958317865043	-0.198262718034	-0.104801030067	0.130025239749	0.0244045261332	0.0464701211285	0.0269279200532	0.0350103177576
+Species3	-1.03620650502	0.109624974112	0.220984718362	0.223640072997	-0.243745876054	-0.0259064859794	-0.0534088909011	-0.0315611195993	0.0256448427552
+Species4	1.05371722248	0.537178749104	-0.438075060322	0.223480553581	-0.323948461806	0.124644870822	-0.119275907223	0.0416254660329	-0.0381955235096
+Species5	0.998558655	0.573960582723	0.679918103399	-0.389963380717	0.299077945999	0.328451006171	0.21215881857	-0.0829871883001	-0.0439653996462
+Species6	0.255245719879	-0.178168259149	-0.204127155429	0.433397565801	0.0707099230629	-0.18817306522	0.126908756045	0.0044937289123	-0.0122511718244
+Species7	0.146555872394	-0.857362497037	-0.0152499051659	0.0527604990862	0.354475793915	-0.0416813697787	-0.199011239586	-0.00213723187073	-0.00782946141667
+Species8	0.413705102117	-0.707948964322	0.21569736034	-0.690314241725	-0.148431001217	-0.334251868558	-0.00628707445028	-0.00364123416731	-0.0122722164511
+
+Site	10	9
+Site0	0.710587311248	-3.08166800613	0.219651379947	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.584771352278	-3.00669301091	-0.947448656768	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.762734278287	-3.15258603503	2.13924426714	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.11230735331	1.07150585141	-1.87527740873	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.979116769996	-0.0603144289026	-0.696277367656	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.04322560423	0.459426970165	-0.639802790578	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-0.954490118162	-0.0847021660539	0.132509124411	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.947268764751	-0.108370567311	0.526107182587	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.14808173207	0.490449274267	0.478353666755	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	1.03291557934	1.0350490304	2.74691777314	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
+
+Biplot	3	3
+-0.169746767979	0.63069090084	0.760769036049
+-0.994016563505	0.0609533148724	-0.0449369418179
+0.184352565909	-0.974867543612	0.0309865007541
+
+Site constraints	10	9
+Site0	0.692138797603	-3.08053663489	-0.328747278055	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.664559513865	-3.06214571808	0.230249303805	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Bro2	0.636980230127	-3.04375480127	0.789245885666	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.10887578995	0.500396915484	-1.55606822404	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.970016224052	0.0654867737684	-1.1206070781	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.05371722248	0.537178749104	-0.438075060322	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-1.02517479153	0.102268607388	-0.00261391438256	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.998558655	0.573960582723	0.679918103399	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.080333359	0.139050441007	1.11537924934	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	0.943400087524	0.610742416342	1.79791126712	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
diff --git a/skbio/io/tests/data/ordination_error12 b/skbio/io/tests/data/ordination_error12
new file mode 100644
index 0000000..8bf07a0
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error12
@@ -0,0 +1,22 @@
+Eigvals	9
+0.512367260461	0.300719094427	0.208988681078	0.19169895326	0.16054234528	0.15017695712	0.122457748167	0.0
+
+Proportion explained	9
+0.267573832777	0.15704469605	0.139911863774	0.109140272454	0.100111048503	0.0838401161912	0.0784269939011	0.0639511763509	0.0
+
+Species	0	0
+
+Site	9	9
+PC.636	-0.258465461183	0.173999546883	0.0382875792552	-0.19447750562	0.0831176020844	0.262430333201	-0.0231636392235	-0.0184794039581	0.0
+PC.635	-0.271001135391	-0.0185951319063	-0.0864841926349	0.118064245315	-0.198808358437	-0.0211723599535	-0.191024027565	0.155646592377	0.0
+PC.356	0.235077898175	0.0962519254489	-0.345792726714	-0.00320862577619	-0.0963777675519	0.0457025386953	0.185472813286	0.0404093971793	0.0
+PC.481	0.0261407664325	-0.0111459676533	0.147660603015	0.29087660853	0.203945472801	0.0619712384758	0.101641328709	0.105690998719	0.0
+PC.354	0.285007552283	-0.0192549888483	0.0623263375385	0.138126799852	-0.104798602423	0.0951720730628	-0.129636097542	-0.220687170372	0.0
+PC.593	0.204636326241	-0.139361150932	0.291513819623	-0.181566786821	-0.159580132715	-0.0246412130162	0.0866252404441	0.0996221476871	0.0
+PC.355	0.233482403212	0.225257974068	-0.0188623096268	-0.107729981831	0.177108999572	-0.192905835151	-0.149819471408	0.0383549037465	0.0
+PC.607	-0.0949631911323	-0.420974802495	-0.154869454869	-0.0898427509281	0.152618194488	-0.0334232691501	-0.0251224777303	-0.0508988536409	0.0
+PC.634	-0.359915158638	0.113822595435	0.0662203444138	0.0297579972788	-0.0572254078183	-0.193133506163	0.145026331031	-0.149658611738	0.0
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_error13 b/skbio/io/tests/data/ordination_error13
new file mode 100644
index 0000000..24bd662
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error13
@@ -0,0 +1,22 @@
+Eigvals	9
+0.512367260461	0.300719094427	0.267912066004	0.208988681078	0.19169895326	0.16054234528	0.15017695712	0.122457748167	0.0
+
+Proportion explained	9
+0.267573832777	0.15704469605	0.109140272454	0.100111048503	0.0838401161912	0.0784269939011	0.0639511763509	0.0
+
+Species	0	0
+
+Site	9	9
+PC.636	-0.258465461183	0.173999546883	0.0382875792552	-0.19447750562	0.0831176020844	0.262430333201	-0.0231636392235	-0.0184794039581	0.0
+PC.635	-0.271001135391	-0.0185951319063	-0.0864841926349	0.118064245315	-0.198808358437	-0.0211723599535	-0.191024027565	0.155646592377	0.0
+PC.356	0.235077898175	0.0962519254489	-0.345792726714	-0.00320862577619	-0.0963777675519	0.0457025386953	0.185472813286	0.0404093971793	0.0
+PC.481	0.0261407664325	-0.0111459676533	0.147660603015	0.29087660853	0.203945472801	0.0619712384758	0.101641328709	0.105690998719	0.0
+PC.354	0.285007552283	-0.0192549888483	0.0623263375385	0.138126799852	-0.104798602423	0.0951720730628	-0.129636097542	-0.220687170372	0.0
+PC.593	0.204636326241	-0.139361150932	0.291513819623	-0.181566786821	-0.159580132715	-0.0246412130162	0.0866252404441	0.0996221476871	0.0
+PC.355	0.233482403212	0.225257974068	-0.0188623096268	-0.107729981831	0.177108999572	-0.192905835151	-0.149819471408	0.0383549037465	0.0
+PC.607	-0.0949631911323	-0.420974802495	-0.154869454869	-0.0898427509281	0.152618194488	-0.0334232691501	-0.0251224777303	-0.0508988536409	0.0
+PC.634	-0.359915158638	0.113822595435	0.0662203444138	0.0297579972788	-0.0572254078183	-0.193133506163	0.145026331031	-0.149658611738	0.0
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_error14 b/skbio/io/tests/data/ordination_error14
new file mode 100644
index 0000000..01d4965
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error14
@@ -0,0 +1,22 @@
+Eigvals	9
+0.512367260461	0.300719094427	0.267912066004	0.208988681078	0.19169895326	0.16054234528	0.15017695712	0.122457748167	0.0
+
+Proportion explained	9
+0.267573832777	0.15704469605	0.139911863774	0.109140272454	0.100111048503	0.0838401161912	0.0784269939011	0.0639511763509	0.0
+
+Species	0	0
+
+Site	9	0
+PC.636	-0.258465461183	0.173999546883	0.0382875792552	-0.19447750562	0.0831176020844	0.262430333201	-0.0231636392235	-0.0184794039581	0.0
+PC.635	-0.271001135391	-0.0185951319063	-0.0864841926349	0.118064245315	-0.198808358437	-0.0211723599535	-0.191024027565	0.155646592377	0.0
+PC.356	0.235077898175	0.0962519254489	-0.345792726714	-0.00320862577619	-0.0963777675519	0.0457025386953	0.185472813286	0.0404093971793	0.0
+PC.481	0.0261407664325	-0.0111459676533	0.147660603015	0.29087660853	0.203945472801	0.0619712384758	0.101641328709	0.105690998719	0.0
+PC.354	0.285007552283	-0.0192549888483	0.0623263375385	0.138126799852	-0.104798602423	0.0951720730628	-0.129636097542	-0.220687170372	0.0
+PC.593	0.204636326241	-0.139361150932	0.291513819623	-0.181566786821	-0.159580132715	-0.0246412130162	0.0866252404441	0.0996221476871	0.0
+PC.355	0.233482403212	0.225257974068	-0.0188623096268	-0.107729981831	0.177108999572	-0.192905835151	-0.149819471408	0.0383549037465	0.0
+PC.607	-0.0949631911323	-0.420974802495	-0.154869454869	-0.0898427509281	0.152618194488	-0.0334232691501	-0.0251224777303	-0.0508988536409	0.0
+PC.634	-0.359915158638	0.113822595435	0.0662203444138	0.0297579972788	-0.0572254078183	-0.193133506163	0.145026331031	-0.149658611738	0.0
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_error15 b/skbio/io/tests/data/ordination_error15
new file mode 100644
index 0000000..0149cfa
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error15
@@ -0,0 +1,21 @@
+Eigvals	9
+0.512367260461	0.300719094427	0.267912066004	0.208988681078	0.19169895326	0.16054234528	0.15017695712	0.122457748167	0.0
+
+Proportion explained	9
+0.267573832777	0.15704469605	0.139911863774	0.109140272454	0.100111048503	0.0838401161912	0.0784269939011	0.0639511763509	0.0
+
+Species	0	0
+
+Site	9	9
+PC.636	-0.258465461183	0.173999546883	0.0382875792552	-0.19447750562	0.0831176020844	0.262430333201	-0.0231636392235	-0.0184794039581
+PC.635	-0.271001135391	-0.0185951319063	-0.0864841926349	0.118064245315	-0.198808358437	-0.0211723599535	-0.191024027565	0.155646592377
+PC.356	0.235077898175	0.0962519254489	-0.345792726714	-0.00320862577619	-0.0963777675519	0.0457025386953	0.185472813286	0.0404093971793
+PC.481	0.0261407664325	-0.0111459676533	0.147660603015	0.29087660853	0.203945472801	0.0619712384758	0.101641328709	0.105690998719
+PC.354	0.285007552283	-0.0192549888483	0.0623263375385	0.138126799852	-0.104798602423	0.0951720730628	-0.129636097542	-0.220687170372
+PC.593	0.204636326241	-0.139361150932	0.291513819623	-0.181566786821	-0.159580132715	-0.0246412130162	0.0866252404441	0.0996221476871
+PC.355	0.233482403212	0.225257974068	-0.0188623096268	-0.107729981831	0.177108999572	-0.192905835151	-0.149819471408	0.0383549037465
+PC.607	-0.0949631911323	-0.420974802495	-0.154869454869	-0.0898427509281	0.152618194488	-0.0334232691501	-0.0251224777303	-0.0508988536409
+PC.634	-0.359915158638	0.113822595435	0.0662203444138	0.0297579972788	-0.0572254078183	-0.193133506163	0.145026331031	-0.149658611738
+Biplot
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_error16 b/skbio/io/tests/data/ordination_error16
new file mode 100644
index 0000000..04ab0a4
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error16
@@ -0,0 +1,44 @@
+Eigvals	9
+0.366135830393	0.186887643052	0.0788466514249	0.082287840501	0.0351348475787	0.0233265839374	0.0099048981912	0.00122461669234	0.000417454724117
+
+Proportion explained	0
+
+Species	9	9
+Species0	0.110350890177	0.282399990052	-0.203028976154	-0.00192462284409	-0.082232863384	0.0857314258364	-0.0122038907184	-0.0425198793666	0.00466719926338
+Species1	0.141359038961	0.303495645402	0.395441211576	-0.14126625534	-0.0268859204718	0.143253061936	0.0430260301697	0.0476377655759	-0.00228172378295
+Species2	-1.01552204222	0.0958317865043	-0.198262718034	-0.104801030067	0.130025239749	0.0244045261332	0.0464701211285	0.0269279200532	0.0350103177576
+Species3	-1.03620650502	0.109624974112	0.220984718362	0.223640072997	-0.243745876054	-0.0259064859794	-0.0534088909011	-0.0315611195993	0.0256448427552
+Species4	1.05371722248	0.537178749104	-0.438075060322	0.223480553581	-0.323948461806	0.124644870822	-0.119275907223	0.0416254660329	-0.0381955235096
+Species5	0.998558655	0.573960582723	0.679918103399	-0.389963380717	0.299077945999	0.328451006171	0.21215881857	-0.0829871883001	-0.0439653996462
+Species6	0.255245719879	-0.178168259149	-0.204127155429	0.433397565801	0.0707099230629	-0.18817306522	0.126908756045	0.0044937289123	-0.0122511718244
+Species7	0.146555872394	-0.857362497037	-0.0152499051659	0.0527604990862	0.354475793915	-0.0416813697787	-0.199011239586	-0.00213723187073	-0.00782946141667
+Species8	0.413705102117	-0.707948964322	0.21569736034	-0.690314241725	-0.148431001217	-0.334251868558	-0.00628707445028	-0.00364123416731	-0.0122722164511
+
+Site	10	9
+Site0	0.710587311248	-3.08166800613	0.219651379947	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.584771352278	-3.00669301091	-0.947448656768	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.762734278287	-3.15258603503	2.13924426714	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.11230735331	1.07150585141	-1.87527740873	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.979116769996	-0.0603144289026	-0.696277367656	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.04322560423	0.459426970165	-0.639802790578	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-0.954490118162	-0.0847021660539	0.132509124411	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.947268764751	-0.108370567311	0.526107182587	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.14808173207	0.490449274267	0.478353666755	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	1.03291557934	1.0350490304	2.74691777314	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
+
+Biplot	3	0
+-0.169746767979	0.63069090084	0.760769036049
+-0.994016563505	0.0609533148724	-0.0449369418179
+0.184352565909	-0.974867543612	0.0309865007541
+
+Site constraints	10	9
+Site0	0.692138797603	-3.08053663489	-0.328747278055	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.664559513865	-3.06214571808	0.230249303805	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.636980230127	-3.04375480127	0.789245885666	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.10887578995	0.500396915484	-1.55606822404	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.970016224052	0.0654867737684	-1.1206070781	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.05371722248	0.537178749104	-0.438075060322	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-1.02517479153	0.102268607388	-0.00261391438256	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.998558655	0.573960582723	0.679918103399	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.080333359	0.139050441007	1.11537924934	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	0.943400087524	0.610742416342	1.79791126712	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
diff --git a/skbio/io/tests/data/ordination_error17 b/skbio/io/tests/data/ordination_error17
new file mode 100644
index 0000000..a08769e
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error17
@@ -0,0 +1,44 @@
+Eigvals	9
+0.366135830393	0.186887643052	0.0788466514249	0.082287840501	0.0351348475787	0.0233265839374	0.0099048981912	0.00122461669234	0.000417454724117
+
+Proportion explained	0
+
+Species	9	9
+Species0	0.110350890177	0.282399990052	-0.203028976154	-0.00192462284409	-0.082232863384	0.0857314258364	-0.0122038907184	-0.0425198793666	0.00466719926338
+Species1	0.141359038961	0.303495645402	0.395441211576	-0.14126625534	-0.0268859204718	0.143253061936	0.0430260301697	0.0476377655759	-0.00228172378295
+Species2	-1.01552204222	0.0958317865043	-0.198262718034	-0.104801030067	0.130025239749	0.0244045261332	0.0464701211285	0.0269279200532	0.0350103177576
+Species3	-1.03620650502	0.109624974112	0.220984718362	0.223640072997	-0.243745876054	-0.0259064859794	-0.0534088909011	-0.0315611195993	0.0256448427552
+Species4	1.05371722248	0.537178749104	-0.438075060322	0.223480553581	-0.323948461806	0.124644870822	-0.119275907223	0.0416254660329	-0.0381955235096
+Species5	0.998558655	0.573960582723	0.679918103399	-0.389963380717	0.299077945999	0.328451006171	0.21215881857	-0.0829871883001	-0.0439653996462
+Species6	0.255245719879	-0.178168259149	-0.204127155429	0.433397565801	0.0707099230629	-0.18817306522	0.126908756045	0.0044937289123	-0.0122511718244
+Species7	0.146555872394	-0.857362497037	-0.0152499051659	0.0527604990862	0.354475793915	-0.0416813697787	-0.199011239586	-0.00213723187073	-0.00782946141667
+Species8	0.413705102117	-0.707948964322	0.21569736034	-0.690314241725	-0.148431001217	-0.334251868558	-0.00628707445028	-0.00364123416731	-0.0122722164511
+
+Site	10	9
+Site0	0.710587311248	-3.08166800613	0.219651379947	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.584771352278	-3.00669301091	-0.947448656768	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.762734278287	-3.15258603503	2.13924426714	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.11230735331	1.07150585141	-1.87527740873	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.979116769996	-0.0603144289026	-0.696277367656	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.04322560423	0.459426970165	-0.639802790578	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-0.954490118162	-0.0847021660539	0.132509124411	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.947268764751	-0.108370567311	0.526107182587	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.14808173207	0.490449274267	0.478353666755	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	1.03291557934	1.0350490304	2.74691777314	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
+
+Biplot	3	3
+-0.169746767979	0.63069090084
+-0.994016563505	0.0609533148724
+0.184352565909	-0.974867543612
+
+Site constraints	10	9
+Site0	0.692138797603	-3.08053663489	-0.328747278055	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.664559513865	-3.06214571808	0.230249303805	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.636980230127	-3.04375480127	0.789245885666	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.10887578995	0.500396915484	-1.55606822404	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.970016224052	0.0654867737684	-1.1206070781	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.05371722248	0.537178749104	-0.438075060322	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-1.02517479153	0.102268607388	-0.00261391438256	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.998558655	0.573960582723	0.679918103399	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.080333359	0.139050441007	1.11537924934	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	0.943400087524	0.610742416342	1.79791126712	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
diff --git a/skbio/io/tests/data/ordination_error18 b/skbio/io/tests/data/ordination_error18
new file mode 100644
index 0000000..80f9a05
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error18
@@ -0,0 +1,22 @@
+Eigvals	9
+0.512367260461	0.300719094427	0.267912066004	0.208988681078	0.19169895326	0.16054234528	0.15017695712	0.122457748167	0.0
+
+Proportion explained	8
+0.267573832777	0.139911863774	0.109140272454	0.100111048503	0.0838401161912	0.0784269939011	0.0639511763509	0.0
+
+Species	0	0
+
+Site	9	9
+PC.636	-0.258465461183	0.173999546883	0.0382875792552	-0.19447750562	0.0831176020844	0.262430333201	-0.0231636392235	-0.0184794039581	0.0
+PC.635	-0.271001135391	-0.0185951319063	-0.0864841926349	0.118064245315	-0.198808358437	-0.0211723599535	-0.191024027565	0.155646592377	0.0
+PC.356	0.235077898175	0.0962519254489	-0.345792726714	-0.00320862577619	-0.0963777675519	0.0457025386953	0.185472813286	0.0404093971793	0.0
+PC.481	0.0261407664325	-0.0111459676533	0.147660603015	0.29087660853	0.203945472801	0.0619712384758	0.101641328709	0.105690998719	0.0
+PC.354	0.285007552283	-0.0192549888483	0.0623263375385	0.138126799852	-0.104798602423	0.0951720730628	-0.129636097542	-0.220687170372	0.0
+PC.593	0.204636326241	-0.139361150932	0.291513819623	-0.181566786821	-0.159580132715	-0.0246412130162	0.0866252404441	0.0996221476871	0.0
+PC.355	0.233482403212	0.225257974068	-0.0188623096268	-0.107729981831	0.177108999572	-0.192905835151	-0.149819471408	0.0383549037465	0.0
+PC.607	-0.0949631911323	-0.420974802495	-0.154869454869	-0.0898427509281	0.152618194488	-0.0334232691501	-0.0251224777303	-0.0508988536409	0.0
+PC.634	-0.359915158638	0.113822595435	0.0662203444138	0.0297579972788	-0.0572254078183	-0.193133506163	0.145026331031	-0.149658611738	0.0
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_error19 b/skbio/io/tests/data/ordination_error19
new file mode 100644
index 0000000..0e67a3b
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error19
@@ -0,0 +1,18 @@
+Eigvals	2
+0.0961330159181	0.0409418140138
+
+Proportion explained	0
+
+Species	3	1
+Species1	0.408869425742
+Species2	-0.1153860437
+Species3	-0.309967102571
+
+Site	3	2
+Site1	-0.848956053187	0.882764759014
+Site2	-0.220458650578	-1.34482000302
+Site3	1.66697179591	0.470324389808
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_error2 b/skbio/io/tests/data/ordination_error2
new file mode 100644
index 0000000..d23e25c
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error2
@@ -0,0 +1,42 @@
+Eigvals	9
+0.366135830393	0.186887643052	0.0788466514249	0.082287840501	0.0351348475787	0.0233265839374	0.0099048981912	0.00122461669234	0.000417454724117
+
+Species	9	9
+Species0	0.110350890177	0.282399990052	-0.203028976154	-0.00192462284409	-0.082232863384	0.0857314258364	-0.0122038907184	-0.0425198793666	0.00466719926338
+Species1	0.141359038961	0.303495645402	0.395441211576	-0.14126625534	-0.0268859204718	0.143253061936	0.0430260301697	0.0476377655759	-0.00228172378295
+Species2	-1.01552204222	0.0958317865043	-0.198262718034	-0.104801030067	0.130025239749	0.0244045261332	0.0464701211285	0.0269279200532	0.0350103177576
+Species3	-1.03620650502	0.109624974112	0.220984718362	0.223640072997	-0.243745876054	-0.0259064859794	-0.0534088909011	-0.0315611195993	0.0256448427552
+Species4	1.05371722248	0.537178749104	-0.438075060322	0.223480553581	-0.323948461806	0.124644870822	-0.119275907223	0.0416254660329	-0.0381955235096
+Species5	0.998558655	0.573960582723	0.679918103399	-0.389963380717	0.299077945999	0.328451006171	0.21215881857	-0.0829871883001	-0.0439653996462
+Species6	0.255245719879	-0.178168259149	-0.204127155429	0.433397565801	0.0707099230629	-0.18817306522	0.126908756045	0.0044937289123	-0.0122511718244
+Species7	0.146555872394	-0.857362497037	-0.0152499051659	0.0527604990862	0.354475793915	-0.0416813697787	-0.199011239586	-0.00213723187073	-0.00782946141667
+Species8	0.413705102117	-0.707948964322	0.21569736034	-0.690314241725	-0.148431001217	-0.334251868558	-0.00628707445028	-0.00364123416731	-0.0122722164511
+
+Site	10	9
+Site0	0.710587311248	-3.08166800613	0.219651379947	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.584771352278	-3.00669301091	-0.947448656768	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.762734278287	-3.15258603503	2.13924426714	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.11230735331	1.07150585141	-1.87527740873	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.979116769996	-0.0603144289026	-0.696277367656	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.04322560423	0.459426970165	-0.639802790578	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-0.954490118162	-0.0847021660539	0.132509124411	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.947268764751	-0.108370567311	0.526107182587	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.14808173207	0.490449274267	0.478353666755	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	1.03291557934	1.0350490304	2.74691777314	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
+
+Biplot	3	3
+-0.169746767979	0.63069090084	0.760769036049
+-0.994016563505	0.0609533148724	-0.0449369418179
+0.184352565909	-0.974867543612	0.0309865007541
+
+Site constraints	10	9
+Site0	0.692138797603	-3.08053663489	-0.328747278055	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.664559513865	-3.06214571808	0.230249303805	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.636980230127	-3.04375480127	0.789245885666	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.10887578995	0.500396915484	-1.55606822404	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.970016224052	0.0654867737684	-1.1206070781	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.05371722248	0.537178749104	-0.438075060322	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-1.02517479153	0.102268607388	-0.00261391438256	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.998558655	0.573960582723	0.679918103399	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.080333359	0.139050441007	1.11537924934	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	0.943400087524	0.610742416342	1.79791126712	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
diff --git a/skbio/io/tests/data/ordination_error20 b/skbio/io/tests/data/ordination_error20
new file mode 100644
index 0000000..b49806b
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error20
@@ -0,0 +1,18 @@
+Eigvals	2
+0.0961330159181	0.0409418140138
+
+Proportion explained	0
+
+Species	3	2
+Species1	0.408869425742	0.0695518116298
+Species2	-0.1153860437	-0.299767683538
+Species3	-0.309967102571	0.187391917117
+
+Site	3	1
+Site1	-0.848956053187
+Site2	-0.220458650578
+Site3	1.66697179591
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_error21 b/skbio/io/tests/data/ordination_error21
new file mode 100644
index 0000000..4687f10
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error21
@@ -0,0 +1 @@
+Eigvals	0
diff --git a/skbio/io/tests/data/ordination_error22 b/skbio/io/tests/data/ordination_error22
new file mode 100644
index 0000000..0f8b83c
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error22
@@ -0,0 +1,2 @@
+Eigvals	1
+0.12345
diff --git a/skbio/io/tests/data/ordination_error23 b/skbio/io/tests/data/ordination_error23
new file mode 100644
index 0000000..a498d28
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error23
@@ -0,0 +1,4 @@
+Eigvals	1
+0.12345
+
+Proportion explained	1
diff --git a/skbio/io/tests/data/ordination_error24 b/skbio/io/tests/data/ordination_error24
new file mode 100644
index 0000000..e41c9a9
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error24
@@ -0,0 +1,8 @@
+Eigvals	1
+0.12345
+
+Proportion explained	1
+0.6789
+
+Species	2	1
+foo	0.987654
diff --git a/skbio/io/tests/data/ordination_error3 b/skbio/io/tests/data/ordination_error3
new file mode 100644
index 0000000..04ed3b8
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error3
@@ -0,0 +1,43 @@
+Eigvals	9
+0.366135830393	0.186887643052	0.0788466514249	0.082287840501	0.0351348475787	0.0233265839374	0.0099048981912	0.00122461669234	0.000417454724117
+
+Proportion explained	0
+
+Species0	0.110350890177	0.282399990052	-0.203028976154	-0.00192462284409	-0.082232863384	0.0857314258364	-0.0122038907184	-0.0425198793666	0.00466719926338
+Species1	0.141359038961	0.303495645402	0.395441211576	-0.14126625534	-0.0268859204718	0.143253061936	0.0430260301697	0.0476377655759	-0.00228172378295
+Species2	-1.01552204222	0.0958317865043	-0.198262718034	-0.104801030067	0.130025239749	0.0244045261332	0.0464701211285	0.0269279200532	0.0350103177576
+Species3	-1.03620650502	0.109624974112	0.220984718362	0.223640072997	-0.243745876054	-0.0259064859794	-0.0534088909011	-0.0315611195993	0.0256448427552
+Species4	1.05371722248	0.537178749104	-0.438075060322	0.223480553581	-0.323948461806	0.124644870822	-0.119275907223	0.0416254660329	-0.0381955235096
+Species5	0.998558655	0.573960582723	0.679918103399	-0.389963380717	0.299077945999	0.328451006171	0.21215881857	-0.0829871883001	-0.0439653996462
+Species6	0.255245719879	-0.178168259149	-0.204127155429	0.433397565801	0.0707099230629	-0.18817306522	0.126908756045	0.0044937289123	-0.0122511718244
+Species7	0.146555872394	-0.857362497037	-0.0152499051659	0.0527604990862	0.354475793915	-0.0416813697787	-0.199011239586	-0.00213723187073	-0.00782946141667
+Species8	0.413705102117	-0.707948964322	0.21569736034	-0.690314241725	-0.148431001217	-0.334251868558	-0.00628707445028	-0.00364123416731	-0.0122722164511
+
+Site	10	9
+Site0	0.710587311248	-3.08166800613	0.219651379947	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.584771352278	-3.00669301091	-0.947448656768	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.762734278287	-3.15258603503	2.13924426714	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.11230735331	1.07150585141	-1.87527740873	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.979116769996	-0.0603144289026	-0.696277367656	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.04322560423	0.459426970165	-0.639802790578	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-0.954490118162	-0.0847021660539	0.132509124411	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.947268764751	-0.108370567311	0.526107182587	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.14808173207	0.490449274267	0.478353666755	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	1.03291557934	1.0350490304	2.74691777314	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
+
+Biplot	3	3
+-0.169746767979	0.63069090084	0.760769036049
+-0.994016563505	0.0609533148724	-0.0449369418179
+0.184352565909	-0.974867543612	0.0309865007541
+
+Site constraints	10	9
+Site0	0.692138797603	-3.08053663489	-0.328747278055	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.664559513865	-3.06214571808	0.230249303805	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.636980230127	-3.04375480127	0.789245885666	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.10887578995	0.500396915484	-1.55606822404	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.970016224052	0.0654867737684	-1.1206070781	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.05371722248	0.537178749104	-0.438075060322	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-1.02517479153	0.102268607388	-0.00261391438256	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.998558655	0.573960582723	0.679918103399	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.080333359	0.139050441007	1.11537924934	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	0.943400087524	0.610742416342	1.79791126712	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
diff --git a/skbio/io/tests/data/ordination_error4 b/skbio/io/tests/data/ordination_error4
new file mode 100644
index 0000000..4a7a37c
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error4
@@ -0,0 +1,43 @@
+Eigvals	9
+0.366135830393	0.186887643052	0.0788466514249	0.082287840501	0.0351348475787	0.0233265839374	0.0099048981912	0.00122461669234	0.000417454724117
+
+Proportion explained	0
+
+Species	9	9
+Species0	0.110350890177	0.282399990052	-0.203028976154	-0.00192462284409	-0.082232863384	0.0857314258364	-0.0122038907184	-0.0425198793666	0.00466719926338
+Species1	0.141359038961	0.303495645402	0.395441211576	-0.14126625534	-0.0268859204718	0.143253061936	0.0430260301697	0.0476377655759	-0.00228172378295
+Species2	-1.01552204222	0.0958317865043	-0.198262718034	-0.104801030067	0.130025239749	0.0244045261332	0.0464701211285	0.0269279200532	0.0350103177576
+Species3	-1.03620650502	0.109624974112	0.220984718362	0.223640072997	-0.243745876054	-0.0259064859794	-0.0534088909011	-0.0315611195993	0.0256448427552
+Species4	1.05371722248	0.537178749104	-0.438075060322	0.223480553581	-0.323948461806	0.124644870822	-0.119275907223	0.0416254660329	-0.0381955235096
+Species5	0.998558655	0.573960582723	0.679918103399	-0.389963380717	0.299077945999	0.328451006171	0.21215881857	-0.0829871883001	-0.0439653996462
+Species6	0.255245719879	-0.178168259149	-0.204127155429	0.433397565801	0.0707099230629	-0.18817306522	0.126908756045	0.0044937289123	-0.0122511718244
+Species7	0.146555872394	-0.857362497037	-0.0152499051659	0.0527604990862	0.354475793915	-0.0416813697787	-0.199011239586	-0.00213723187073	-0.00782946141667
+Species8	0.413705102117	-0.707948964322	0.21569736034	-0.690314241725	-0.148431001217	-0.334251868558	-0.00628707445028	-0.00364123416731	-0.0122722164511
+
+Site0	0.710587311248	-3.08166800613	0.219651379947	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.584771352278	-3.00669301091	-0.947448656768	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.762734278287	-3.15258603503	2.13924426714	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.11230735331	1.07150585141	-1.87527740873	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.979116769996	-0.0603144289026	-0.696277367656	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.04322560423	0.459426970165	-0.639802790578	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-0.954490118162	-0.0847021660539	0.132509124411	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.947268764751	-0.108370567311	0.526107182587	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.14808173207	0.490449274267	0.478353666755	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	1.03291557934	1.0350490304	2.74691777314	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
+
+Biplot	3	3
+-0.169746767979	0.63069090084	0.760769036049
+-0.994016563505	0.0609533148724	-0.0449369418179
+0.184352565909	-0.974867543612	0.0309865007541
+
+Site constraints	10	9
+Site0	0.692138797603	-3.08053663489	-0.328747278055	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.664559513865	-3.06214571808	0.230249303805	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.636980230127	-3.04375480127	0.789245885666	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.10887578995	0.500396915484	-1.55606822404	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.970016224052	0.0654867737684	-1.1206070781	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.05371722248	0.537178749104	-0.438075060322	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-1.02517479153	0.102268607388	-0.00261391438256	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.998558655	0.573960582723	0.679918103399	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.080333359	0.139050441007	1.11537924934	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	0.943400087524	0.610742416342	1.79791126712	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
diff --git a/skbio/io/tests/data/ordination_error5 b/skbio/io/tests/data/ordination_error5
new file mode 100644
index 0000000..f15d8eb
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error5
@@ -0,0 +1,44 @@
+Eigvals	9
+0.366135830393	0.186887643052	0.0788466514249	0.082287840501	0.0351348475787	0.0233265839374	0.0099048981912	0.00122461669234	0.000417454724117
+
+Proportion explained	0
+
+Species	9	9
+Species0	0.110350890177	0.282399990052	-0.203028976154	-0.00192462284409	-0.082232863384	0.0857314258364	-0.0122038907184	-0.0425198793666	0.00466719926338
+Species1	0.141359038961	0.303495645402	0.395441211576	-0.14126625534	-0.0268859204718	0.143253061936	0.0430260301697	0.0476377655759	-0.00228172378295
+Species2	-1.01552204222	0.0958317865043	-0.198262718034	-0.104801030067	0.130025239749	0.0244045261332	0.0464701211285	0.0269279200532	0.0350103177576
+Species3	-1.03620650502	0.109624974112	0.220984718362	0.223640072997	-0.243745876054	-0.0259064859794	-0.0534088909011	-0.0315611195993	0.0256448427552
+Species4	1.05371722248	0.537178749104	-0.438075060322	0.223480553581	-0.323948461806	0.124644870822	-0.119275907223	0.0416254660329	-0.0381955235096
+Species5	0.998558655	0.573960582723	0.679918103399	-0.389963380717	0.299077945999	0.328451006171	0.21215881857	-0.0829871883001	-0.0439653996462
+Species6	0.255245719879	-0.178168259149	-0.204127155429	0.433397565801	0.0707099230629	-0.18817306522	0.126908756045	0.0044937289123	-0.0122511718244
+Species7	0.146555872394	-0.857362497037	-0.0152499051659	0.0527604990862	0.354475793915	-0.0416813697787	-0.199011239586	-0.00213723187073	-0.00782946141667
+Species8	0.413705102117	-0.707948964322	0.21569736034	-0.690314241725	-0.148431001217	-0.334251868558	-0.00628707445028	-0.00364123416731	-0.0122722164511
+
+Site	10	9
+Site0	0.710587311248	-3.08166800613	0.219651379947	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.584771352278	-3.00669301091	-0.947448656768	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.762734278287	-3.15258603503	2.13924426714	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.11230735331	1.07150585141	-1.87527740873	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.979116769996	-0.0603144289026	-0.696277367656	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.04322560423	0.459426970165	-0.639802790578	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-0.954490118162	-0.0847021660539	0.132509124411	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.947268764751	-0.108370567311	0.526107182587	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.14808173207	0.490449274267	0.478353666755	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	1.03291557934	1.0350490304	2.74691777314	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
+
+
+-0.169746767979	0.63069090084	0.760769036049
+-0.994016563505	0.0609533148724	-0.0449369418179
+0.184352565909	-0.974867543612	0.0309865007541
+
+Site constraints	10	9
+Site0	0.692138797603	-3.08053663489	-0.328747278055	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.664559513865	-3.06214571808	0.230249303805	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.636980230127	-3.04375480127	0.789245885666	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.10887578995	0.500396915484	-1.55606822404	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.970016224052	0.0654867737684	-1.1206070781	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.05371722248	0.537178749104	-0.438075060322	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-1.02517479153	0.102268607388	-0.00261391438256	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.998558655	0.573960582723	0.679918103399	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.080333359	0.139050441007	1.11537924934	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	0.943400087524	0.610742416342	1.79791126712	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
diff --git a/skbio/io/tests/data/ordination_error6 b/skbio/io/tests/data/ordination_error6
new file mode 100644
index 0000000..aa880f9
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error6
@@ -0,0 +1,44 @@
+Eigvals	9
+0.366135830393	0.186887643052	0.0788466514249	0.082287840501	0.0351348475787	0.0233265839374	0.0099048981912	0.00122461669234	0.000417454724117
+
+Proportion explained	0
+
+Species	9	9
+Species0	0.110350890177	0.282399990052	-0.203028976154	-0.00192462284409	-0.082232863384	0.0857314258364	-0.0122038907184	-0.0425198793666	0.00466719926338
+Species1	0.141359038961	0.303495645402	0.395441211576	-0.14126625534	-0.0268859204718	0.143253061936	0.0430260301697	0.0476377655759	-0.00228172378295
+Species2	-1.01552204222	0.0958317865043	-0.198262718034	-0.104801030067	0.130025239749	0.0244045261332	0.0464701211285	0.0269279200532	0.0350103177576
+Species3	-1.03620650502	0.109624974112	0.220984718362	0.223640072997	-0.243745876054	-0.0259064859794	-0.0534088909011	-0.0315611195993	0.0256448427552
+Species4	1.05371722248	0.537178749104	-0.438075060322	0.223480553581	-0.323948461806	0.124644870822	-0.119275907223	0.0416254660329	-0.0381955235096
+Species5	0.998558655	0.573960582723	0.679918103399	-0.389963380717	0.299077945999	0.328451006171	0.21215881857	-0.0829871883001	-0.0439653996462
+Species6	0.255245719879	-0.178168259149	-0.204127155429	0.433397565801	0.0707099230629	-0.18817306522	0.126908756045	0.0044937289123	-0.0122511718244
+Species7	0.146555872394	-0.857362497037	-0.0152499051659	0.0527604990862	0.354475793915	-0.0416813697787	-0.199011239586	-0.00213723187073	-0.00782946141667
+Species8	0.413705102117	-0.707948964322	0.21569736034	-0.690314241725	-0.148431001217	-0.334251868558	-0.00628707445028	-0.00364123416731	-0.0122722164511
+
+Site	10	9
+Site0	0.710587311248	-3.08166800613	0.219651379947	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.584771352278	-3.00669301091	-0.947448656768	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.762734278287	-3.15258603503	2.13924426714	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.11230735331	1.07150585141	-1.87527740873	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.979116769996	-0.0603144289026	-0.696277367656	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.04322560423	0.459426970165	-0.639802790578	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-0.954490118162	-0.0847021660539	0.132509124411	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.947268764751	-0.108370567311	0.526107182587	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.14808173207	0.490449274267	0.478353666755	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	1.03291557934	1.0350490304	2.74691777314	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
+
+Biplot	3	3
+-0.169746767979	0.63069090084	0.760769036049
+-0.994016563505	0.0609533148724	-0.0449369418179
+0.184352565909	-0.974867543612	0.0309865007541
+
+
+Site0	0.692138797603	-3.08053663489	-0.328747278055	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.664559513865	-3.06214571808	0.230249303805	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.636980230127	-3.04375480127	0.789245885666	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.10887578995	0.500396915484	-1.55606822404	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.970016224052	0.0654867737684	-1.1206070781	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.05371722248	0.537178749104	-0.438075060322	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-1.02517479153	0.102268607388	-0.00261391438256	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.998558655	0.573960582723	0.679918103399	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.080333359	0.139050441007	1.11537924934	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	0.943400087524	0.610742416342	1.79791126712	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
diff --git a/skbio/io/tests/data/ordination_error7 b/skbio/io/tests/data/ordination_error7
new file mode 100644
index 0000000..d87ff26
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error7
@@ -0,0 +1,3 @@
+Eigvals	2
+0.0961330159181	0.0409418140138
+Proportion explained	0
diff --git a/skbio/io/tests/data/ordination_error8 b/skbio/io/tests/data/ordination_error8
new file mode 100644
index 0000000..ce36759
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error8
@@ -0,0 +1,22 @@
+Eigvals	9
+0.512367260461	0.300719094427	0.267912066004	0.208988681078	0.19169895326	0.16054234528	0.15017695712	0.122457748167	0.0
+
+Proportion explained	9
+0.267573832777	0.139911863774	0.109140272454	0.100111048503	0.0838401161912	0.0784269939011	0.0639511763509	0.0
+
+Species	0	0
+
+Site	9	9
+PC.636	-0.258465461183	0.173999546883	0.0382875792552	-0.19447750562	0.0831176020844	0.262430333201	-0.0231636392235	-0.0184794039581	0.0
+PC.635	-0.271001135391	-0.0185951319063	-0.0864841926349	0.118064245315	-0.198808358437	-0.0211723599535	-0.191024027565	0.155646592377	0.0
+PC.356	0.235077898175	0.0962519254489	-0.345792726714	-0.00320862577619	-0.0963777675519	0.0457025386953	0.185472813286	0.0404093971793	0.0
+PC.481	0.0261407664325	-0.0111459676533	0.147660603015	0.29087660853	0.203945472801	0.0619712384758	0.101641328709	0.105690998719	0.0
+PC.354	0.285007552283	-0.0192549888483	0.0623263375385	0.138126799852	-0.104798602423	0.0951720730628	-0.129636097542	-0.220687170372	0.0
+PC.593	0.204636326241	-0.139361150932	0.291513819623	-0.181566786821	-0.159580132715	-0.0246412130162	0.0866252404441	0.0996221476871	0.0
+PC.355	0.233482403212	0.225257974068	-0.0188623096268	-0.107729981831	0.177108999572	-0.192905835151	-0.149819471408	0.0383549037465	0.0
+PC.607	-0.0949631911323	-0.420974802495	-0.154869454869	-0.0898427509281	0.152618194488	-0.0334232691501	-0.0251224777303	-0.0508988536409	0.0
+PC.634	-0.359915158638	0.113822595435	0.0662203444138	0.0297579972788	-0.0572254078183	-0.193133506163	0.145026331031	-0.149658611738	0.0
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_error9 b/skbio/io/tests/data/ordination_error9
new file mode 100644
index 0000000..ea3102c
--- /dev/null
+++ b/skbio/io/tests/data/ordination_error9
@@ -0,0 +1,18 @@
+Eigvals	2
+0.0961330159181	0.0409418140138
+
+Proportion explained	0
+
+Species	3	2
+Species1	0.408869425742
+Species2	-0.1153860437
+Species3	-0.309967102571
+
+Site	3	2
+Site1	-0.848956053187	0.882764759014
+Site2	-0.220458650578	-1.34482000302
+Site3	1.66697179591	0.470324389808
+
+Biplot	0	0
+
+Site constraints	0	0
diff --git a/skbio/io/tests/data/ordination_example2_scores b/skbio/io/tests/data/ordination_example2_scores
new file mode 100644
index 0000000..8354bd2
--- /dev/null
+++ b/skbio/io/tests/data/ordination_example2_scores
@@ -0,0 +1,42 @@
+Eigvals	7
+25.8979540892	14.9825779819	8.93784077262	6.13995623072	1.68070536498	0.57735026919	0.275983624351
+
+Proportion explained	0
+
+Species	6	7
+Species0	1.38198713901	-1.71496426179	0.632272455288	0.00712898231575	0.120512431133	-0.0723104306179	-0.00815886062344
+Species1	0.919178380672	-1.25430767906	-1.1787426896	-0.00712898231576	-0.120512431133	-0.0723104306179	0.00815886062344
+Species2	3.39897234869	0.446168315515	0.406691610423	0.749336668014	0.0793892812781	7.37971401683e-17	0.0329418170936
+Species3	2.52353261895	0.446932822723	-0.413412046583	-0.639449029945	-0.0640330006084	3.40602185392e-17	0.0335491330226
+Species4	-0.53155341411	-1.34263985744	0.464155649196	-0.412041388665	0.198336195195	7.37971401683e-17	0.00604836743485
+Species5	-0.288618167117	-0.571491852197	-0.406527290424	0.206020694333	-0.0991680975973	-1.13534061797e-17	-0.00302418371743
+
+Site	10	7
+Site0	-1.48848983495	2.12675623514	0.727805340002	-0.227234564008	-3.8413042049	-2.30487725273	0.260061682644
+Site1	-1.5541678384	2.37027298265	0.475523558326	1.58712629997e-16	1.39853499536e-15	4.60975450547	-1.41948353841e-14
+Site2	-1.51048450796	2.19216727329	0.00519576944216	0.227234564008	3.8413042049	-2.30487725273	-0.260061682644
+Site3	-0.872786591764	-2.6271708553	2.68871897067	-1.97005774092	0.948287641474	-2.0356145959e-14	0.0289185344306
+Site4	2.97228673755	0.322310666722	2.50294580667	3.50264153009	0.489477682536	-1.25529566747e-14	2.11938273809
+Site5	-0.879968888341	-2.19620098193	0.710888524695	-0.656685913639	0.316095880491	-4.47835211098e-15	0.00963951147681
+Site6	2.64194948913	0.390104638861	-0.086230363198	-0.211189359785	-0.298609965083	-3.88762243221e-15	-4.5952222736
+Site7	-0.887151184918	-1.76523110855	-1.26694192128	0.656685913639	-0.316095880491	1.21458337555e-14	-0.00963951147698
+Site8	2.47314610115	0.521252384288	-2.51313331808	-3.29145217031	-0.190867717454	1.65563320466e-14	2.4758395355
+Site9	-0.894333481495	-1.33426123517	-3.24477236725	1.97005774092	-0.948287641474	3.0262803659e-14	-0.0289185344308
+
+Biplot	4	3
+0.422650019179	-0.559142585857	-0.713250678211
+0.988495963777	0.150787422017	-0.0117848614073
+-0.556516618887	0.817599992718	0.147714267459
+-0.404079676685	-0.9058434809	-0.127150316558
+
+Site constraints	10	7
+Site0	-1.48131076339	2.07063239013	1.42061063192	-0.227234564008	-3.8413042049	-2.30487725273	0.260061682644
+Site1	-1.51771406044	2.22973216369	0.402841555923	1.58712629997e-16	1.39853499536e-15	4.60975450547	-1.41948353841e-14
+Site2	-1.55411735749	2.38883193726	-0.61492752007	0.227234564008	3.8413042049	-2.30487725273	-0.260061682644
+Site3	-0.774350145471	-2.45801536594	2.77528052969	-1.97005774092	0.948287641474	-2.0356145959e-14	0.0289185344306
+Site4	2.76860070338	0.0930230161545	2.00339886045	3.50264153009	0.489477682536	-1.25529566747e-14	2.11938273809
+Site5	-0.847156739577	-2.13981581881	0.739742377702	-0.656685913639	0.316095880491	-4.47835211098e-15	0.00963951147681
+Site6	2.69579410928	0.41122256329	-0.0321392915344	-0.211189359785	-0.298609965083	-3.88762243221e-15	-4.5952222736
+Site7	-0.919963333683	-1.82161627167	-1.29579577429	0.656685913639	-0.316095880491	1.21458337555e-14	-0.00963951147698
+Site8	2.62298751517	0.729422110426	-2.06767744352	-3.29145217031	-0.190867717454	1.65563320466e-14	2.4758395355
+Site9	-0.992769927788	-1.50341672453	-3.33133392627	1.97005774092	-0.948287641474	3.0262803659e-14	-0.0289185344308
diff --git a/skbio/io/tests/data/ordination_example3_scores b/skbio/io/tests/data/ordination_example3_scores
new file mode 100644
index 0000000..ae9ab9d
--- /dev/null
+++ b/skbio/io/tests/data/ordination_example3_scores
@@ -0,0 +1,44 @@
+Eigvals	9
+0.366135830393	0.186887643052	0.0788466514249	0.082287840501	0.0351348475787	0.0233265839374	0.0099048981912	0.00122461669234	0.000417454724117
+
+Proportion explained	0
+
+Species	9	9
+Species0	0.110350890177	0.282399990052	-0.203028976154	-0.00192462284409	-0.082232863384	0.0857314258364	-0.0122038907184	-0.0425198793666	0.00466719926338
+Species1	0.141359038961	0.303495645402	0.395441211576	-0.14126625534	-0.0268859204718	0.143253061936	0.0430260301697	0.0476377655759	-0.00228172378295
+Species2	-1.01552204222	0.0958317865043	-0.198262718034	-0.104801030067	0.130025239749	0.0244045261332	0.0464701211285	0.0269279200532	0.0350103177576
+Species3	-1.03620650502	0.109624974112	0.220984718362	0.223640072997	-0.243745876054	-0.0259064859794	-0.0534088909011	-0.0315611195993	0.0256448427552
+Species4	1.05371722248	0.537178749104	-0.438075060322	0.223480553581	-0.323948461806	0.124644870822	-0.119275907223	0.0416254660329	-0.0381955235096
+Species5	0.998558655	0.573960582723	0.679918103399	-0.389963380717	0.299077945999	0.328451006171	0.21215881857	-0.0829871883001	-0.0439653996462
+Species6	0.255245719879	-0.178168259149	-0.204127155429	0.433397565801	0.0707099230629	-0.18817306522	0.126908756045	0.0044937289123	-0.0122511718244
+Species7	0.146555872394	-0.857362497037	-0.0152499051659	0.0527604990862	0.354475793915	-0.0416813697787	-0.199011239586	-0.00213723187073	-0.00782946141667
+Species8	0.413705102117	-0.707948964322	0.21569736034	-0.690314241725	-0.148431001217	-0.334251868558	-0.00628707445028	-0.00364123416731	-0.0122722164511
+
+Site	10	9
+Site0	0.710587311248	-3.08166800613	0.219651379947	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.584771352278	-3.00669301091	-0.947448656768	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.762734278287	-3.15258603503	2.13924426714	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.11230735331	1.07150585141	-1.87527740873	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.979116769996	-0.0603144289026	-0.696277367656	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.04322560423	0.459426970165	-0.639802790578	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-0.954490118162	-0.0847021660539	0.132509124411	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.947268764751	-0.108370567311	0.526107182587	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.14808173207	0.490449274267	0.478353666755	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	1.03291557934	1.0350490304	2.74691777314	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
+
+Biplot	3	3
+-0.169746767979	0.63069090084	0.760769036049
+-0.994016563505	0.0609533148724	-0.0449369418179
+0.184352565909	-0.974867543612	0.0309865007541
+
+Site constraints	10	9
+Site0	0.692138797603	-3.08053663489	-0.328747278055	-1.24528801163	-1.07293546227	-0.506241907472	0.244126652455	-3.63164833508	1.16311896657
+Site1	0.664559513865	-3.06214571808	0.230249303805	2.69965142856	2.13682885838	0.813520011254	0.471530333182	0.908423015086	-1.34724387844
+Site2	0.636980230127	-3.04375480127	0.789245885666	-3.1162748358	-2.30660936925	-0.698929858809	-1.39062619586	4.84117591747	0.562102984837
+Site3	1.10887578995	0.500396915484	-1.55606822404	0.666370241998	-1.10153224699	1.43517552491	-1.10619960297	0.0137029328454	-0.0371803939101
+Site4	-0.970016224052	0.0654867737684	-1.1206070781	-0.612646703308	0.983006619615	0.315662442163	0.574110232297	0.328630035672	0.868027697443
+Site5	1.05371722248	0.537178749104	-0.438075060322	0.287156643872	-0.573935423877	-1.44980634943	1.70166994063	0.306164261447	-0.442115969758
+Site6	-1.02517479153	0.102268607388	-0.00261391438256	-0.42143341064	-0.111552348931	-0.394242454835	-0.673963982894	-0.379018566362	-1.7472502885
+Site7	0.998558655	0.573960582723	0.679918103399	-0.00565282365567	1.26272400228	-1.06565692165	-1.46326596729	-0.154459216567	0.778139732463
+Site8	-1.080333359	0.139050441007	1.11537924934	1.17015870919	-1.00599224074	0.0735071441404	0.0860462673715	0.0417647558417	0.935819560428
+Site9	0.943400087524	0.610742416342	1.79791126712	-1.28083971649	0.363002636972	1.98647950015	1.05356145232	-0.24813142226	-0.463165215106
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_CCA_site b/skbio/io/tests/data/ordination_exp_Ordination_CCA_site
new file mode 100644
index 0000000..ea7cd5f
--- /dev/null
+++ b/skbio/io/tests/data/ordination_exp_Ordination_CCA_site
@@ -0,0 +1,10 @@
+7.105873112480000087e-01 -3.081668006130000137e+00 2.196513799469999861e-01 -1.245288011630000025e+00 -1.072935462270000029e+00 -5.062419074720000411e-01 2.441266524550000094e-01 -3.631648335079999956e+00 1.163118966569999913e+00
+5.847713522780000339e-01 -3.006693010909999852e+00 -9.474486567680000526e-01 2.699651428560000177e+00 2.136828858379999918e+00 8.135200112539999751e-01 4.715303331819999944e-01 9.084230150859999853e-01 -1.347243878440000042e+00
+7.627342782870000226e-01 -3.152586035030000122e+00 2.139244267140000044e+00 -3.116274835800000087e+00 -2.306609369249999819e+00 -6.989298588090000486e-01 -1.390626195859999914e+00 4.841175917470000201e+00 5.621029848370000526e-01
+1.112307353310000080e+00 1.071505851409999988e+00 -1.875277408729999928e+00 6.663702419979999902e-01 -1.101532246989999964e+00 1.435175524910000000e+00 -1.106199602970000084e+00 1.370293284539999927e-02 -3.718039391010000139e-02
+-9.791167699959999471e-01 -6.031442890259999801e-02 -6.962773676560000125e-01 -6.126467033079999736e-01 9.830066196150000213e-01 3.156624421629999899e-01 5.741102322969999783e-01 3.286300356719999982e-01 8.680276974430000125e-01
+1.043225604230000103e+00 4.594269701650000037e-01 -6.398027905780000468e-01 2.871566438720000169e-01 -5.739354238770000283e-01 -1.449806349429999974e+00 1.701669940629999989e+00 3.061642614469999857e-01 -4.421159697579999937e-01
+-9.544901181620000230e-01 -8.470216605390000486e-02 1.325091244109999900e-01 -4.214334106400000057e-01 -1.115523489309999949e-01 -3.942424548349999780e-01 -6.739639828939999466e-01 -3.790185663619999867e-01 -1.747250288500000082e+00
+9.472687647509999698e-01 -1.083705673110000045e-01 5.261071825869999552e-01 -5.652823655669999593e-03 1.262724002279999924e+00 -1.065656921650000033e+00 -1.463265967289999914e+00 -1.544592165670000128e-01 7.781397324629999446e-01
+-1.148081732070000083e+00 4.904492742670000238e-01 4.783536667550000177e-01 1.170158709190000090e+00 -1.005992240739999932e+00 7.350714414039999367e-02 8.604626737149999316e-02 4.176475584170000199e-02 9.358195604279999635e-01
+1.032915579340000001e+00 1.035049030399999959e+00 2.746917773139999852e+00 -1.280839716490000058e+00 3.630026369720000257e-01 1.986479500149999966e+00 1.053561452320000091e+00 -2.481314222600000119e-01 -4.631652151060000056e-01
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_CCA_site_constraints b/skbio/io/tests/data/ordination_exp_Ordination_CCA_site_constraints
new file mode 100644
index 0000000..c03fb9f
--- /dev/null
+++ b/skbio/io/tests/data/ordination_exp_Ordination_CCA_site_constraints
@@ -0,0 +1,10 @@
+6.921387976030000111e-01 -3.080536634890000069e+00 -3.287472780549999807e-01 -1.245288011630000025e+00 -1.072935462270000029e+00 -5.062419074720000411e-01 2.441266524550000094e-01 -3.631648335079999956e+00 1.163118966569999913e+00
+6.645595138650000067e-01 -3.062145718080000023e+00 2.302493038049999996e-01 2.699651428560000177e+00 2.136828858379999918e+00 8.135200112539999751e-01 4.715303331819999944e-01 9.084230150859999853e-01 -1.347243878440000042e+00
+6.369802301270000022e-01 -3.043754801269999977e+00 7.892458856659999578e-01 -3.116274835800000087e+00 -2.306609369249999819e+00 -6.989298588090000486e-01 -1.390626195859999914e+00 4.841175917470000201e+00 5.621029848370000526e-01
+1.108875789949999913e+00 5.003969154840000044e-01 -1.556068224039999892e+00 6.663702419979999902e-01 -1.101532246989999964e+00 1.435175524910000000e+00 -1.106199602970000084e+00 1.370293284539999927e-02 -3.718039391010000139e-02
+-9.700162240520000534e-01 6.548677376839999453e-02 -1.120607078099999931e+00 -6.126467033079999736e-01 9.830066196150000213e-01 3.156624421629999899e-01 5.741102322969999783e-01 3.286300356719999982e-01 8.680276974430000125e-01
+1.053717222479999993e+00 5.371787491039999862e-01 -4.380750603219999983e-01 2.871566438720000169e-01 -5.739354238770000283e-01 -1.449806349429999974e+00 1.701669940629999989e+00 3.061642614469999857e-01 -4.421159697579999937e-01
+-1.025174791530000018e+00 1.022686073880000046e-01 -2.613914382559999906e-03 -4.214334106400000057e-01 -1.115523489309999949e-01 -3.942424548349999780e-01 -6.739639828939999466e-01 -3.790185663619999867e-01 -1.747250288500000082e+00
+9.985586549999999617e-01 5.739605827229999901e-01 6.799181033990000511e-01 -5.652823655669999593e-03 1.262724002279999924e+00 -1.065656921650000033e+00 -1.463265967289999914e+00 -1.544592165670000128e-01 7.781397324629999446e-01
+-1.080333358999999938e+00 1.390504410070000085e-01 1.115379249340000101e+00 1.170158709190000090e+00 -1.005992240739999932e+00 7.350714414039999367e-02 8.604626737149999316e-02 4.176475584170000199e-02 9.358195604279999635e-01
+9.434000875239999528e-01 6.107424163419999941e-01 1.797911267119999934e+00 -1.280839716490000058e+00 3.630026369720000257e-01 1.986479500149999966e+00 1.053561452320000091e+00 -2.481314222600000119e-01 -4.631652151060000056e-01
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_CCA_species b/skbio/io/tests/data/ordination_exp_Ordination_CCA_species
new file mode 100644
index 0000000..e6e3dde
--- /dev/null
+++ b/skbio/io/tests/data/ordination_exp_Ordination_CCA_species
@@ -0,0 +1,9 @@
+1.103508901770000050e-01 2.823999900520000050e-01 -2.030289761540000071e-01 -1.924622844089999901e-03 -8.223286338400000173e-02 8.573142583640000480e-02 -1.220389071839999943e-02 -4.251987936659999689e-02 4.667199263379999581e-03
+1.413590389609999942e-01 3.034956454020000249e-01 3.954412115759999846e-01 -1.412662553400000076e-01 -2.688592047179999903e-02 1.432530619359999868e-01 4.302603016969999694e-02 4.763776557589999988e-02 -2.281723782949999797e-03
+-1.015522042219999976e+00 9.583178650430000667e-02 -1.982627180340000017e-01 -1.048010300670000061e-01 1.300252397489999878e-01 2.440452613319999894e-02 4.647012112849999849e-02 2.692792005320000098e-02 3.501031775760000270e-02
+-1.036206505020000002e+00 1.096249741120000065e-01 2.209847183620000100e-01 2.236400729970000056e-01 -2.437458760539999991e-01 -2.590648597939999853e-02 -5.340889090110000070e-02 -3.156111959929999711e-02 2.564484275520000078e-02
+1.053717222479999993e+00 5.371787491039999862e-01 -4.380750603219999983e-01 2.234805535809999966e-01 -3.239484618060000098e-01 1.246448708220000007e-01 -1.192759072229999939e-01 4.162546603290000202e-02 -3.819552350960000314e-02
+9.985586549999999617e-01 5.739605827229999901e-01 6.799181033990000511e-01 -3.899633807169999855e-01 2.990779459990000055e-01 3.284510061709999817e-01 2.121588185699999907e-01 -8.298718830010000380e-02 -4.396539964619999852e-02
+2.552457198790000215e-01 -1.781682591490000023e-01 -2.041271554290000068e-01 4.333975658009999732e-01 7.070992306289999862e-02 -1.881730652199999909e-01 1.269087560449999874e-01 4.493728912300000086e-03 -1.225117182439999959e-02
+1.465558723940000030e-01 -8.573624970369999598e-01 -1.524990516589999970e-02 5.276049908620000090e-02 3.544757939150000037e-01 -4.168136977870000315e-02 -1.990112395860000050e-01 -2.137231870730000108e-03 -7.829461416669999951e-03
+4.137051021170000120e-01 -7.079489643220000517e-01 2.156973603400000050e-01 -6.903142417249999996e-01 -1.484310012170000082e-01 -3.342518685579999871e-01 -6.287074450280000053e-03 -3.641234167310000193e-03 -1.227221645110000009e-02
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_PCoA_site b/skbio/io/tests/data/ordination_exp_Ordination_PCoA_site
new file mode 100644
index 0000000..4c389e3
--- /dev/null
+++ b/skbio/io/tests/data/ordination_exp_Ordination_PCoA_site
@@ -0,0 +1,9 @@
+-2.584654611830000115e-01 1.739995468830000114e-01 3.828757925520000216e-02 -1.944775056199999907e-01 8.311760208439999964e-02 2.624303332010000100e-01 -2.316363922349999951e-02 -1.847940395810000053e-02 0.000000000000000000e+00
+-2.710011353910000143e-01 -1.859513190630000118e-02 -8.648419263489999509e-02 1.180642453149999965e-01 -1.988083584370000101e-01 -2.117235995349999941e-02 -1.910240275650000041e-01 1.556465923769999871e-01 0.000000000000000000e+00
+2.350778981749999941e-01 9.625192544890000257e-02 -3.457927267140000205e-01 -3.208625776189999966e-03 -9.637776755190000100e-02 4.570253869529999902e-02 1.854728132859999878e-01 4.040939717929999814e-02 0.000000000000000000e+00
+2.614076643250000040e-02 -1.114596765330000058e-02 1.476606030150000026e-01 2.908766085300000248e-01 2.039454728010000051e-01 6.197123847580000150e-02 1.016413287090000006e-01 1.056909987190000066e-01 0.000000000000000000e+00
+2.850075522830000097e-01 -1.925498884830000035e-02 6.232633753849999708e-02 1.381267998519999929e-01 -1.047986024230000007e-01 9.517207306279999723e-02 -1.296360975420000061e-01 -2.206871703720000022e-01 0.000000000000000000e+00
+2.046363262410000050e-01 -1.393611509319999942e-01 2.915138196229999923e-01 -1.815667868209999980e-01 -1.595801327149999893e-01 -2.464121301619999829e-02 8.662524044409999902e-02 9.962214768709999613e-02 0.000000000000000000e+00
+2.334824032120000059e-01 2.252579740679999942e-01 -1.886230962680000151e-02 -1.077299818309999935e-01 1.771089995719999921e-01 -1.929058351509999880e-01 -1.498194714080000045e-01 3.835490374650000339e-02 0.000000000000000000e+00
+-9.496319113230000664e-02 -4.209748024950000223e-01 -1.548694548690000006e-01 -8.984275092809999863e-02 1.526181944879999863e-01 -3.342326915010000038e-02 -2.512247773030000172e-02 -5.089885364090000058e-02 0.000000000000000000e+00
+-3.599151586379999990e-01 1.138225954350000069e-01 6.622034441379999470e-02 2.975799727879999829e-02 -5.722540781830000312e-02 -1.931335061630000127e-01 1.450263310309999887e-01 -1.496586117379999914e-01 0.000000000000000000e+00
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_RDA_site b/skbio/io/tests/data/ordination_exp_Ordination_RDA_site
new file mode 100644
index 0000000..11f6f3a
--- /dev/null
+++ b/skbio/io/tests/data/ordination_exp_Ordination_RDA_site
@@ -0,0 +1,10 @@
+-1.488489834949999979e+00 2.126756235139999784e+00 7.278053400020000341e-01 -2.272345640079999896e-01 -3.841304204900000130e+00 -2.304877252729999881e+00 2.600616826439999807e-01
+-1.554167838399999946e+00 2.370272982649999971e+00 4.755235583260000176e-01 1.587126299970000006e-16 1.398534995359999983e-15 4.609754505469999764e+00 -1.419483538409999981e-14
+-1.510484507960000000e+00 2.192167273289999940e+00 5.195769442159999842e-03 2.272345640079999896e-01 3.841304204900000130e+00 -2.304877252729999881e+00 -2.600616826439999807e-01
+-8.727865917639999749e-01 -2.627170855300000163e+00 2.688718970670000097e+00 -1.970057740919999967e+00 9.482876414740000204e-01 -2.035614595899999968e-14 2.891853443059999931e-02
+2.972286737550000169e+00 3.223106667219999832e-01 2.502945806670000106e+00 3.502641530090000010e+00 4.894776825359999917e-01 -1.255295667470000031e-14 2.119382738090000107e+00
+-8.799688883409999551e-01 -2.196200981930000129e+00 7.108885246949999681e-01 -6.566859136389999740e-01 3.160958804910000142e-01 -4.478352110979999834e-15 9.639511476809999382e-03
+2.641949489129999940e+00 3.901046388609999971e-01 -8.623036319799999572e-02 -2.111893597850000026e-01 -2.986099650829999730e-01 -3.887622432210000342e-15 -4.595222273600000129e+00
+-8.871511849180000464e-01 -1.765231108550000094e+00 -1.266941921279999939e+00 6.566859136389999740e-01 -3.160958804910000142e-01 1.214583375549999947e-14 -9.639511476980000548e-03
+2.473146101150000220e+00 5.212523842880000258e-01 -2.513133318079999956e+00 -3.291452170309999925e+00 -1.908677174539999966e-01 1.655633204660000023e-14 2.475839535500000022e+00
+-8.943334814950000267e-01 -1.334261235170000059e+00 -3.244772367249999956e+00 1.970057740919999967e+00 -9.482876414740000204e-01 3.026280365900000082e-14 -2.891853443079999905e-02
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_RDA_site_constraints b/skbio/io/tests/data/ordination_exp_Ordination_RDA_site_constraints
new file mode 100644
index 0000000..fbceac8
--- /dev/null
+++ b/skbio/io/tests/data/ordination_exp_Ordination_RDA_site_constraints
@@ -0,0 +1,10 @@
+-1.481310763390000007e+00 2.070632390130000111e+00 1.420610631920000033e+00 -2.272345640079999896e-01 -3.841304204900000130e+00 -2.304877252729999881e+00 2.600616826439999807e-01
+-1.517714060439999901e+00 2.229732163690000046e+00 4.028415559230000276e-01 1.587126299970000006e-16 1.398534995359999983e-15 4.609754505469999764e+00 -1.419483538409999981e-14
+-1.554117357490000018e+00 2.388831937259999982e+00 -6.149275200699999555e-01 2.272345640079999896e-01 3.841304204900000130e+00 -2.304877252729999881e+00 -2.600616826439999807e-01
+-7.743501454710000065e-01 -2.458015365940000141e+00 2.775280529689999831e+00 -1.970057740919999967e+00 9.482876414740000204e-01 -2.035614595899999968e-14 2.891853443059999931e-02
+2.768600703380000194e+00 9.302301615449999617e-02 2.003398860449999930e+00 3.502641530090000010e+00 4.894776825359999917e-01 -1.255295667470000031e-14 2.119382738090000107e+00
+-8.471567395769999953e-01 -2.139815818809999826e+00 7.397423777020000202e-01 -6.566859136389999740e-01 3.160958804910000142e-01 -4.478352110979999834e-15 9.639511476809999382e-03
+2.695794109279999962e+00 4.112225632900000094e-01 -3.213929153439999686e-02 -2.111893597850000026e-01 -2.986099650829999730e-01 -3.887622432210000342e-15 -4.595222273600000129e+00
+-9.199633336829999841e-01 -1.821616271669999954e+00 -1.295795774289999924e+00 6.566859136389999740e-01 -3.160958804910000142e-01 1.214583375549999947e-14 -9.639511476980000548e-03
+2.622987515170000172e+00 7.294221104260000255e-01 -2.067677443520000047e+00 -3.291452170309999925e+00 -1.908677174539999966e-01 1.655633204660000023e-14 2.475839535500000022e+00
+-9.927699277879999951e-01 -1.503416724530000081e+00 -3.331333926270000134e+00 1.970057740919999967e+00 -9.482876414740000204e-01 3.026280365900000082e-14 -2.891853443079999905e-02
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_RDA_species b/skbio/io/tests/data/ordination_exp_Ordination_RDA_species
new file mode 100644
index 0000000..ad9a991
--- /dev/null
+++ b/skbio/io/tests/data/ordination_exp_Ordination_RDA_species
@@ -0,0 +1,6 @@
+1.381987139010000032e+00 -1.714964261790000055e+00 6.322724552880000237e-01 7.128982315749999785e-03 1.205124311330000031e-01 -7.231043061790000392e-02 -8.158860623439999696e-03
+9.191783806719999728e-01 -1.254307679060000069e+00 -1.178742689599999949e+00 -7.128982315759999598e-03 -1.205124311330000031e-01 -7.231043061790000392e-02 8.158860623439999696e-03
+3.398972348690000089e+00 4.461683155150000246e-01 4.066916104230000051e-01 7.493366680140000069e-01 7.938928127809999347e-02 7.379714016829999409e-17 3.294181709360000099e-02
+2.523532618950000028e+00 4.469328227230000006e-01 -4.134120465830000146e-01 -6.394490299449999693e-01 -6.403300060840000363e-02 3.406021853920000005e-17 3.354913302260000008e-02
+-5.315534141100000110e-01 -1.342639857440000029e+00 4.641556491959999975e-01 -4.120413886650000079e-01 1.983361951950000091e-01 7.379714016829999409e-17 6.048367434850000432e-03
+-2.886181671170000018e-01 -5.714918521970000498e-01 -4.065272904239999874e-01 2.060206943329999929e-01 -9.916809759729999785e-02 -1.135340617969999987e-17 -3.024183717430000123e-03
diff --git a/skbio/io/tests/data/phylip_dna_3_seqs b/skbio/io/tests/data/phylip_dna_3_seqs
new file mode 100644
index 0000000..3db4fe9
--- /dev/null
+++ b/skbio/io/tests/data/phylip_dna_3_seqs
@@ -0,0 +1,4 @@
+3 13
+d1        ..ACC-GTTG G..
+d2        TTACCGGT-G GCC
+d3        .-ACC-GTTG C--
diff --git a/skbio/io/tests/data/phylip_single_seq_long b/skbio/io/tests/data/phylip_single_seq_long
new file mode 100644
index 0000000..b4e9f09
--- /dev/null
+++ b/skbio/io/tests/data/phylip_single_seq_long
@@ -0,0 +1,2 @@
+1 24
+foo       ..ACC-GTTG G..AATGC.C ----
diff --git a/skbio/io/tests/data/phylip_single_seq_short b/skbio/io/tests/data/phylip_single_seq_short
new file mode 100644
index 0000000..c1bcdae
--- /dev/null
+++ b/skbio/io/tests/data/phylip_single_seq_short
@@ -0,0 +1,2 @@
+1 1
+          -
diff --git a/skbio/io/tests/data/phylip_two_chunks b/skbio/io/tests/data/phylip_two_chunks
new file mode 100644
index 0000000..ba982cd
--- /dev/null
+++ b/skbio/io/tests/data/phylip_two_chunks
@@ -0,0 +1,3 @@
+2 20
+foo       ..ACC-GTTG G..AATGC.C
+bar       TTACCGGT-G GCCTA-GCAT
diff --git a/skbio/io/tests/data/phylip_variable_length_ids b/skbio/io/tests/data/phylip_variable_length_ids
new file mode 100644
index 0000000..d658257
--- /dev/null
+++ b/skbio/io/tests/data/phylip_variable_length_ids
@@ -0,0 +1,7 @@
+6 6
+          .-ACGU
+a         UGCA-.
+bb        .ACGU-
+1         ugca-.
+abcdefghijAaAaAa
+ab def42ijGGGGGG
diff --git a/skbio/io/tests/data/qseq_invalid_filter b/skbio/io/tests/data/qseq_invalid_filter
new file mode 100644
index 0000000..0c5759b
--- /dev/null
+++ b/skbio/io/tests/data/qseq_invalid_filter
@@ -0,0 +1 @@
+sanger	1	3	34	-30	30	0	2	ACGTACGTACGTACGTACGTACGTACTTTTTTTTTTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC	;;>@BCEFGHJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~	3
diff --git a/skbio/io/tests/data/qseq_invalid_lane b/skbio/io/tests/data/qseq_invalid_lane
new file mode 100644
index 0000000..3a892e4
--- /dev/null
+++ b/skbio/io/tests/data/qseq_invalid_lane
@@ -0,0 +1 @@
+sanger	1	-3	34	12	-2	0	2	ACGTACGTACGTACGTACGTACGTACTTTTTTTTTTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC	;;>@BCEFGHJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~	1
diff --git a/skbio/io/tests/data/qseq_invalid_read b/skbio/io/tests/data/qseq_invalid_read
new file mode 100644
index 0000000..7784833
--- /dev/null
+++ b/skbio/io/tests/data/qseq_invalid_read
@@ -0,0 +1 @@
+sanger	1	3	34	-30	30	0	4	ACGTACGTACGTACGTACGTACGTACTTTTTTTTTTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC	;;>@BCEFGHJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~	1
diff --git a/skbio/io/tests/data/qseq_invalid_tile b/skbio/io/tests/data/qseq_invalid_tile
new file mode 100644
index 0000000..e0c7b5e
--- /dev/null
+++ b/skbio/io/tests/data/qseq_invalid_tile
@@ -0,0 +1 @@
+sanger	1	5	-4	12	-2	0	2	ACGTACGTACGTACGTACGTACGTACTTTTTTTTTTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC	;;>@BCEFGHJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~	1
diff --git a/skbio/io/tests/data/qseq_invalid_x b/skbio/io/tests/data/qseq_invalid_x
new file mode 100644
index 0000000..4d2fb5a
--- /dev/null
+++ b/skbio/io/tests/data/qseq_invalid_x
@@ -0,0 +1 @@
+sanger	1	3	34	if_you're_a_shepherd	42	0	2	ACGTACGTACGTACGTACGTACGTACTTTTTTTTTTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC	;;>@BCEFGHJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~	1
diff --git a/skbio/io/tests/data/qseq_invalid_y b/skbio/io/tests/data/qseq_invalid_y
new file mode 100644
index 0000000..03f1532
--- /dev/null
+++ b/skbio/io/tests/data/qseq_invalid_y
@@ -0,0 +1 @@
+sanger	1	3	34	42	and_a_snake_is_killing_all_your_sheep	0	2	ACGTACGTACGTACGTACGTACGTACTTTTTTTTTTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC	;;>@BCEFGHJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~	1
diff --git a/skbio/io/tests/data/qseq_multi_seq_illumina1.3 b/skbio/io/tests/data/qseq_multi_seq_illumina1.3
new file mode 100644
index 0000000..88a56d8
--- /dev/null
+++ b/skbio/io/tests/data/qseq_multi_seq_illumina1.3
@@ -0,0 +1,4 @@
+illumina	1	3	34	-30	30	0	1	ACG....ACGTAC	ruBBBBrBCEFGH	1
+illumina	1	3	34	30	-30	0	1	CGGGCATTGCA	CGGGCasdGCA	0
+illumina	1	3	35	-30	30	0	2	ACGTA.AATAAAC	geTaAafhwqAAf	1
+illumina	1	3	35	30	-30	0	3	CATTTAGGA.TGCA	tjflkAFnkKghvM	0
diff --git a/skbio/io/tests/data/qseq_single_seq_sanger b/skbio/io/tests/data/qseq_single_seq_sanger
new file mode 100644
index 0000000..d0b36d8
--- /dev/null
+++ b/skbio/io/tests/data/qseq_single_seq_sanger
@@ -0,0 +1 @@
+sanger	1	3	34	-30	30	0	2	ACGTACGTACGTACGTACGTACGTACTTTTTTTTTTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC	;;>@BCEFGHJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~	1
diff --git a/skbio/io/tests/data/qual_2_seqs_defaults b/skbio/io/tests/data/qual_2_seqs_defaults
new file mode 100644
index 0000000..91eae1d
--- /dev/null
+++ b/skbio/io/tests/data/qual_2_seqs_defaults
@@ -0,0 +1,4 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+ 42    41 39 40 
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults b/skbio/io/tests/data/qual_3_seqs_defaults
new file mode 100644
index 0000000..7aa4abc
--- /dev/null
+++ b/skbio/io/tests/data/qual_3_seqs_defaults
@@ -0,0 +1,8 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+ 42    41 39 40 
+>s_e_q_3 desc 3
+100 0
+ 1
+-42  
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults_desc_mismatch b/skbio/io/tests/data/qual_3_seqs_defaults_desc_mismatch
new file mode 100644
index 0000000..f7a599b
--- /dev/null
+++ b/skbio/io/tests/data/qual_3_seqs_defaults_desc_mismatch
@@ -0,0 +1,8 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2 desc 42
+ 42    41 39 40 
+>s_e_q_3 desc 3
+100 0
+ 1
+-42  
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults_extra b/skbio/io/tests/data/qual_3_seqs_defaults_extra
new file mode 100644
index 0000000..27a0407
--- /dev/null
+++ b/skbio/io/tests/data/qual_3_seqs_defaults_extra
@@ -0,0 +1,10 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+ 42    41 39 40 
+>s_e_q_3 desc 3
+100 0
+ 1
+42  
+>s_e_q_4 desc 4
+42 42 42
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults_id_mismatch b/skbio/io/tests/data/qual_3_seqs_defaults_id_mismatch
new file mode 100644
index 0000000..5663f73
--- /dev/null
+++ b/skbio/io/tests/data/qual_3_seqs_defaults_id_mismatch
@@ -0,0 +1,8 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_42 desc 2
+ 42    41 39 40 
+>s_e_q_3 desc 3
+100 0
+ 1
+-42  
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults_length_mismatch b/skbio/io/tests/data/qual_3_seqs_defaults_length_mismatch
new file mode 100644
index 0000000..8e58f2b
--- /dev/null
+++ b/skbio/io/tests/data/qual_3_seqs_defaults_length_mismatch
@@ -0,0 +1,8 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+ 42    41 40 
+>s_e_q_3 desc 3
+100 0
+ 1
+-42  
diff --git a/skbio/io/tests/data/qual_3_seqs_non_defaults b/skbio/io/tests/data/qual_3_seqs_non_defaults
new file mode 100644
index 0000000..b9cc994
--- /dev/null
+++ b/skbio/io/tests/data/qual_3_seqs_non_defaults
@@ -0,0 +1,14 @@
+>s*e*q*1 desc+1
+1234
+0 0
+2
+>s*e*q*2 desc+2
+1
+11
+111
+11112
+>s*e*q*3 desc+3
+12345
+678909
+999999
+4242424242
diff --git a/skbio/io/tests/data/qual_description_newline_replacement_empty_str b/skbio/io/tests/data/qual_description_newline_replacement_empty_str
new file mode 100644
index 0000000..d1ec06d
--- /dev/null
+++ b/skbio/io/tests/data/qual_description_newline_replacement_empty_str
@@ -0,0 +1,4 @@
+>proteinseq detaileddescription 		with  newlines
+42 42 442 442 42 42 42 42 42 43
+>foo
+0 1 2 3 4 5 6 7 8
diff --git a/skbio/io/tests/data/qual_description_newline_replacement_multi_char b/skbio/io/tests/data/qual_description_newline_replacement_multi_char
new file mode 100644
index 0000000..567f300
--- /dev/null
+++ b/skbio/io/tests/data/qual_description_newline_replacement_multi_char
@@ -0,0 +1,4 @@
+>proteinseq :-)detailed:-)description 		with  new:-):-)lines:-):-):-)
+42 42 442 442 42 42 42 42 42 43
+>foo :-):-):-):-)
+0 1 2 3 4 5 6 7 8
diff --git a/skbio/io/tests/data/qual_description_newline_replacement_none b/skbio/io/tests/data/qual_description_newline_replacement_none
new file mode 100644
index 0000000..861548f
--- /dev/null
+++ b/skbio/io/tests/data/qual_description_newline_replacement_none
@@ -0,0 +1,15 @@
+>proteinseq 
+detailed
+description 		with  new
+
+lines
+
+
+
+42 42 442 442 42 42 42 42 42 43
+>foo 
+
+
+
+
+0 1 2 3 4 5 6 7 8
diff --git a/skbio/io/tests/data/qual_id_whitespace_replacement_empty_str b/skbio/io/tests/data/qual_id_whitespace_replacement_empty_str
new file mode 100644
index 0000000..456d07e
--- /dev/null
+++ b/skbio/io/tests/data/qual_id_whitespace_replacement_empty_str
@@ -0,0 +1,4 @@
+>seq2
+42
+> a b
+1000 1
diff --git a/skbio/io/tests/data/qual_id_whitespace_replacement_multi_char b/skbio/io/tests/data/qual_id_whitespace_replacement_multi_char
new file mode 100644
index 0000000..b13fd23
--- /dev/null
+++ b/skbio/io/tests/data/qual_id_whitespace_replacement_multi_char
@@ -0,0 +1,4 @@
+>>:o>:o>:o>:o>:oseq>:o>:o2>:o
+42
+>>:o>:o>:o>:o a b
+1000 1
diff --git a/skbio/io/tests/data/qual_id_whitespace_replacement_none b/skbio/io/tests/data/qual_id_whitespace_replacement_none
new file mode 100644
index 0000000..bcb1783
--- /dev/null
+++ b/skbio/io/tests/data/qual_id_whitespace_replacement_none
@@ -0,0 +1,7 @@
+> 
+  
+seq 	2 
+42
+>
+	 	 a b
+1000 1
diff --git a/skbio/io/tests/data/qual_invalid_blank_line b/skbio/io/tests/data/qual_invalid_blank_line
new file mode 100644
index 0000000..2201875
--- /dev/null
+++ b/skbio/io/tests/data/qual_invalid_blank_line
@@ -0,0 +1,9 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+ 42    41 39 40 
+
+>s_e_q_3 desc 3
+100 0
+ 1
+-42  
diff --git a/skbio/io/tests/data/qual_invalid_legacy_format b/skbio/io/tests/data/qual_invalid_legacy_format
new file mode 100644
index 0000000..ea35d20
--- /dev/null
+++ b/skbio/io/tests/data/qual_invalid_legacy_format
@@ -0,0 +1,2 @@
+; legacy-seq-id legacy description
+40 30 20 10
diff --git a/skbio/io/tests/data/qual_invalid_missing_header b/skbio/io/tests/data/qual_invalid_missing_header
new file mode 100644
index 0000000..9553718
--- /dev/null
+++ b/skbio/io/tests/data/qual_invalid_missing_header
@@ -0,0 +1,2 @@
+seq1 desc1
+1 2 3 4
diff --git a/skbio/io/tests/data/qual_invalid_missing_qual_scores_first b/skbio/io/tests/data/qual_invalid_missing_qual_scores_first
new file mode 100644
index 0000000..fe5cb56
--- /dev/null
+++ b/skbio/io/tests/data/qual_invalid_missing_qual_scores_first
@@ -0,0 +1,7 @@
+>s_e_q_1 desc 1
+>s_e_q_2          desc 2   
+ 42    41 39 40 
+>s_e_q_3 desc 3
+100 0
+ 1
+-42  
diff --git a/skbio/io/tests/data/qual_invalid_missing_qual_scores_last b/skbio/io/tests/data/qual_invalid_missing_qual_scores_last
new file mode 100644
index 0000000..ae60f8c
--- /dev/null
+++ b/skbio/io/tests/data/qual_invalid_missing_qual_scores_last
@@ -0,0 +1,5 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+ 42    41 39 40 
+>s_e_q_3 desc 3
diff --git a/skbio/io/tests/data/qual_invalid_missing_qual_scores_middle b/skbio/io/tests/data/qual_invalid_missing_qual_scores_middle
new file mode 100644
index 0000000..ef86125
--- /dev/null
+++ b/skbio/io/tests/data/qual_invalid_missing_qual_scores_middle
@@ -0,0 +1,7 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+>s_e_q_3 desc 3
+100 0
+ 1
+-42  
diff --git a/skbio/io/tests/data/qual_invalid_qual_scores_float b/skbio/io/tests/data/qual_invalid_qual_scores_float
new file mode 100644
index 0000000..8bd5244
--- /dev/null
+++ b/skbio/io/tests/data/qual_invalid_qual_scores_float
@@ -0,0 +1,8 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+ 42    41.0 39 40 
+>s_e_q_3 desc 3
+100 0
+ 1
+-42  
diff --git a/skbio/io/tests/data/qual_invalid_qual_scores_negative b/skbio/io/tests/data/qual_invalid_qual_scores_negative
new file mode 100644
index 0000000..7aa4abc
--- /dev/null
+++ b/skbio/io/tests/data/qual_invalid_qual_scores_negative
@@ -0,0 +1,8 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+ 42    41 39 40 
+>s_e_q_3 desc 3
+100 0
+ 1
+-42  
diff --git a/skbio/io/tests/data/qual_invalid_qual_scores_string b/skbio/io/tests/data/qual_invalid_qual_scores_string
new file mode 100644
index 0000000..3440f3a
--- /dev/null
+++ b/skbio/io/tests/data/qual_invalid_qual_scores_string
@@ -0,0 +1,8 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+ 42    41 39 40 
+>s_e_q_3 desc 3
+100 0
+ 1a
+-42  
diff --git a/skbio/io/tests/data/qual_invalid_whitespace_only_line b/skbio/io/tests/data/qual_invalid_whitespace_only_line
new file mode 100644
index 0000000..cd51bf4
--- /dev/null
+++ b/skbio/io/tests/data/qual_invalid_whitespace_only_line
@@ -0,0 +1,9 @@
+>s_e_q_1 desc 1
+1 2 3 4
+>s_e_q_2          desc 2   
+ 42    41 39 40 
+		     	   
+>s_e_q_3 desc 3
+100 0
+ 1
+-42  
diff --git a/skbio/io/tests/data/qual_max_width_1 b/skbio/io/tests/data/qual_max_width_1
new file mode 100644
index 0000000..719dd85
--- /dev/null
+++ b/skbio/io/tests/data/qual_max_width_1
@@ -0,0 +1,11 @@
+>seq1 desc1
+10
+20
+30
+10
+0
+0
+0
+88888
+1
+3456
diff --git a/skbio/io/tests/data/qual_max_width_5 b/skbio/io/tests/data/qual_max_width_5
new file mode 100644
index 0000000..708f97b
--- /dev/null
+++ b/skbio/io/tests/data/qual_max_width_5
@@ -0,0 +1,34 @@
+>seq1 desc1
+10 20
+30 10
+0 0 0
+88888
+1
+3456
+>_____seq__2_
+42
+> desc3
+0 0 0
+0 0 0
+0
+>
+1 2 3
+4 5 6
+777
+>
+55 10
+0 999
+1 1 8
+775
+40 10
+10 0
+>
+10 9
+8 7 6
+>proteinseq  detailed description 		with  new  lines   
+42 42
+442
+442
+42 42
+42 42
+42 43
diff --git a/skbio/io/tests/data/qual_multi_seq b/skbio/io/tests/data/qual_multi_seq
new file mode 100644
index 0000000..392dfe7
--- /dev/null
+++ b/skbio/io/tests/data/qual_multi_seq
@@ -0,0 +1,14 @@
+>seq1 desc1
+10 20 30 10 0 0 0 88888 1 3456
+>_____seq__2_
+42
+> desc3
+0 0 0 0 0 0 0
+>
+1 2 3 4 5 6 777
+>
+55 10 0 999 1 1 8 775 40 10 10 0
+>
+10 9 8 7 6
+>proteinseq  detailed description 		with  new  lines   
+42 42 442 442 42 42 42 42 42 43
diff --git a/skbio/io/tests/data/qual_multi_seq_roundtrip b/skbio/io/tests/data/qual_multi_seq_roundtrip
new file mode 100644
index 0000000..87fee93
--- /dev/null
+++ b/skbio/io/tests/data/qual_multi_seq_roundtrip
@@ -0,0 +1,6 @@
+>seq-a a's description
+0 1 2 3 4 5 6 7 8
+>seq-b b's description
+0 1 2 3 4 5 6 7 8 9 10 11
+>seq-c c's description
+0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
diff --git a/skbio/io/tests/data/qual_prot_seqs_odd_labels b/skbio/io/tests/data/qual_prot_seqs_odd_labels
new file mode 100644
index 0000000..0a2443d
--- /dev/null
+++ b/skbio/io/tests/data/qual_prot_seqs_odd_labels
@@ -0,0 +1,8 @@
+>	 		     
+  	-0000000     0000                             01 5			   	
+	 00044 -0 		  
+>  	 skbio       			   
+1
+ 2
+  33  
+   123456789
diff --git a/skbio/io/tests/data/qual_sequence_collection_different_type b/skbio/io/tests/data/qual_sequence_collection_different_type
new file mode 100644
index 0000000..b61c4fb
--- /dev/null
+++ b/skbio/io/tests/data/qual_sequence_collection_different_type
@@ -0,0 +1,6 @@
+> 
+20 20 21
+>rnaseq-1 rnaseq desc 1  
+10 9 10
+>rnaseq-2        rnaseq desc 2
+9 99 999
diff --git a/skbio/io/tests/data/qual_single_bio_seq_non_defaults b/skbio/io/tests/data/qual_single_bio_seq_non_defaults
new file mode 100644
index 0000000..1ca0892
--- /dev/null
+++ b/skbio/io/tests/data/qual_single_bio_seq_non_defaults
@@ -0,0 +1,5 @@
+>f-o-o b_a_r
+1
+2
+3
+4
diff --git a/skbio/io/tests/data/qual_single_dna_seq_non_defaults b/skbio/io/tests/data/qual_single_dna_seq_non_defaults
new file mode 100644
index 0000000..2733613
--- /dev/null
+++ b/skbio/io/tests/data/qual_single_dna_seq_non_defaults
@@ -0,0 +1,5 @@
+>f-o-o b_a_r
+0
+1
+2
+3
diff --git a/skbio/io/tests/data/qual_single_nuc_seq_non_defaults b/skbio/io/tests/data/qual_single_nuc_seq_non_defaults
new file mode 100644
index 0000000..2b36cbe
--- /dev/null
+++ b/skbio/io/tests/data/qual_single_nuc_seq_non_defaults
@@ -0,0 +1,6 @@
+>f-o-o b_a_r
+0
+1
+2
+3
+4
diff --git a/skbio/io/tests/data/qual_single_prot_seq_non_defaults b/skbio/io/tests/data/qual_single_prot_seq_non_defaults
new file mode 100644
index 0000000..353693a
--- /dev/null
+++ b/skbio/io/tests/data/qual_single_prot_seq_non_defaults
@@ -0,0 +1,4 @@
+>f-o-o b_a_r
+42
+41
+40
diff --git a/skbio/io/tests/data/qual_single_rna_seq_non_defaults b/skbio/io/tests/data/qual_single_rna_seq_non_defaults
new file mode 100644
index 0000000..73da5d5
--- /dev/null
+++ b/skbio/io/tests/data/qual_single_rna_seq_non_defaults
@@ -0,0 +1,5 @@
+>f-o-o b_a_r
+2
+3
+4
+5
diff --git a/skbio/io/tests/data/qual_single_seq b/skbio/io/tests/data/qual_single_seq
new file mode 100644
index 0000000..967dc14
--- /dev/null
+++ b/skbio/io/tests/data/qual_single_seq
@@ -0,0 +1,2 @@
+>seq1 desc1
+10 20 30 10 0 0 0 88888 1 3456
diff --git a/skbio/io/tests/data/real_file b/skbio/io/tests/data/real_file
new file mode 100644
index 0000000..9405325
--- /dev/null
+++ b/skbio/io/tests/data/real_file
@@ -0,0 +1,5 @@
+a
+b
+c
+d
+e
diff --git a/skbio/io/tests/data/real_file_2 b/skbio/io/tests/data/real_file_2
new file mode 100644
index 0000000..009f7f5
--- /dev/null
+++ b/skbio/io/tests/data/real_file_2
@@ -0,0 +1,6 @@
+!
+@
+#
+$
+%
+The realest.
diff --git a/skbio/io/tests/data/sanger_full_range_as_illumina.fastq b/skbio/io/tests/data/sanger_full_range_as_illumina.fastq
new file mode 100644
index 0000000..7a9cc7e
--- /dev/null
+++ b/skbio/io/tests/data/sanger_full_range_as_illumina.fastq
@@ -0,0 +1,8 @@
+ at FAKE0001 Original version has PHRED scores from 0 to 93 inclusive (in that order)
+ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
++
+ at ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ at FAKE0002 Original version has PHRED scores from 93 to 0 inclusive (in that order)
+CATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCA
++
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@
diff --git a/skbio/io/tests/data/sanger_full_range_as_sanger.fastq b/skbio/io/tests/data/sanger_full_range_as_sanger.fastq
new file mode 100644
index 0000000..d34b78d
--- /dev/null
+++ b/skbio/io/tests/data/sanger_full_range_as_sanger.fastq
@@ -0,0 +1,8 @@
+ at FAKE0001 Original version has PHRED scores from 0 to 93 inclusive (in that order)
+ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+ at FAKE0002 Original version has PHRED scores from 93 to 0 inclusive (in that order)
+CATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCA
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#"!
diff --git a/skbio/io/tests/data/sanger_full_range_original_sanger.fastq b/skbio/io/tests/data/sanger_full_range_original_sanger.fastq
new file mode 100644
index 0000000..d34b78d
--- /dev/null
+++ b/skbio/io/tests/data/sanger_full_range_original_sanger.fastq
@@ -0,0 +1,8 @@
+ at FAKE0001 Original version has PHRED scores from 0 to 93 inclusive (in that order)
+ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+ at FAKE0002 Original version has PHRED scores from 93 to 0 inclusive (in that order)
+CATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCA
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#"!
diff --git a/skbio/io/tests/data/solexa_full_range_original_solexa.fastq b/skbio/io/tests/data/solexa_full_range_original_solexa.fastq
new file mode 100644
index 0000000..787ad58
--- /dev/null
+++ b/skbio/io/tests/data/solexa_full_range_original_solexa.fastq
@@ -0,0 +1,8 @@
+ at FAKE0003 Original version has Solexa scores from -5 to 62 inclusive (in that order)
+ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT
++
+;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+ at FAKE0004 Original version has Solexa scores from 62 to -5 inclusive (in that order)
+TGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCA
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;
diff --git a/skbio/io/tests/data/tsv_10_fields b/skbio/io/tests/data/tsv_10_fields
new file mode 100644
index 0000000..64eb131
--- /dev/null
+++ b/skbio/io/tests/data/tsv_10_fields
@@ -0,0 +1,6 @@
+buh	na	de	nuh	buh	KRRGRHRGRBAWAOMPWAMPOWAMP	na	de	nuh	de
+1	1	1	1	1	1	1	1	1	1
+3	3	3	3	3	3 	$	3	3	3
+4	4	4	4	4	4	4	%	4	4
+4	4	4	4	4	4	4	4	%	4
+5	5	5	5	5	5	5	wat	6	^
diff --git a/skbio/io/tests/data/tsv_8_fields b/skbio/io/tests/data/tsv_8_fields
new file mode 100644
index 0000000..7bc89ef
--- /dev/null
+++ b/skbio/io/tests/data/tsv_8_fields
@@ -0,0 +1,6 @@
+buh	na	de	nuh	buh	KRRGRHRGRBAWAOMPWAMPOWAMP	na	de
+1	1	1	1	1	1	1	1
+3	3	3	3	3	3 	$	3
+4	4	4	4	4	4	4	%
+4	4	4	4	4	4	4	4
+5	5	5	5	5	5	5	wat
diff --git a/skbio/io/tests/data/whitespace_only b/skbio/io/tests/data/whitespace_only
new file mode 100644
index 0000000..40c5ce4
--- /dev/null
+++ b/skbio/io/tests/data/whitespace_only
@@ -0,0 +1,17 @@
+
+
+
+
+			
+		    	 
+         
+
+   
+ 	 	
+
+		
+
+
+
+
+ 
diff --git a/skbio/io/tests/data/wrapping_as_illumina.fastq b/skbio/io/tests/data/wrapping_as_illumina.fastq
new file mode 100644
index 0000000..4a1240a
--- /dev/null
+++ b/skbio/io/tests/data/wrapping_as_illumina.fastq
@@ -0,0 +1,12 @@
+ at SRR014849.50939 EIXKN4201BA2EC length=135
+GAAATTTCAGGGCCACCTTTTTTTTGATAGAATAATGGAGAAAATTAAAAGCTGTACATATACCAATGAACAATAAATCAATACATAAAAAAGGAGAAGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTCGG
++
+Zb^Ld`N\[d`NaZ[aZc]UOKHDA[\YT[_W[aZ\aZ[Zd`SF_WeaUI[Y\[[\\\[\Z\aY`X[[aZ\aZ\d`OY[aY[[\[[e`WPJC^UZ[`X\[R]T_V_W[`[Ga\I`\H[[Q^TVa\Ia\Ic^LY\S
+ at SRR014849.110027 EIXKN4201APUB0 length=131
+CTTCAAATGATTCCGGGACTGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTTCGGTTCCAACTCGCCGTCCGAATAATCCGTTCAAAATCTTGGCCTGTCAAAACGACTTTACGACCAGAACGATCCG
++
+\aYY_[FY\T`X^Vd`OY\[[^U_V[R^T[_ZDc^La\HYYO\S[c^Ld`Nc_QAZaZaYaY`XZZ\[aZZ[aZ[aZ[aZY`Z[`ZWeaVJ\[aZaY`X[PY\eaUG[\[[d`OXTUZ[Q\\`W\\\Y_W\
+ at SRR014849.203935 EIXKN4201B4HU6 length=144
+AACCCGTCCCATCAAAGATTTTGGTTGGAACCCGAAAGGGTTTTGAATTCAAACCCCTTTCGGTTCCAACTATTCAATTGTTTAACTTTTTTTAAATTGATGGTCTGTTGGACCATTTGTAATAATCCCCATCGGAATTTCTTT
++
+`Z_ZDVT^YB[[Xd`PZ\d`RDaZaZ`ZaZ_ZDXd`Pd`Pd`RD[aZ`ZWd`Oc_RCd`P\aZ`ZaZaZY\YaZYaY`XYd`O`X[e`WPJEAc^LaZS[YYN[Z\Y`XWLT^U\b]JW[[RZ\SYc`RD[Z\WLXM`\HYa\I
diff --git a/skbio/io/tests/data/wrapping_as_sanger.fastq b/skbio/io/tests/data/wrapping_as_sanger.fastq
new file mode 100644
index 0000000..cd4d11d
--- /dev/null
+++ b/skbio/io/tests/data/wrapping_as_sanger.fastq
@@ -0,0 +1,12 @@
+ at SRR014849.50939 EIXKN4201BA2EC length=135
+GAAATTTCAGGGCCACCTTTTTTTTGATAGAATAATGGAGAAAATTAAAAGCTGTACATATACCAATGAACAATAAATCAATACATAAAAAAGGAGAAGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTCGG
++
+;C?-EA/=<EA/B;<B;D>60,)%"<=:5<@8<B;=B;<;EA4'@8FB6*<:=<<===<=;=B:A9<<B;=B;=EA0:<B:<<=<<FA81+$?6;<A9=<3>5 at 7@8<A<(B=*A=)<<2?57B=*B=*D?-:=4
+ at SRR014849.110027 EIXKN4201APUB0 length=131
+CTTCAAATGATTCCGGGACTGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTTCGGTTCCAACTCGCCGTCCGAATAATCCGTTCAAAATCTTGGCCTGTCAAAACGACTTTACGACCAGAACGATCCG
++
+=B::@<':=5A9?7EA0:=<<?6 at 7<3?5<@;%D?-B=)::0=4<D?-EA/D at 2";B;B:B:A9;;=<B;;<B;<B;<B;:A;<A;8FB7+=<B;B:A9<1:=FB6(<=<<EA0956;<2==A8===:@8=
+ at SRR014849.203935 EIXKN4201B4HU6 length=144
+AACCCGTCCCATCAAAGATTTTGGTTGGAACCCGAAAGGGTTTTGAATTCAAACCCCTTTCGGTTCCAACTATTCAATTGTTTAACTTTTTTTAAATTGATGGTCTGTTGGACCATTTGTAATAATCCCCATCGGAATTTCTTT
++
+A;@;%75?:#<<9EA1;=EA3%B;B;A;B;@;%9EA1EA1EA3%<B;A;8EA0D at 3$EA1=B;A;B;B;:=:B;:B:A9:EA0A9<FA81+&"D?-B;4<::/<;=:A98-5?6=C>+8<<3;=4:DA3%<;=8-9.A=):B=*
diff --git a/skbio/io/tests/data/wrapping_original_sanger.fastq b/skbio/io/tests/data/wrapping_original_sanger.fastq
new file mode 100644
index 0000000..d12d6f6
--- /dev/null
+++ b/skbio/io/tests/data/wrapping_original_sanger.fastq
@@ -0,0 +1,24 @@
+ at SRR014849.50939 EIXKN4201BA2EC length=135
+GAAATTTCAGGGCCACCTTTTTTTTGATAGAATAATGGAGAAAATTAAAAGCTGTACATATACCAATGAACAATAAATCAATACATAAAAAAGGAGAAGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTCGG
++
+;C?-EA/=<EA/B;<B;D>60,)%"<=:5<
+ at 8<B;=B;<;EA4'@8FB6*<:=<<===<=
+;=B:A9<<B;=B;=EA0:<B:<<=<<FA81
++$?6;<A9=<3>5 at 7@8<A<(B=*A=)<<2
+?57B=*B=*D?-:=4
+ at SRR014849.110027 EIXKN4201APUB0 length=131
+CTTCAAATGATTCCGGGACTGTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTTCGGTTCCAACTCGCCGTCCGAATAATCCGTTCAAAATCTTGGCCTGTCAAAACGACTTTACGACCAGAACGATCCG
++
+=B::@<':=5A9?7EA0:=<<?6 at 7<3?5<
+@;%D?-B=)::0=4<D?-EA/D at 2";B;B:
+B:A9;;=<B;;<B;<B;<B;:A;<A;8FB7
++=<B;B:A9<1:=FB6(<=<<EA0956;<2
+==A8===:@8=
+ at SRR014849.203935 EIXKN4201B4HU6 length=144
+AACCCGTCCCATCAAAGATTTTGGTTGGAACCCGAAAGGGTTTTGAATTCAAACCCCTTTCGGTTCCAACTATTCAATTGTTTAACTTTTTTTAAATTGATGGTCTGTTGGACCATTTGTAATAATCCCCATCGGAATTTCTTT
++
+A;@;%75?:#<<9EA1;=EA3%B;B;A;B;
+@;%9EA1EA1EA3%<B;A;8EA0D at 3$EA1
+=B;A;B;B;:=:B;:B:A9:EA0A9<FA81
++&"D?-B;4<::/<;=:A98-5?6=C>+8<
+<3;=4:DA3%<;=8-9.A=):B=*
diff --git a/skbio/io/tests/test_base.py b/skbio/io/tests/test_base.py
new file mode 100644
index 0000000..ff7d429
--- /dev/null
+++ b/skbio/io/tests/test_base.py
@@ -0,0 +1,359 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import range, zip
+
+import unittest
+
+import numpy.testing as npt
+
+from skbio import BiologicalSequence, DNASequence, RNASequence
+from skbio.io._base import (_chunk_str, _decode_qual_to_phred,
+                            _encode_phred_to_qual, _get_nth_sequence,
+                            _parse_fasta_like_header,
+                            _format_fasta_like_records)
+
+
+class ChunkStrTests(unittest.TestCase):
+    def test_even_split(self):
+        self.assertEqual(_chunk_str('abcdef', 6, ' '), 'abcdef')
+        self.assertEqual(_chunk_str('abcdef', 3, ' '), 'abc def')
+        self.assertEqual(_chunk_str('abcdef', 2, ' '), 'ab cd ef')
+        self.assertEqual(_chunk_str('abcdef', 1, ' '), 'a b c d e f')
+        self.assertEqual(_chunk_str('a', 1, ' '), 'a')
+        self.assertEqual(_chunk_str('abcdef', 2, ''), 'abcdef')
+
+    def test_no_split(self):
+        self.assertEqual(_chunk_str('', 2, '\n'), '')
+        self.assertEqual(_chunk_str('a', 100, '\n'), 'a')
+        self.assertEqual(_chunk_str('abcdef', 42, '|'), 'abcdef')
+
+    def test_uneven_split(self):
+        self.assertEqual(_chunk_str('abcdef', 5, '|'), 'abcde|f')
+        self.assertEqual(_chunk_str('abcdef', 4, '|'), 'abcd|ef')
+        self.assertEqual(_chunk_str('abcdefg', 3, ' - '), 'abc - def - g')
+
+    def test_invalid_n(self):
+        with self.assertRaisesRegexp(ValueError, 'n=0'):
+            _chunk_str('abcdef', 0, ' ')
+
+        with self.assertRaisesRegexp(ValueError, 'n=-42'):
+            _chunk_str('abcdef', -42, ' ')
+
+
+class PhredDecoderTests(unittest.TestCase):
+    def test_missing_variant_and_phred_offset(self):
+        with self.assertRaises(ValueError) as cm:
+            _decode_qual_to_phred('abcd')
+        self.assertIn('`variant`', str(cm.exception))
+        self.assertIn('`phred_offset`', str(cm.exception))
+        self.assertIn('decode', str(cm.exception))
+
+    def test_variant_and_phred_offset_provided(self):
+        with self.assertRaises(ValueError) as cm:
+            _decode_qual_to_phred('abcd', variant='sanger', phred_offset=64)
+        self.assertIn('both', str(cm.exception))
+        self.assertIn('`variant`', str(cm.exception))
+        self.assertIn('`phred_offset`', str(cm.exception))
+
+    def test_solexa_variant(self):
+        with self.assertRaises(NotImplementedError) as cm:
+            _decode_qual_to_phred('abcd', variant='solexa')
+        self.assertIn('719', str(cm.exception))
+
+    def test_unrecognized_variant(self):
+        with self.assertRaises(ValueError) as cm:
+            _decode_qual_to_phred('abcd', variant='illumina')
+        self.assertIn('variant', str(cm.exception))
+        self.assertIn("'illumina'", str(cm.exception))
+
+    def test_empty_qual_str(self):
+        self.assertEqual(_decode_qual_to_phred('', variant='sanger'), [])
+
+    def test_sanger_variant(self):
+        # test entire range of possible ascii chars for sanger
+        all_sanger_ascii = ('!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOP'
+                            'QRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
+        obs = _decode_qual_to_phred(all_sanger_ascii, variant='sanger')
+        self.assertEqual(obs, list(range(94)))
+
+        with self.assertRaises(ValueError) as cm:
+            _decode_qual_to_phred('a b', variant='sanger')
+        self.assertIn('-1', str(cm.exception))
+        self.assertIn('[0, 93]', str(cm.exception))
+
+    def test_illumina13_variant(self):
+        # test entire range of possible ascii chars for illumina1.3
+        all_illumina13_ascii = ('@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijk'
+                                'lmnopqrstuvwxyz{|}~')
+        obs = _decode_qual_to_phred(all_illumina13_ascii,
+                                    variant='illumina1.3')
+        self.assertEqual(obs, list(range(63)))
+
+        with self.assertRaises(ValueError) as cm:
+            _decode_qual_to_phred('a!b', variant='illumina1.3')
+        self.assertIn('-31', str(cm.exception))
+        self.assertIn('[0, 62]', str(cm.exception))
+
+    def test_illumina18_variant(self):
+        # test entire range of possible ascii chars for illumina1.8
+        all_illumina18_ascii = ('!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKL'
+                                'MNOPQRSTUVWXYZ[\\]^_')
+        obs = _decode_qual_to_phred(all_illumina18_ascii,
+                                    variant='illumina1.8')
+        self.assertEqual(obs, list(range(63)))
+
+        with self.assertRaises(ValueError) as cm:
+            _decode_qual_to_phred('AaB', variant='illumina1.8')
+        self.assertIn('64', str(cm.exception))
+        self.assertIn('[0, 62]', str(cm.exception))
+
+    def test_custom_phred_offset(self):
+        ascii_chars = '*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\'
+        obs = _decode_qual_to_phred(ascii_chars, phred_offset=42)
+        self.assertEqual(obs, list(range(51)))
+
+        with self.assertRaises(ValueError) as cm:
+            _decode_qual_to_phred(ascii_chars, phred_offset=43)
+        self.assertIn('-1', str(cm.exception))
+        self.assertIn('[0, 83]', str(cm.exception))
+
+        with self.assertRaises(ValueError) as cm:
+            _decode_qual_to_phred(ascii_chars, phred_offset=0)
+        self.assertIn('`phred_offset`', str(cm.exception))
+        self.assertIn('0', str(cm.exception))
+        self.assertIn('printable', str(cm.exception))
+
+        with self.assertRaises(ValueError) as cm:
+            _decode_qual_to_phred(ascii_chars, phred_offset=127)
+        self.assertIn('`phred_offset`', str(cm.exception))
+        self.assertIn('127', str(cm.exception))
+        self.assertIn('printable', str(cm.exception))
+
+
+class PhredEncoderTests(unittest.TestCase):
+    def test_missing_variant_and_phred_offset(self):
+        with self.assertRaises(ValueError) as cm:
+            _encode_phred_to_qual([1, 2, 3])
+        self.assertIn('`variant`', str(cm.exception))
+        self.assertIn('`phred_offset`', str(cm.exception))
+        self.assertIn('encode', str(cm.exception))
+
+    def test_variant_and_phred_offset_provided(self):
+        with self.assertRaises(ValueError) as cm:
+            _encode_phred_to_qual([1, 2, 3], variant='sanger', phred_offset=64)
+        self.assertIn('both', str(cm.exception))
+        self.assertIn('`variant`', str(cm.exception))
+        self.assertIn('`phred_offset`', str(cm.exception))
+
+    def test_solexa_variant(self):
+        with self.assertRaises(NotImplementedError) as cm:
+            _encode_phred_to_qual([1, 2, 3], variant='solexa')
+        self.assertIn('719', str(cm.exception))
+
+    def test_unrecognized_variant(self):
+        with self.assertRaises(ValueError) as cm:
+            _encode_phred_to_qual([1, 2, 3], variant='illumina')
+        self.assertIn('variant', str(cm.exception))
+        self.assertIn("'illumina'", str(cm.exception))
+
+    def test_no_phred_scores(self):
+        self.assertEqual(_encode_phred_to_qual([], variant='sanger'), '')
+
+    def test_sanger_variant(self):
+        # test entire range of possible ascii chars for sanger
+        all_sanger_ascii = ('!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOP'
+                            'QRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
+        obs = _encode_phred_to_qual(list(range(94)), variant='sanger')
+        self.assertEqual(obs, all_sanger_ascii)
+
+        with self.assertRaises(ValueError) as cm:
+            _encode_phred_to_qual([42, -1, 33], variant='sanger')
+        self.assertIn('-1', str(cm.exception))
+        self.assertIn('[0, 93]', str(cm.exception))
+
+        obs = npt.assert_warns(UserWarning, _encode_phred_to_qual,
+                               [42, 94, 33], variant='sanger')
+        self.assertEqual(obs, 'K~B')
+
+    def test_illumina13_variant(self):
+        # test entire range of possible ascii chars for illumina1.3
+        all_illumina13_ascii = ('@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijk'
+                                'lmnopqrstuvwxyz{|}~')
+        obs = _encode_phred_to_qual(list(range(63)), variant='illumina1.3')
+        self.assertEqual(obs, all_illumina13_ascii)
+
+        with self.assertRaises(ValueError) as cm:
+            _encode_phred_to_qual([42, -1, 33], variant='illumina1.3')
+        self.assertIn('-1', str(cm.exception))
+        self.assertIn('[0, 62]', str(cm.exception))
+
+        obs = npt.assert_warns(UserWarning, _encode_phred_to_qual,
+                               [42, 63, 33], variant='illumina1.3')
+        self.assertEqual(obs, 'j~a')
+
+    def test_illumina18_variant(self):
+        # test entire range of possible ascii chars for illumina1.8
+        all_illumina18_ascii = ('!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKL'
+                                'MNOPQRSTUVWXYZ[\\]^_')
+        obs = _encode_phred_to_qual(list(range(63)), variant='illumina1.8')
+        self.assertEqual(obs, all_illumina18_ascii)
+
+        with self.assertRaises(ValueError) as cm:
+            _encode_phred_to_qual([42, -1, 33], variant='illumina1.8')
+        self.assertIn('-1', str(cm.exception))
+        self.assertIn('[0, 62]', str(cm.exception))
+
+        obs = npt.assert_warns(UserWarning, _encode_phred_to_qual,
+                               [42, 63, 33], variant='illumina1.8')
+        self.assertEqual(obs, 'K_B')
+
+    def test_custom_phred_offset(self):
+        ascii_chars = '*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\'
+        obs = _encode_phred_to_qual(list(range(51)), phred_offset=42)
+        self.assertEqual(obs, ascii_chars)
+
+        with self.assertRaises(ValueError) as cm:
+            _encode_phred_to_qual([42, -1, 33], phred_offset=42)
+        self.assertIn('-1', str(cm.exception))
+        self.assertIn('[0, 84]', str(cm.exception))
+
+        obs = npt.assert_warns(UserWarning, _encode_phred_to_qual,
+                               [42, 255, 33], phred_offset=42)
+        self.assertEqual(obs, 'T~K')
+
+
+class TestGetNthSequence(unittest.TestCase):
+    def setUp(self):
+        def generator():
+            for i in range(1, 6):
+                yield 'goldilocks: ' + str(i)
+
+        self.gen = generator()
+
+    def test_seq_num_too_small(self):
+        with self.assertRaises(ValueError) as cm:
+            _get_nth_sequence(self.gen, 0)
+
+        self.assertIn('between 1 and', str(cm.exception))
+        self.assertIn('0', str(cm.exception))
+
+    def test_seq_num_too_big(self):
+        with self.assertRaises(ValueError) as cm:
+            _get_nth_sequence(self.gen, 6)
+
+        self.assertIn('end of file', str(cm.exception))
+        self.assertIn('6th', str(cm.exception))
+
+    def test_seq_num_just_right(self):
+        value = _get_nth_sequence(self.gen, 3)
+        self.assertEqual(value, 'goldilocks: 3')
+
+
+class TestParseFASTALikeHeader(unittest.TestCase):
+    def test_no_id_or_description(self):
+        obs = _parse_fasta_like_header('> \t\t  \n')
+        self.assertEqual(obs, ('', ''))
+
+    def test_id_only(self):
+        obs = _parse_fasta_like_header('>suht! \t\t  \n')
+        self.assertEqual(obs, ('suht!', ''))
+
+    def test_description_only(self):
+        obs = _parse_fasta_like_header('> suht! \t\t  \n')
+        self.assertEqual(obs, ('', 'suht!'))
+
+    def test_id_and_description(self):
+        obs = _parse_fasta_like_header('>!thus  suht! \t\t  \n')
+        self.assertEqual(obs, ('!thus', 'suht!'))
+
+
+class TestFormatFASTALikeRecords(unittest.TestCase):
+    def setUp(self):
+        def generator():
+            yield BiologicalSequence('ACGT', id='', description='',
+                                     quality=range(4))
+            yield RNASequence('GAU', id='  foo \t\t bar ', description='')
+            yield DNASequence('TAG', id='', description='foo\n\n bar\n')
+            yield BiologicalSequence('A', id='foo', description='bar baz',
+                                     quality=[42])
+        self.gen = generator()
+
+    def test_no_replacement(self):
+        exp = [
+            ('', 'ACGT', range(4)),
+            ('  foo \t\t bar ', 'GAU', None),
+            (' foo\n\n bar\n', 'TAG', None),
+            ('foo bar baz', 'A', [42])
+        ]
+        obs = list(_format_fasta_like_records(self.gen, None, None, False))
+
+        self.assertEqual(len(obs), len(exp))
+        for o, e in zip(obs, exp):
+            npt.assert_equal(o, e)
+
+    def test_empty_str_replacement(self):
+        exp = [
+            ('', 'ACGT', range(4)),
+            ('foobar', 'GAU', None),
+            (' foo bar', 'TAG', None),
+            ('foo bar baz', 'A', [42])
+        ]
+        obs = list(_format_fasta_like_records(self.gen, '', '', False))
+
+        self.assertEqual(len(obs), len(exp))
+        for o, e in zip(obs, exp):
+            npt.assert_equal(o, e)
+
+    def test_multi_char_replacement(self):
+        exp = [
+            ('', 'ACGT', range(4)),
+            ('-.--.-foo-.--.--.--.-bar-.-', 'GAU', None),
+            (' foo_-__-_ bar_-_', 'TAG', None),
+            ('foo bar baz', 'A', [42])
+        ]
+        obs = list(_format_fasta_like_records(self.gen, '-.-', '_-_', False))
+
+        self.assertEqual(len(obs), len(exp))
+        for o, e in zip(obs, exp):
+            npt.assert_equal(o, e)
+
+    def test_newline_character_in_id_whitespace_replacement(self):
+        with self.assertRaisesRegexp(ValueError, 'Newline character'):
+            list(_format_fasta_like_records(self.gen, '-\n--', ' ', False))
+
+    def test_newline_character_in_description_newline_replacement(self):
+        with self.assertRaisesRegexp(ValueError, 'Newline character'):
+            list(_format_fasta_like_records(self.gen, None, 'a\nb', False))
+
+    def test_empty_sequence(self):
+        def blank_seq_gen():
+            for seq in (DNASequence('A'), BiologicalSequence(''),
+                        RNASequence('GG')):
+                yield seq
+
+        with self.assertRaisesRegexp(ValueError, '2nd.*empty'):
+            list(_format_fasta_like_records(blank_seq_gen(), None, None,
+                                            False))
+
+    def test_missing_quality_scores(self):
+        def missing_qual_gen():
+            for seq in (RNASequence('A', quality=[42]),
+                        BiologicalSequence('AG'),
+                        DNASequence('GG', quality=[41, 40])):
+                yield seq
+
+        with self.assertRaisesRegexp(ValueError,
+                                     '2nd sequence.*quality scores'):
+            list(_format_fasta_like_records(missing_qual_gen(), '-', '-',
+                                            True))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/io/tests/test_clustal.py b/skbio/io/tests/test_clustal.py
new file mode 100644
index 0000000..7a022cf
--- /dev/null
+++ b/skbio/io/tests/test_clustal.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+from six import StringIO
+
+from unittest import TestCase, main
+
+from skbio.io.clustal import (_clustal_to_alignment, _alignment_to_clustal,
+                              _clustal_sniffer)
+from skbio.io.clustal import (_is_clustal_seq_line, last_space,
+                              _delete_trailing_number, _check_length,
+                              _label_line_parser)
+
+from skbio.io import ClustalFormatError
+from skbio.parse.record import DelimitedSplitter
+
+
+class ClustalHelperTests(TestCase):
+
+    """Tests of top-level functions."""
+
+    def test_label_line_parser(self):
+        last_space = DelimitedSplitter(None, -1)
+        self.assertEquals(_label_line_parser(StringIO('abc\tucag'),
+                                             last_space),
+                          ({"abc": ["ucag"]}, ['abc']))
+
+        with self.assertRaises(ClustalFormatError):
+            _label_line_parser(StringIO('abctucag'), last_space)
+
+    def test_is_clustal_seq_line(self):
+
+        ic = _is_clustal_seq_line
+        assert ic('abc')
+        assert ic('abc  def')
+        assert not ic('CLUSTAL')
+        assert not ic('CLUSTAL W fsdhicjkjsdk')
+        assert not ic('  *   *')
+        assert not ic(' abc def')
+        assert not ic('MUSCLE (3.41) multiple sequence alignment')
+
+    def test_last_space(self):
+
+        self.assertEqual(last_space('a\t\t\t  b    c'), ['a b', 'c'])
+        self.assertEqual(last_space('xyz'), ['xyz'])
+        self.assertEqual(last_space('  a b'), ['a', 'b'])
+
+    def test_delete_trailing_number(self):
+
+        dtn = _delete_trailing_number
+        self.assertEqual(dtn('abc'), 'abc')
+        self.assertEqual(dtn('a b c'), 'a b c')
+        self.assertEqual(dtn('a \t  b  \t  c'), 'a \t  b  \t  c')
+        self.assertEqual(dtn('a b 3'), 'a b')
+        self.assertEqual(dtn('a b c \t 345'), 'a b c')
+
+    def test_check_lengh(self):
+        self.assertEqual(False,
+                         _check_length({'abc': ['adjfkadfjaksdlfadskfda'],
+                                        'xyz': ['adjfkadfjaksdlfadsk']},
+                                       ['abc', 'xyz'])),
+        self.assertEqual(True,
+                         _check_length({'abc': ['adjfkadfjaksdlfadskfda'],
+                                        'xyz': ['adjfkadfjaksdlfadsksdf']},
+                                       ['abc', 'xyz']))
+        self.assertEqual(True,
+                         _check_length({'abc': ['adjfkadfjaksdlfadskfda',
+                                                'adjfkadfjaksdlfadskfda'],
+                                        'xyz': ['adjfkadfjaksdlfadsksdf',
+                                                'adjfkadfjaksdlfadsksdf']},
+                                       ['abc', 'xyz']))
+        self.assertEqual(False,
+                         _check_length({'abc': ['adjfkadfjaksdlfadskfd',
+                                                'adjfkadfjaksdlfadskfda'],
+                                        'xyz': ['adjfkadfjaksdlfadsksdf',
+                                                'adjfkadfjaksdlfadsksdf']},
+                                       ['abc', 'xyz']))
+        self.assertEqual(False,
+                         _check_length({'abc': ['adjfkadfjaksdlfadskfda',
+                                                'adjfkadfjaksdlfadskfda'],
+                                        'xyz': ['adjfkadfjaksdlfadsksdf',
+                                                'adjfkadfjaksdlfadsksd']},
+                                       ['abc', 'xyz']))
+
+
+class ClustalIOTests(TestCase):
+
+    def setUp(self):
+        self.valid_clustal_out = [
+            StringIO('abc\tucag'),
+            StringIO('abc\tuuu\ndef\tccc\n\n    ***\n\ndef ggg\nabc\taaa\n'),
+            StringIO('\n'.join(['abc uca', 'def ggg ccc'])),
+            StringIO('\n'.join(['abc uca ggg', 'def ggg ccc'])),
+            StringIO("""CLUSTAL
+
+
+abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
+def             ------------------------------------------------------------
+xyz             ------------------------------------------------------------
+
+
+"""),
+            StringIO("""CLUSTAL
+
+
+abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
+def             ------------------------------------------------------------
+xyz             ------------------------------------------------------------
+
+
+abc             GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC
+def             -----------------------------------------CGCGAUGCAUGCAU-CGAU
+xyz             -------------------------------------CAUGCAUCGUACGUACGCAUGAC
+"""),
+            StringIO("""CLUSTAL W (1.82) multiple sequence alignment
+
+
+abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
+def             ------------------------------------------------------------
+xyz             ------------------------------------------------------------
+
+
+abc             GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC
+def             -----------------------------------------CGCGAUGCAUGCAU-CGAU
+xyz             -------------------------------------CAUGCAUCGUACGUACGCAUGAC
+
+
+abc             UGACUAGUCAGCUAGCAUCGAUCAGU
+def             CGAUCAGUCAGUCGAU----------
+xyz             UGCUGCAUCA----------------"""),
+            StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+
+
+abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA 60
+def             ------------------------------------------------------------
+xyz             ------------------------------------------------------------
+
+
+abc             GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC 11
+def             -----------------------------------------CGCGAUGCAUGCAU-CGAU 18
+xyz             -------------------------------------CAUGCAUCGUACGUACGCAUGAC 23
+                                                         :    * * * *    **
+
+abc             UGACUAGUCAGCUAGCAUCGAUCAGU 145
+def             CGAUCAGUCAGUCGAU---------- 34
+xyz             UGCUGCAUCA---------------- 33
+                *     ***""")
+            ]
+        self.invalid_clustal_out = [StringIO('\n'.join(['dshfjsdfhdfsj',
+                                                        'hfsdjksdfhjsdf'])),
+                                    StringIO('\n'.join(['hfsdjksdfhjsdf'])),
+                                    StringIO('\n'.join(['dshfjsdfhdfsj',
+                                                        'dshfjsdfhdfsj',
+                                                        'hfsdjksdfhjsdf'])),
+                                    StringIO('\n'.join(['dshfjsdfhdfsj',
+                                                        '\t',
+                                                        'hfsdjksdfhjsdf'])),
+                                    StringIO('\n'.join(['dshfj\tdfhdfsj',
+                                                        'hfsdjksdfhjsdf'])),
+                                    StringIO('\n'.join(['dshfjsdfhdfsj',
+                                                        'hfsdjk\tdfhjsdf'])),
+                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+
+
+adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
+------------------------------------------------------------
+adk -----GGGGGGG------------------------------------------------
+"""),
+                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+
+
+adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
+adk -----GGGGGGG------------------------------------------------
+
+
+adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
+adk -----GGGGGGG---------------------------------------------
+"""),
+                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+
+
+adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
+adk -----GGGGGGG---------------------------------------------
+
+
+adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCA
+adk -----GGGGGGG---------------------------------------------
+"""),
+
+                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+
+
+adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
+------------------------------------------------------------
+adk -----GGGGGGG------------------------------------------------
+"""),
+
+                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+
+
+GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
+------------------------------------------------------------
+------------------------------------------------------------
+
+
+GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC
+-----------------------------------------CGCGAUGCAUGCAU-CGAU
+------------------------------------------------------------
+                                         :    * * * *    **
+
+UGACUAGUCAGCUAGCAUCGAUCAGU 145
+CGAUCAGUCAGUCGAU---------- 34
+UGCUGCAUCA---------------- 33
+*     ***""")]
+
+    def test_alignment_to_clustal_with_empty_input(self):
+        result = _clustal_to_alignment(StringIO())
+        self.assertEqual(dict(result), {})
+
+    def test_alignment_to_clustal_with_bad_input(self):
+        BAD = StringIO('\n'.join(['dshfjsdfhdfsj', 'hfsdjksdfhjsdf']))
+        result = _clustal_to_alignment(BAD, strict=False)
+        self.assertEqual(dict(result), {})
+        # should fail unless we turned strict processing off
+        with self.assertRaises(ClustalFormatError):
+            BAD.seek(0)
+            dict(_clustal_to_alignment(BAD))
+
+    def test_valid_alignment_to_clustal_and_clustal_to_alignment(self):
+        import os
+        for valid_out in self.valid_clustal_out:
+            fname = "test.aln"
+            testfile = open(fname, 'w')
+            result_before = _clustal_to_alignment(valid_out)
+            _alignment_to_clustal(result_before, testfile)
+            testfile.close()
+            testfile = open(fname, 'r')
+            result_after = _clustal_to_alignment(testfile)
+            self.assertEquals(result_before, result_after)
+        os.remove(fname)
+
+    def test_invalid_alignment_to_clustal_and_clustal_to_alignment(self):
+        for invalid_out in self.invalid_clustal_out:
+            with self.assertRaises(ClustalFormatError):
+                dict(_clustal_to_alignment(invalid_out, strict=True))
+
+    def test_clustal_sniffer_valid_files(self):
+        for valid_out in self.valid_clustal_out:
+            self.assertEqual(_clustal_sniffer(valid_out), (True, {}))
+
+    def test_clustal_sniffer_invalid_files(self):
+        for invalid_out in self.invalid_clustal_out:
+            self.assertEqual(_clustal_sniffer(invalid_out), (False, {}))
+        # sniffer should return False on empty file (which isn't contained
+        # in self.invalid_clustal_out since an empty file is a valid output)
+        self.assertEqual(_clustal_sniffer(StringIO()), (False, {}))
+
+if __name__ == '__main__':  # allow running this test module directly
+    main()
diff --git a/skbio/io/tests/test_fasta.py b/skbio/io/tests/test_fasta.py
new file mode 100644
index 0000000..e8e6647
--- /dev/null
+++ b/skbio/io/tests/test_fasta.py
@@ -0,0 +1,893 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import map, range, zip
+from six import StringIO
+
+from unittest import TestCase, main
+
+from skbio import (BiologicalSequence, NucleotideSequence, DNA, RNA, Protein,
+                   ProteinSequence, SequenceCollection, Alignment)
+from skbio.sequence import BiologicalSequenceError
+from skbio.io import FASTAFormatError
+from skbio.io.fasta import (
+    _fasta_sniffer, _fasta_to_generator, _fasta_to_biological_sequence,
+    _fasta_to_nucleotide_sequence, _fasta_to_dna_sequence,
+    _fasta_to_rna_sequence, _fasta_to_protein_sequence,
+    _fasta_to_sequence_collection, _fasta_to_alignment, _generator_to_fasta,
+    _biological_sequence_to_fasta, _nucleotide_sequence_to_fasta,
+    _dna_sequence_to_fasta, _rna_sequence_to_fasta, _protein_sequence_to_fasta,
+    _sequence_collection_to_fasta, _alignment_to_fasta)
+from skbio.util import get_data_path
+
+
+class SnifferTests(TestCase):
+    def setUp(self):
+        self.positive_fps = list(map(get_data_path, [
+            'fasta_3_seqs_defaults',
+            'fasta_max_width_1',
+            'fasta_single_bio_seq_non_defaults',
+            'fasta_single_prot_seq_non_defaults',
+            'fasta_3_seqs_non_defaults',
+            'fasta_max_width_5',
+            'fasta_single_dna_seq_defaults',
+            'fasta_single_rna_seq_defaults',
+            'fasta_description_newline_replacement_empty_str',
+            'fasta_multi_seq',
+            'fasta_single_dna_seq_non_defaults',
+            'fasta_single_rna_seq_non_defaults',
+            'fasta_description_newline_replacement_multi_char',
+            'fasta_prot_seqs_odd_labels',
+            'fasta_single_nuc_seq_defaults',
+            'fasta_single_seq',
+            'fasta_id_whitespace_replacement_empty_str',
+            'fasta_sequence_collection_different_type',
+            'fasta_single_nuc_seq_non_defaults',
+            'fasta_id_whitespace_replacement_multi_char',
+            'fasta_single_bio_seq_defaults',
+            'fasta_single_prot_seq_defaults',
+            'fasta_10_seqs',
+            'fasta_invalid_after_10_seqs',
+            'fasta_mixed_qual_scores',
+            'qual_invalid_qual_scores_float',
+            'qual_invalid_qual_scores_string'
+        ]))
+
+        self.negative_fps = list(map(get_data_path, [
+            'empty',
+            'whitespace_only',
+            'fasta_invalid_missing_header',
+            'fasta_invalid_blank_line',
+            'fasta_invalid_whitespace_only_line',
+            'fasta_invalid_missing_seq_data_first',
+            'fasta_invalid_missing_seq_data_middle',
+            'fasta_invalid_missing_seq_data_last',
+            'fasta_invalid_legacy_format',
+            'fasta_id_whitespace_replacement_none',
+            'fasta_description_newline_replacement_none',
+            'qual_2_seqs_defaults',
+            'qual_3_seqs_defaults',
+            'qual_3_seqs_defaults_desc_mismatch',
+            'qual_3_seqs_defaults_extra',
+            'qual_3_seqs_defaults_id_mismatch',
+            'qual_3_seqs_defaults_length_mismatch',
+            'qual_3_seqs_non_defaults',
+            'qual_description_newline_replacement_empty_str',
+            'qual_description_newline_replacement_multi_char',
+            'qual_description_newline_replacement_none',
+            'qual_id_whitespace_replacement_empty_str',
+            'qual_id_whitespace_replacement_multi_char',
+            'qual_id_whitespace_replacement_none',
+            'qual_invalid_blank_line',
+            'qual_invalid_legacy_format',
+            'qual_invalid_missing_header',
+            'qual_invalid_missing_qual_scores_first',
+            'qual_invalid_missing_qual_scores_last',
+            'qual_invalid_missing_qual_scores_middle',
+            'qual_invalid_whitespace_only_line',
+            'qual_max_width_1',
+            'qual_max_width_5',
+            'qual_multi_seq',
+            'qual_multi_seq_roundtrip',
+            'qual_prot_seqs_odd_labels',
+            'qual_sequence_collection_different_type',
+            'qual_single_bio_seq_non_defaults',
+            'qual_single_dna_seq_non_defaults',
+            'qual_single_nuc_seq_non_defaults',
+            'qual_single_prot_seq_non_defaults',
+            'qual_single_rna_seq_non_defaults',
+            'qual_single_seq'
+        ]))
+
+    def test_positives(self):
+        for fp in self.positive_fps:
+            self.assertEqual(_fasta_sniffer(fp), (True, {}))
+
+    def test_negatives(self):
+        for fp in self.negative_fps:
+            self.assertEqual(_fasta_sniffer(fp), (False, {}))
+
+
+class ReaderTests(TestCase):
+    def setUp(self):
+        # each structure stores the sequence generator results (expanded into a
+        # list) that we expect to obtain from reading, matched with kwargs to
+        # pass to the reader, and fasta and qual filepaths that should
+        # deserialize into the expected generator results
+
+        # empty file shouldn't yield sequences
+        self.empty = ([], {}, list(map(get_data_path, ['empty'])),
+                      list(map(get_data_path, ['empty'])))
+
+        # single sequence
+        self.single = (
+            [BiologicalSequence(
+                'ACGT-acgt.', id='seq1', description='desc1',
+                quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])],
+            {},
+            list(map(get_data_path, ['fasta_single_seq',
+                                     'fasta_max_width_1'])),
+            list(map(get_data_path, ['qual_single_seq', 'qual_max_width_1']))
+        )
+
+        # multiple sequences
+        self.multi = (
+            [BiologicalSequence(
+                'ACGT-acgt.', id='seq1', description='desc1',
+                quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456]),
+             BiologicalSequence('A', id='_____seq__2_', quality=[42]),
+             BiologicalSequence(
+                'AACGGuA', description='desc3', quality=[0, 0, 0, 0, 0, 0, 0]),
+             BiologicalSequence('AcGtUTu', quality=[1, 2, 3, 4, 5, 6, 777]),
+             BiologicalSequence(
+                'ACGTTGCAccGG',
+                quality=[55, 10, 0, 999, 1, 1, 8, 775, 40, 10, 10, 0]),
+             BiologicalSequence('ACGUU', quality=[10, 9, 8, 7, 6]),
+             BiologicalSequence(
+                 'pQqqqPPQQQ', id='proteinseq',
+                 description='detailed description \t\twith  new  lines',
+                 quality=[42, 42, 442, 442, 42, 42, 42, 42, 42, 43])],
+            {},
+            list(map(get_data_path, ['fasta_multi_seq', 'fasta_max_width_5'])),
+            list(map(get_data_path, ['qual_multi_seq', 'qual_max_width_5']))
+        )
+
+        # test constructor parameter, as well as odd labels (label only
+        # containing whitespace, label description preceded by multiple spaces,
+        # no id) and leading/trailing whitespace on sequence data. for qual
+        # files, in addition to the odd labels, test leading/trailing
+        # whitespace on qual scores, as well as strange number formatting.
+        # also test that fasta and qual headers do not need to match
+        # exactly, only that they need to match exactly after parsing (e.g.,
+        # after stripping leading/trailing whitespace from descriptions)
+        self.odd_labels_different_type = (
+            [Protein('DEFQfp', quality=[0, 0, 1, 5, 44, 0]),
+             Protein(
+                 'SKBI', description='skbio', quality=[1, 2, 33, 123456789])],
+            {'constructor': ProteinSequence},
+            list(map(get_data_path, ['fasta_prot_seqs_odd_labels'])),
+            list(map(get_data_path, ['qual_prot_seqs_odd_labels']))
+        )
+
+        # sequences that can be loaded into a SequenceCollection or Alignment.
+        # they are also a different type than BiologicalSequence in order to
+        # exercise the constructor parameter
+        self.sequence_collection_different_type = (
+            [RNA('AUG', quality=[20, 20, 21]),
+             RNA('AUC', id='rnaseq-1', description='rnaseq desc 1',
+                 quality=[10, 9, 10]),
+             RNA('AUG', id='rnaseq-2', description='rnaseq desc 2',
+                 quality=[9, 99, 999])],
+            {'constructor': RNA},
+            list(map(get_data_path,
+                     ['fasta_sequence_collection_different_type'])),
+            list(map(get_data_path,
+                     ['qual_sequence_collection_different_type']))
+        )
+
+        # store fasta filepath, kwargs, error type, and expected error message
+        # for invalid input.
+        #
+        # note: there is some duplication in testing that fasta and qual
+        # parsers raise expected errors. even though the parsers share the same
+        # underlying logic, these tests are here as a safeguard in case the
+        # code is refactored in the future such that fasta and qual have
+        # different implementations (e.g., if qual is written in cython while
+        # fasta remains in python)
+        self.invalid_fps = list(map(lambda e: (get_data_path(e[0]),
+                                               e[1], e[2], e[3]), [
+            # whitespace-only fasta and qual
+            ('whitespace_only', {}, FASTAFormatError,
+             'without a header.*FASTA'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('whitespace_only')}, FASTAFormatError,
+             'without a header.*QUAL'),
+
+            # fasta and qual missing header
+            ('fasta_invalid_missing_header', {}, FASTAFormatError,
+             'without a header.*FASTA'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_missing_header')},
+             FASTAFormatError, 'without a header.*QUAL'),
+
+            # fasta and qual with blank line
+            ('fasta_invalid_blank_line', {}, FASTAFormatError,
+             'whitespace-only.*FASTA'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_blank_line')},
+             FASTAFormatError, 'whitespace-only.*QUAL'),
+
+            # fasta and qual with whitespace-only line
+            ('fasta_invalid_whitespace_only_line', {}, FASTAFormatError,
+             'whitespace-only.*FASTA'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_whitespace_only_line')},
+             FASTAFormatError, 'whitespace-only.*QUAL'),
+
+            # fasta and qual missing record data (first record)
+            ('fasta_invalid_missing_seq_data_first', {}, FASTAFormatError,
+             'without sequence data'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_missing_qual_scores_first')},
+             FASTAFormatError, 'without quality scores'),
+
+            # fasta and qual missing record data (middle record)
+            ('fasta_invalid_missing_seq_data_middle', {}, FASTAFormatError,
+             'without sequence data'),
+            ('fasta_3_seqs_defaults',
+             {'qual':
+              get_data_path('qual_invalid_missing_qual_scores_middle')},
+             FASTAFormatError, 'without quality scores'),
+
+            # fasta and qual missing record data (last record)
+            ('fasta_invalid_missing_seq_data_last', {}, FASTAFormatError,
+             'without sequence data'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_missing_qual_scores_last')},
+             FASTAFormatError, 'without quality scores'),
+
+            # fasta and qual in legacy format (;)
+            ('fasta_invalid_legacy_format', {}, FASTAFormatError,
+             'without a header.*FASTA'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_legacy_format')},
+             FASTAFormatError, 'without a header.*QUAL'),
+
+            # qual file with an extra record
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_3_seqs_defaults_extra')},
+             FASTAFormatError, 'QUAL file has more'),
+
+            # fasta file with an extra record
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_2_seqs_defaults')},
+             FASTAFormatError, 'FASTA file has more'),
+
+            # id mismatch between fasta and qual
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_3_seqs_defaults_id_mismatch')},
+             FASTAFormatError,
+             'IDs do not match.*\'s_e_q_2\' != \'s_e_q_42\''),
+
+            # description mismatch between fasta and qual
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_3_seqs_defaults_desc_mismatch')},
+             FASTAFormatError,
+             'Descriptions do not match.*\'desc 2\' != \'desc 42\''),
+
+            # sequence and quality score length mismatch between fasta and qual
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_3_seqs_defaults_length_mismatch')},
+             BiologicalSequenceError,
+             'Number of Phred quality scores \(3\).*\(4\)'),
+
+            # invalid qual scores (string value can't be converted to integer)
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_qual_scores_string')},
+             FASTAFormatError,
+             'quality scores to integers:\n100 0 1a -42'),
+
+            # invalid qual scores (float value can't be converted to integer)
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_qual_scores_float')},
+             FASTAFormatError,
+             'quality scores to integers:\n42    41.0 39 40'),
+
+            # invalid qual scores (negative integer)
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_qual_scores_negative')},
+             BiologicalSequenceError,
+             'Phred quality scores.*greater than or equal to zero'),
+
+            # misc. invalid files used elsewhere in the tests
+            ('fasta_invalid_after_10_seqs', {}, FASTAFormatError,
+             'without sequence data'),
+            ('fasta_id_whitespace_replacement_none', {}, FASTAFormatError,
+             'whitespace-only.*FASTA'),
+            ('fasta_description_newline_replacement_none', {},
+             FASTAFormatError, 'whitespace-only.*FASTA')
+        ]))
+
+    # extensive tests for fasta -> generator reader since it is used by all
+    # other fasta -> object readers
+
+    def test_fasta_to_generator_valid_files(self):
+        test_cases = (self.empty, self.single, self.multi,
+                      self.odd_labels_different_type,
+                      self.sequence_collection_different_type)
+
+        # Strategy:
+        #   for each fasta file, read it without its corresponding qual file,
+        #   and ensure observed vs. expected match, ignoring quality scores in
+        #   expected. next, parse the current fasta file with each
+        #   corresponding quality file and ensure that observed vs. expected
+        #   match, this time taking quality scores into account. this
+        #   sufficiently exercises parsing a standalone fasta file and paired
+        #   fasta/qual files
+        for exp, kwargs, fasta_fps, qual_fps in test_cases:
+            for fasta_fp in fasta_fps:
+                obs = list(_fasta_to_generator(fasta_fp, **kwargs))
+
+                self.assertEqual(len(obs), len(exp))
+                for o, e in zip(obs, exp):
+                    self.assertTrue(o.equals(e, ignore=['quality']))
+
+                for qual_fp in qual_fps:
+                    obs = list(_fasta_to_generator(fasta_fp, qual=qual_fp,
+                                                   **kwargs))
+
+                    self.assertEqual(len(obs), len(exp))
+                    for o, e in zip(obs, exp):
+                        self.assertTrue(o.equals(e))
+
+    def test_fasta_to_generator_invalid_files(self):
+        for fp, kwargs, error_type, error_msg_regex in self.invalid_fps:
+            with self.assertRaisesRegexp(error_type, error_msg_regex):
+                list(_fasta_to_generator(fp, **kwargs))
+
+    # light testing of fasta -> object readers to ensure interface is present
+    # and kwargs are passed through. extensive testing of underlying reader is
+    # performed above
+
+    def test_fasta_to_any_sequence(self):
+        """Each fasta -> single-sequence reader returns the requested record.
+
+        Covers: empty input error, default first record, ``seq_num``
+        selection (first/middle/last), and out-of-range ``seq_num`` errors,
+        with and without a paired qual file.
+        """
+        for constructor, reader_fn in ((BiologicalSequence,
+                                        _fasta_to_biological_sequence),
+                                       (NucleotideSequence,
+                                        _fasta_to_nucleotide_sequence),
+                                       (DNA,
+                                        _fasta_to_dna_sequence),
+                                       (RNA,
+                                        _fasta_to_rna_sequence),
+                                       (Protein,
+                                        _fasta_to_protein_sequence)):
+
+            # empty file
+            empty_fp = get_data_path('empty')
+            with self.assertRaisesRegexp(ValueError, '1st sequence'):
+                reader_fn(empty_fp)
+            with self.assertRaisesRegexp(ValueError, '1st sequence'):
+                reader_fn(empty_fp, qual=empty_fp)
+
+            # the sequences in the following files don't necessarily make sense
+            # for each of the sequence object types that they're read into
+            # (e.g., reading a protein sequence into a dna sequence object).
+            # however, for the purposes of testing the various
+            # fasta -> sequence readers, this works out okay as it is valid to
+            # construct a sequence object with invalid characters. we're
+            # interested in testing the reading logic here, and don't care so
+            # much about constructing semantically-meaningful/valid sequence
+            # objects
+
+            # file with only 1 seq, get first
+            fasta_fps = list(map(get_data_path,
+                                 ['fasta_single_seq', 'fasta_max_width_1']))
+            for fasta_fp in fasta_fps:
+                exp = constructor(
+                    'ACGT-acgt.', id='seq1', description='desc1',
+                    quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])
+
+                # without qual, quality scores are absent from the result
+                obs = reader_fn(fasta_fp)
+                self.assertTrue(obs.equals(exp, ignore=['quality']))
+
+                qual_fps = list(map(get_data_path,
+                                    ['qual_single_seq', 'qual_max_width_1']))
+                for qual_fp in qual_fps:
+                    obs = reader_fn(fasta_fp, qual=qual_fp)
+                    self.assertTrue(obs.equals(exp))
+
+            # file with multiple seqs
+            fasta_fps = list(map(get_data_path,
+                                 ['fasta_multi_seq', 'fasta_max_width_5']))
+            qual_fps = list(map(get_data_path,
+                                ['qual_multi_seq', 'qual_max_width_5']))
+            for fasta_fp in fasta_fps:
+                # get first
+                exp = constructor(
+                    'ACGT-acgt.', id='seq1', description='desc1',
+                    quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])
+
+                obs = reader_fn(fasta_fp)
+                self.assertTrue(obs.equals(exp, ignore=['quality']))
+
+                for qual_fp in qual_fps:
+                    obs = reader_fn(fasta_fp, qual=qual_fp)
+                    self.assertTrue(obs.equals(exp))
+
+                # get middle
+                exp = constructor('AcGtUTu', quality=[1, 2, 3, 4, 5, 6, 777])
+
+                obs = reader_fn(fasta_fp, seq_num=4)
+                self.assertTrue(obs.equals(exp, ignore=['quality']))
+
+                for qual_fp in qual_fps:
+                    obs = reader_fn(fasta_fp, seq_num=4, qual=qual_fp)
+                    self.assertTrue(obs.equals(exp))
+
+                # get last
+                exp = constructor(
+                    'pQqqqPPQQQ', id='proteinseq',
+                    description='detailed description \t\twith  new  lines',
+                    quality=[42, 42, 442, 442, 42, 42, 42, 42, 42, 43])
+
+                obs = reader_fn(fasta_fp, seq_num=7)
+                self.assertTrue(obs.equals(exp, ignore=['quality']))
+
+                for qual_fp in qual_fps:
+                    obs = reader_fn(fasta_fp, seq_num=7, qual=qual_fp)
+                    self.assertTrue(obs.equals(exp))
+
+                # seq_num too large
+                with self.assertRaisesRegexp(ValueError, '8th sequence'):
+                    reader_fn(fasta_fp, seq_num=8)
+                for qual_fp in qual_fps:
+                    with self.assertRaisesRegexp(ValueError, '8th sequence'):
+                        reader_fn(fasta_fp, seq_num=8, qual=qual_fp)
+
+                # seq_num too small
+                with self.assertRaisesRegexp(ValueError, '`seq_num`=0'):
+                    reader_fn(fasta_fp, seq_num=0)
+                for qual_fp in qual_fps:
+                    with self.assertRaisesRegexp(ValueError, '`seq_num`=0'):
+                        reader_fn(fasta_fp, seq_num=0, qual=qual_fp)
+
+    def test_fasta_to_sequence_collection_and_alignment(self):
+        test_cases = (self.empty, self.single,
+                      self.sequence_collection_different_type)
+
+        for constructor, reader_fn in ((SequenceCollection,
+                                        _fasta_to_sequence_collection),
+                                       (Alignment,
+                                        _fasta_to_alignment)):
+            # see comment in test_fasta_to_generator_valid_files (above) for
+            # testing strategy
+            for exp_list, kwargs, fasta_fps, qual_fps in test_cases:
+                exp = constructor(exp_list)
+
+                for fasta_fp in fasta_fps:
+                    obs = reader_fn(fasta_fp, **kwargs)
+
+                    # TODO remove this custom equality testing code when
+                    # SequenceCollection has an equals method (part of #656).
+                    # We need this method to include IDs and description in the
+                    # comparison (not part of SequenceCollection.__eq__).
+                    self.assertEqual(obs, exp)
+                    for o, e in zip(obs, exp):
+                        self.assertTrue(o.equals(e, ignore=['quality']))
+
+                    for qual_fp in qual_fps:
+                        obs = reader_fn(fasta_fp, qual=qual_fp, **kwargs)
+
+                        # TODO remove this custom equality testing code when
+                        # SequenceCollection has an equals method (part of
+                        # #656). We need this method to include IDs and
+                        # description in the comparison (not part of
+                        # SequenceCollection.__eq__).
+                        self.assertEqual(obs, exp)
+                        for o, e in zip(obs, exp):
+                            self.assertTrue(o.equals(e))
+
+
class WriterTests(TestCase):
    """Tests for the object -> fasta (and optional qual) writers."""

    def setUp(self):
        # Individual sequences of each supported type, with assorted
        # quality scores, ids, and descriptions (including whitespace).
        self.bio_seq1 = BiologicalSequence(
            'ACGT-acgt.', id='seq1', description='desc1',
            quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])
        self.bio_seq2 = BiologicalSequence(
            'A', id=' \n  \nseq \t2 ', quality=[42])
        self.bio_seq3 = BiologicalSequence(
            'AACGGuA', description='desc3', quality=[0, 0, 0, 0, 0, 0, 0])
        self.nuc_seq = NucleotideSequence(
            'AcGtUTu', quality=[1, 2, 3, 4, 5, 6, 777])
        self.dna_seq = DNA(
            'ACGTTGCAccGG',
            quality=[55, 10, 0, 999, 1, 1, 8, 775, 40, 10, 10, 0])
        self.rna_seq = RNA('ACGUU', quality=[10, 9, 8, 7, 6])
        self.prot_seq = Protein(
            'pQqqqPPQQQ', id='proteinseq',
            description='\ndetailed\ndescription \t\twith  new\n\nlines\n\n\n',
            quality=[42, 42, 442, 442, 42, 42, 42, 42, 42, 43])

        seqs = [
            RNA('UUUU', id='s\te\tq\t1', description='desc\n1',
                quality=[1234, 0, 0, 2]),
            BiologicalSequence(
                'CATC', id='s\te\tq\t2', description='desc\n2',
                quality=[1, 11, 111, 11112]),
            Protein('sits', id='s\te\tq\t3', description='desc\n3',
                    quality=[12345, 678909, 999999, 4242424242])
        ]
        self.seq_coll = SequenceCollection(seqs)
        self.align = Alignment(seqs)

        def empty_gen():
            # A generator that yields nothing. Use a bare `return` instead of
            # `raise StopIteration()`: under PEP 479 (Python 3.7+), raising
            # StopIteration inside a generator body is converted to a
            # RuntimeError instead of ending iteration.
            return
            yield

        def single_seq_gen():
            yield self.bio_seq1

        # generate sequences with descriptions containing newlines (to test
        # description_newline_replacement)
        def newline_description_gen():
            yield self.prot_seq
            yield DNA('AGGAGAATA', id='foo', description='\n\n\n\n',
                      quality=range(9))

        # generate sequences with ids containing whitespace (to test
        # id_whitespace_replacement)
        def whitespace_id_gen():
            yield self.bio_seq2
            yield RNA('UA', id='\n\t \t', description='a\nb',
                      quality=[1000, 1])

        # multiple sequences of mixed types, lengths, and metadata. lengths are
        # chosen to exercise various splitting cases when testing max_width,
        # including exercising the different splitting algorithms used for
        # sequence data vs. quality scores
        def multi_seq_gen():
            for seq in (self.bio_seq1, self.bio_seq2, self.bio_seq3,
                        self.nuc_seq, self.dna_seq, self.rna_seq,
                        self.prot_seq):
                yield seq

        # can be serialized if no qual file is provided, else it should raise
        # an error because one seq has qual scores and the other doesn't
        def mixed_qual_score_gen():
            missing_qual_seq = BiologicalSequence(
                'AAAAT', id='da,dadadada', description='10 hours')
            for seq in self.bio_seq1, missing_qual_seq:
                yield seq

        self.mixed_qual_score_gen = mixed_qual_score_gen()

        # store sequence generator to serialize, writer kwargs (if any), and
        # fasta and qual filepaths of expected results
        self.objs_fps = list(map(lambda e: (e[0], e[1], get_data_path(e[2]),
                                            get_data_path(e[3])), [
            (empty_gen(), {}, 'empty', 'empty'),
            (single_seq_gen(), {}, 'fasta_single_seq', 'qual_single_seq'),

            # no splitting of sequence or qual data across lines b/c max_width
            # is sufficiently large
            (single_seq_gen(), {'max_width': 32}, 'fasta_single_seq',
             'qual_single_seq'),

            # splitting algorithm for sequence and qual scores is different;
            # make sure individual qual scores aren't split across lines even
            # if they exceed max_width
            (single_seq_gen(), {'max_width': 1}, 'fasta_max_width_1',
             'qual_max_width_1'),

            (multi_seq_gen(), {}, 'fasta_multi_seq', 'qual_multi_seq'),
            (multi_seq_gen(), {'max_width': 5}, 'fasta_max_width_5',
             'qual_max_width_5'),
            (newline_description_gen(),
             {'description_newline_replacement': ':-)'},
             'fasta_description_newline_replacement_multi_char',
             'qual_description_newline_replacement_multi_char'),
            (newline_description_gen(),
             {'description_newline_replacement': ''},
             'fasta_description_newline_replacement_empty_str',
             'qual_description_newline_replacement_empty_str',),
            (newline_description_gen(),
             {'description_newline_replacement': None},
             'fasta_description_newline_replacement_none',
             'qual_description_newline_replacement_none'),
            (whitespace_id_gen(),
             {'id_whitespace_replacement': '>:o'},
             'fasta_id_whitespace_replacement_multi_char',
             'qual_id_whitespace_replacement_multi_char'),
            (whitespace_id_gen(),
             {'id_whitespace_replacement': ''},
             'fasta_id_whitespace_replacement_empty_str',
             'qual_id_whitespace_replacement_empty_str'),
            (whitespace_id_gen(),
             {'id_whitespace_replacement': None},
             'fasta_id_whitespace_replacement_none',
             'qual_id_whitespace_replacement_none'),
        ]))

        def blank_seq_gen():
            for seq in self.bio_seq1, BiologicalSequence(''):
                yield seq

        # generators or parameter combos that cannot be written in fasta
        # format, paired with kwargs (if any), error type, and expected error
        # message regexp
        self.invalid_objs = [
            (blank_seq_gen(), {}, ValueError, '2nd.*empty'),
            (single_seq_gen(),
             {'max_width': 0}, ValueError, 'max_width=0'),
            (multi_seq_gen(), {'id_whitespace_replacement': '-\n_'},
             ValueError, 'Newline character'),
            (multi_seq_gen(), {'description_newline_replacement': '-.-\n'},
             ValueError, 'Newline character'),
            (mixed_qual_score_gen(), {'qual': StringIO()}, ValueError,
             '2nd sequence.*does not have quality scores')
        ]

    # extensive tests for generator -> fasta writer since it is used by all
    # other object -> fasta writers

    def test_generator_to_fasta_no_qual(self):
        # test writing standalone fasta (i.e., without a qual file)
        for obj, kwargs, fp, _ in self.objs_fps:
            fh = StringIO()
            _generator_to_fasta(obj, fh, **kwargs)
            obs = fh.getvalue()
            fh.close()

            # NOTE(review): the 'U' open mode is removed in Python 3.11;
            # switch to the default 'r' mode when Python 2 support is dropped.
            with open(fp, 'U') as fh:
                exp = fh.read()

            self.assertEqual(obs, exp)

    def test_generator_to_fasta_mixed_qual_scores(self):
        # test writing some sequences with qual scores and some without is
        # possible if no qual output file is specified
        fh = StringIO()
        _generator_to_fasta(self.mixed_qual_score_gen, fh)
        obs = fh.getvalue()
        fh.close()

        with open(get_data_path('fasta_mixed_qual_scores'), 'U') as fh:
            exp = fh.read()

        self.assertEqual(obs, exp)

    def test_generator_to_fasta_with_qual(self):
        # test writing fasta and qual files
        for obj, kwargs, fasta_fp, qual_fp in self.objs_fps:
            if qual_fp is not None:
                fasta_fh = StringIO()
                qual_fh = StringIO()
                _generator_to_fasta(obj, fasta_fh, qual=qual_fh, **kwargs)
                obs_fasta = fasta_fh.getvalue()
                obs_qual = qual_fh.getvalue()
                fasta_fh.close()
                qual_fh.close()

                with open(fasta_fp, 'U') as fh:
                    exp_fasta = fh.read()
                with open(qual_fp, 'U') as fh:
                    exp_qual = fh.read()

                self.assertEqual(obs_fasta, exp_fasta)
                self.assertEqual(obs_qual, exp_qual)

    def test_generator_to_fasta_invalid_input(self):
        # each invalid input must raise the expected error type with a
        # message matching the expected regexp
        for obj, kwargs, error_type, error_msg_regexp in self.invalid_objs:
            fh = StringIO()
            with self.assertRaisesRegexp(error_type, error_msg_regexp):
                _generator_to_fasta(obj, fh, **kwargs)
            fh.close()

    # light testing of object -> fasta writers to ensure interface is present
    # and kwargs are passed through. extensive testing of underlying writer is
    # performed above

    def test_any_sequence_to_fasta(self):
        # store writer function, sequence object to write, expected
        # fasta filepath for default parameters, expected fasta filepath for
        # non-defaults, and expected qual filepath for non-defaults
        id_ = 'f o o'
        desc = 'b\na\nr'
        test_data = (
            (_biological_sequence_to_fasta,
             BiologicalSequence('ACGT', id=id_, description=desc,
                                quality=range(1, 5)),
             ('fasta_single_bio_seq_defaults',
              'fasta_single_bio_seq_non_defaults',
              'qual_single_bio_seq_non_defaults')),
            (_nucleotide_sequence_to_fasta,
             NucleotideSequence('ACGTU', id=id_, description=desc,
                                quality=range(5)),
             ('fasta_single_nuc_seq_defaults',
              'fasta_single_nuc_seq_non_defaults',
              'qual_single_nuc_seq_non_defaults')),
            (_dna_sequence_to_fasta,
             DNA('TACG', id=id_, description=desc, quality=range(4)),
             ('fasta_single_dna_seq_defaults',
              'fasta_single_dna_seq_non_defaults',
              'qual_single_dna_seq_non_defaults')),
            (_rna_sequence_to_fasta,
             RNA('UACG', id=id_, description=desc, quality=range(2, 6)),
             ('fasta_single_rna_seq_defaults',
              'fasta_single_rna_seq_non_defaults',
              'qual_single_rna_seq_non_defaults')),
            (_protein_sequence_to_fasta,
             Protein('PQQ', id=id_, description=desc, quality=[42, 41, 40]),
             ('fasta_single_prot_seq_defaults',
              'fasta_single_prot_seq_non_defaults',
              'qual_single_prot_seq_non_defaults')))

        for fn, obj, fps in test_data:
            defaults_fp, non_defaults_fasta_fp, non_defaults_qual_fp = fps

            # test writing with default parameters
            fh = StringIO()
            fn(obj, fh)
            obs = fh.getvalue()
            fh.close()

            with open(get_data_path(defaults_fp), 'U') as fh:
                exp = fh.read()

            self.assertEqual(obs, exp)

            # test writing with non-defaults
            fasta_fh = StringIO()
            qual_fh = StringIO()
            fn(obj, fasta_fh, id_whitespace_replacement='-',
               description_newline_replacement='_', max_width=1, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()

            with open(get_data_path(non_defaults_fasta_fp), 'U') as fh:
                exp_fasta = fh.read()
            with open(get_data_path(non_defaults_qual_fp), 'U') as fh:
                exp_qual = fh.read()

            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)

    def test_any_sequences_to_fasta(self):
        for fn, obj in ((_sequence_collection_to_fasta, self.seq_coll),
                        (_alignment_to_fasta, self.align)):
            # test writing with default parameters
            fh = StringIO()
            fn(obj, fh)
            obs = fh.getvalue()
            fh.close()

            with open(get_data_path('fasta_3_seqs_defaults'), 'U') as fh:
                exp = fh.read()

            self.assertEqual(obs, exp)

            # test writing with non-defaults
            fasta_fh = StringIO()
            qual_fh = StringIO()
            fn(obj, fasta_fh, id_whitespace_replacement='*',
               description_newline_replacement='+', max_width=3, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()

            with open(get_data_path('fasta_3_seqs_non_defaults'), 'U') as fh:
                exp_fasta = fh.read()
            with open(get_data_path('qual_3_seqs_non_defaults'), 'U') as fh:
                exp_qual = fh.read()

            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)
+
+
class RoundtripTests(TestCase):
    """Round-trip tests: read fasta/qual into memory, write it back, compare."""

    def test_roundtrip_generators(self):
        # fasta and qual files streamed through the generator reader and
        # writer must come back out byte-for-byte identical
        name_pairs = [('empty', 'empty'),
                      ('fasta_multi_seq_roundtrip',
                       'qual_multi_seq_roundtrip')]
        paths = [[get_data_path(name) for name in pair]
                 for pair in name_pairs]

        for fasta_path, qual_path in paths:
            with open(fasta_path, 'U') as in_fh:
                expected_fasta = in_fh.read()
            with open(qual_path, 'U') as in_fh:
                expected_qual = in_fh.read()

            fasta_out = StringIO()
            qual_out = StringIO()
            seq_gen = _fasta_to_generator(fasta_path, qual=qual_path)
            _generator_to_fasta(seq_gen, fasta_out, qual=qual_out)
            written_fasta = fasta_out.getvalue()
            written_qual = qual_out.getvalue()
            fasta_out.close()
            qual_out.close()

            self.assertEqual(written_fasta, expected_fasta)
            self.assertEqual(written_qual, expected_qual)

    def test_roundtrip_sequence_collections_and_alignments(self):
        name_pairs = [('empty', 'empty'),
                      ('fasta_sequence_collection_different_type',
                       'qual_sequence_collection_different_type')]
        paths = [[get_data_path(name) for name in pair]
                 for pair in name_pairs]

        reader_writer_pairs = ((_fasta_to_sequence_collection,
                                _sequence_collection_to_fasta),
                               (_fasta_to_alignment,
                                _alignment_to_fasta))
        for reader, writer in reader_writer_pairs:
            for fasta_path, qual_path in paths:
                # read
                original = reader(fasta_path, qual=qual_path)

                # write
                fasta_out = StringIO()
                qual_out = StringIO()
                writer(original, fasta_out, qual=qual_out)
                fasta_out.seek(0)
                qual_out.seek(0)

                # read
                round_tripped = reader(fasta_out, qual=qual_out)
                fasta_out.close()
                qual_out.close()

                # TODO remove this custom equality testing code when
                # SequenceCollection has an equals method (part of #656).
                # We need this method to include IDs and description in the
                # comparison (not part of SequenceCollection.__eq__).
                self.assertEqual(original, round_tripped)
                for before, after in zip(original, round_tripped):
                    self.assertTrue(before.equals(after))

    def test_roundtrip_biological_sequences(self):
        name_pairs = [('fasta_multi_seq_roundtrip',
                       'qual_multi_seq_roundtrip'),
                      ('fasta_sequence_collection_different_type',
                       'qual_sequence_collection_different_type')]
        paths = [[get_data_path(name) for name in pair]
                 for pair in name_pairs]

        reader_writer_pairs = ((_fasta_to_biological_sequence,
                                _biological_sequence_to_fasta),
                               (_fasta_to_nucleotide_sequence,
                                _nucleotide_sequence_to_fasta),
                               (_fasta_to_dna_sequence,
                                _dna_sequence_to_fasta),
                               (_fasta_to_rna_sequence,
                                _rna_sequence_to_fasta),
                               (_fasta_to_protein_sequence,
                                _protein_sequence_to_fasta))
        for reader, writer in reader_writer_pairs:
            for fasta_path, qual_path in paths:
                # read
                original = reader(fasta_path, qual=qual_path)

                # write
                fasta_out = StringIO()
                qual_out = StringIO()
                writer(original, fasta_out, qual=qual_out)
                fasta_out.seek(0)
                qual_out.seek(0)

                # read
                round_tripped = reader(fasta_out, qual=qual_out)
                fasta_out.close()
                qual_out.close()

                self.assertTrue(original.equals(round_tripped))
+
+
# Allow running this test module directly (e.g. ``python test_fasta.py``).
if __name__ == '__main__':
    main()
diff --git a/skbio/io/tests/test_fastq.py b/skbio/io/tests/test_fastq.py
new file mode 100644
index 0000000..9da5b6c
--- /dev/null
+++ b/skbio/io/tests/test_fastq.py
@@ -0,0 +1,551 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import zip
+from six import StringIO
+
+import unittest
+import warnings
+
+from skbio import (read, write, BiologicalSequence, NucleotideSequence,
+                   DNASequence, RNASequence, ProteinSequence,
+                   SequenceCollection, Alignment)
+from skbio.io import FASTQFormatError
+from skbio.io.fastq import (
+    _fastq_sniffer, _fastq_to_generator, _fastq_to_sequence_collection,
+    _fastq_to_alignment, _generator_to_fastq, _sequence_collection_to_fastq,
+    _alignment_to_fastq)
+
+from skbio.util import get_data_path
+
+# Note: the example FASTQ files with file extension .fastq are taken from the
+# following open-access publication's supplementary data:
+#
+# P.J.A. Cock, C.J. Fields, N. Goto, M.L. Heuer and P.M. Rice (2009). The
+# Sanger FASTQ file format for sequences with quality scores, and the
+# Solexa/Illumina FASTQ variants.
+#
+# See licenses/fastq-example-files-readme.txt for the original README that
+# accompanied these files, which includes the terms of use and detailed
+# description of the files.
+#
+# The example files bearing the original filenames have not been modified from
+# their original form.
+
+
+def _drop_kwargs(kwargs, *args):
+    for arg in args:
+        if arg in kwargs:
+            kwargs.pop(arg)
+
+
class TestSniffer(unittest.TestCase):
    """Tests for the FASTQ sniffer against known-good and known-bad files."""

    def setUp(self):
        # files the sniffer must identify as FASTQ
        positive_names = [
            'fastq_multi_seq_sanger',
            'fastq_single_seq_illumina1.3',
            'fastq_wrapping_as_illumina_no_description',
            'fastq_wrapping_as_sanger_no_description',
            'fastq_wrapping_original_sanger_no_description',
            'fastq_writer_illumina1.3_defaults',
            'fastq_writer_sanger_defaults',
            'fastq_writer_sanger_non_defaults',
            'illumina_full_range_as_illumina.fastq',
            'illumina_full_range_as_sanger.fastq',
            'illumina_full_range_original_illumina.fastq',
            'longreads_as_illumina.fastq',
            'longreads_as_sanger.fastq',
            'longreads_original_sanger.fastq',
            'misc_dna_as_illumina.fastq',
            'misc_dna_as_sanger.fastq',
            'misc_dna_original_sanger.fastq',
            'misc_rna_as_illumina.fastq',
            'misc_rna_as_sanger.fastq',
            'misc_rna_original_sanger.fastq',
            'sanger_full_range_as_illumina.fastq',
            'sanger_full_range_as_sanger.fastq',
            'sanger_full_range_original_sanger.fastq',
            'solexa_full_range_original_solexa.fastq',
            'wrapping_as_illumina.fastq',
            'wrapping_as_sanger.fastq',
            'wrapping_original_sanger.fastq'
        ]
        # files the sniffer must reject (empty, malformed, or truncated)
        negative_names = [
            'empty',
            'whitespace_only',
            'fastq_invalid_missing_header',
            'fastq_invalid_missing_seq_data',
            'error_diff_ids.fastq',
            'error_double_qual.fastq',
            'error_double_seq.fastq',
            'error_long_qual.fastq',
            'error_no_qual.fastq',
            'error_qual_del.fastq',
            'error_qual_escape.fastq',
            'error_qual_null.fastq',
            'error_qual_space.fastq',
            'error_qual_tab.fastq',
            'error_qual_unit_sep.fastq',
            'error_qual_vtab.fastq',
            'error_short_qual.fastq',
            'error_spaces.fastq',
            'error_tabs.fastq',
            'error_trunc_at_seq.fastq',
            'error_trunc_at_plus.fastq',
            'error_trunc_at_qual.fastq',
            'error_trunc_in_title.fastq',
            'error_trunc_in_seq.fastq',
            'error_trunc_in_plus.fastq',
            'error_trunc_in_qual.fastq',
        ]
        self.positives = list(map(get_data_path, positive_names))
        self.negatives = list(map(get_data_path, negative_names))

    def test_positives(self):
        for path in self.positives:
            self.assertEqual(_fastq_sniffer(path), (True, {}))

    def test_negatives(self):
        for path in self.negatives:
            self.assertEqual(_fastq_sniffer(path), (False, {}))
+
+
+class TestReaders(unittest.TestCase):
    def setUp(self):
        """Build shared fixtures for the FASTQ reader tests."""
        # Each entry: (filepath, list of reader kwarg dicts to try, expected
        # components). Components are (id, description, sequence, quality)
        # tuples, one per sequence the reader should produce.
        self.valid_files = [
            (get_data_path('empty'),
             [{},
              {'variant': 'illumina1.8'},
              {'phred_offset': 33, 'constructor': DNASequence}],
             []),

            (get_data_path('fastq_single_seq_illumina1.3'), [
                {'variant': 'illumina1.3'},
                {'phred_offset': 64},
                {'variant': 'illumina1.3', 'constructor': ProteinSequence},
            ], [
                ('', 'bar\t baz', 'ACGT', [33, 34, 35, 36])
            ]),

            (get_data_path('fastq_multi_seq_sanger'), [
                {'variant': 'sanger'},
                {'phred_offset': 33, 'seq_num': 2},
                {'variant': 'sanger', 'constructor': RNASequence,
                 'seq_num': 3},
            ], [
                ('foo', 'bar baz', 'AACCGG',
                 [16, 17, 18, 19, 20, 21]),
                ('bar', 'baz foo', 'TTGGCC',
                 [23, 22, 21, 20, 19, 18]),
                ('baz', 'foo bar', 'GATTTC',
                 [20, 21, 22, 23, 24, 18])
            ]),
        ]

        # Each entry: (filepath, expected exception type, regexp the error
        # message must match). The error_* files come from the Cock et al.
        # (2009) FASTQ paper's supplementary data (see module comment above).
        self.invalid_files = [(get_data_path(e[0]), e[1], e[2]) for e in [
            ('whitespace_only', FASTQFormatError, 'blank line.*FASTQ'),

            ('fastq_invalid_missing_header', FASTQFormatError,
             "sequence.*header.*start of file: 'seq1 desc1'"),

            ('fastq_invalid_missing_seq_data', FASTQFormatError,
             'without sequence data'),

            ('error_diff_ids.fastq', FASTQFormatError,
             "header lines do not match: "
             "'SLXA-B3_649_FC8437_R1_1_1_850_123' != "
             "'SLXA-B3_649_FC8437_R1_1_1_850_124'"),

            ('error_double_qual.fastq', FASTQFormatError,
             "Extra quality.*'\+SLXA-B3_649_FC8437_R1_1_1_850_123'"),

            ('error_double_seq.fastq', FASTQFormatError,
             'FASTQ record that is missing a quality \(\+\) header line'),

            ('error_long_qual.fastq', FASTQFormatError, "Extra quality.*'Y'"),

            ('error_no_qual.fastq', FASTQFormatError,
             'blank line.*FASTQ'),

            ('error_qual_del.fastq', ValueError,
             'Decoded Phred score.*out of range'),

            ('error_qual_escape.fastq', ValueError,
             'Decoded Phred score.*out of range'),

            ('error_qual_null.fastq', ValueError,
             'Decoded Phred score.*out of range'),

            ('error_qual_space.fastq', ValueError,
             'Decoded Phred score.*out of range'),

            ('error_qual_tab.fastq', ValueError,
             'Decoded Phred score.*out of range'),

            ('error_qual_unit_sep.fastq', ValueError,
             'Decoded Phred score.*out of range'),

            ('error_qual_vtab.fastq', ValueError,
             'Decoded Phred score.*out of range'),

            ('error_short_qual.fastq', FASTQFormatError,
             "Extra quality.*'SLXA-B3_649_FC8437_R1_1_1_362_549'"),

            ('error_spaces.fastq', FASTQFormatError,
             "whitespace.*sequence data: 'GATGTGCAA TACCTTTGTA GAGGAA'"),

            ('error_tabs.fastq', FASTQFormatError,
             r"whitespace.*sequence data: 'GATGTGCAA\\tTACCTTTGTA\\tGAGGAA'"),

            ('error_trunc_at_seq.fastq', FASTQFormatError,
             'blank line.*FASTQ'),

            ('error_trunc_at_plus.fastq', FASTQFormatError,
             'blank line.*FASTQ'),

            ('error_trunc_at_qual.fastq', FASTQFormatError,
             'incomplete/truncated.*end of file'),

            ('error_trunc_in_title.fastq', FASTQFormatError,
             'incomplete/truncated.*end of file'),

            ('error_trunc_in_seq.fastq', FASTQFormatError,
             'incomplete/truncated.*end of file'),

            ('error_trunc_in_plus.fastq', FASTQFormatError,
             "header lines do not match: "
             "'SLXA-B3_649_FC8437_R1_1_1_183_714' != 'SLXA-B3_649_FC'"),

            ('error_trunc_in_qual.fastq', FASTQFormatError,
             'incomplete/truncated.*end of file')
        ]]
+
+    def test_fastq_to_generator_valid_files(self):
+        for valid, kwargs, components in self.valid_files:
+            for kwarg in kwargs:
+                _drop_kwargs(kwarg, 'seq_num')
+                constructor = kwarg.get('constructor', BiologicalSequence)
+                expected = [constructor(c[2], id=c[0], description=c[1],
+                            quality=c[3]) for c in components]
+
+                observed = list(_fastq_to_generator(valid, **kwarg))
+                self.assertEqual(len(expected), len(observed))
+                for o, e in zip(observed, expected):
+                    self.assertTrue(o.equals(e))
+
+    def test_fastq_to_generator_invalid_files_all_variants(self):
+        # files that should be invalid for all variants, as well as custom
+        # phred offsets
+        for fp, error_type, error_msg_regex in self.invalid_files:
+            for variant in 'sanger', 'illumina1.3', 'illumina1.8':
+                with self.assertRaisesRegexp(error_type, error_msg_regex):
+                    list(_fastq_to_generator(fp, variant=variant))
+
+            for offset in 33, 64, 40, 77:
+                with self.assertRaisesRegexp(error_type, error_msg_regex):
+                    list(_fastq_to_generator(fp, phred_offset=offset))
+
+    def test_fastq_to_generator_invalid_files_illumina(self):
+        # files that should be invalid for illumina1.3 and illumina1.8 variants
+        fps = [get_data_path(fp) for fp in
+               ['sanger_full_range_original_sanger.fastq',
+               'solexa_full_range_original_solexa.fastq']]
+
+        for fp in fps:
+            with self.assertRaisesRegexp(ValueError, 'out of range \[0, 62\]'):
+                list(_fastq_to_generator(fp, variant='illumina1.3'))
+            with self.assertRaisesRegexp(ValueError, 'out of range \[0, 62\]'):
+                list(_fastq_to_generator(fp, variant='illumina1.8'))
+
+    def test_fastq_to_generator_solexa(self):
+        # solexa support isn't implemented yet. should raise error even with
+        # valid solexa file
+        with self.assertRaises(NotImplementedError):
+            list(_fastq_to_generator(
+                get_data_path('solexa_full_range_original_solexa.fastq'),
+                variant='solexa'))
+
+    def test_fastq_to_sequence(self):
+        for constructor in [BiologicalSequence, NucleotideSequence,
+                            DNASequence, RNASequence, ProteinSequence]:
+            for valid, kwargs, components in self.valid_files:
+                # skip empty file case since we cannot read a specific sequence
+                # from an empty file
+                if len(components) == 0:
+                    continue
+
+                for kwarg in kwargs:
+                    _drop_kwargs(kwarg, 'constructor')
+
+                    seq_num = kwarg.get('seq_num', 1)
+                    c = components[seq_num - 1]
+                    expected = constructor(c[2], id=c[0], description=c[1],
+                                           quality=c[3])
+
+                    observed = read(valid, into=constructor, format='fastq',
+                                    verify=False, **kwarg)
+                    self.assertTrue(observed.equals(expected))
+
+    def test_fastq_to_sequence_collection(self):
+        for valid, kwargs, components in self.valid_files:
+            for kwarg in kwargs:
+                _drop_kwargs(kwarg, 'seq_num')
+                constructor = kwarg.get('constructor', BiologicalSequence)
+                expected = SequenceCollection(
+                    [constructor(c[2], id=c[0], description=c[1], quality=c[3])
+                     for c in components])
+
+                observed = _fastq_to_sequence_collection(valid, **kwarg)
+                # TODO remove when #656 is resolved
+                self.assertEqual(observed, expected)
+                for o, e in zip(observed, expected):
+                    self.assertTrue(o.equals(e))
+
+    def test_fastq_to_alignment(self):
+        for valid, kwargs, components in self.valid_files:
+            for kwarg in kwargs:
+                _drop_kwargs(kwarg, 'seq_num')
+                constructor = kwarg.get('constructor', BiologicalSequence)
+                expected = Alignment(
+                    [constructor(c[2], id=c[0], description=c[1], quality=c[3])
+                     for c in components])
+
+                observed = _fastq_to_alignment(valid, **kwarg)
+                # TODO remove when #656 is resolved
+                self.assertEqual(observed, expected)
+                for o, e in zip(observed, expected):
+                    self.assertTrue(o.equals(e))
+
+
class TestWriters(unittest.TestCase):
    """Tests for the FASTQ writers at each API level: raw generator, single
    sequence (via ``write``), SequenceCollection, and Alignment."""

    def setUp(self):
        # Each entry pairs a list of (id, description, sequence, quality)
        # components with a list of (writer kwargs, expected output file).
        # The ids/descriptions deliberately contain whitespace and newlines
        # to exercise the writer's replacement kwargs.
        self.valid_files = [
            ([
                ('f o  o', 'bar\n\nbaz', 'AACCGG',
                 [16, 17, 18, 19, 20, 21]),
                ('bar', 'baz foo', 'TTGGCC',
                 [23, 22, 21, 20, 19, 18]),
                ('ba\n\t\tz', 'foo bar', 'GATTTC',
                 [20, 21, 22, 23, 24, 18])
            ], [
                ({'variant': 'sanger'},
                 get_data_path('fastq_writer_sanger_defaults')),
                ({'phred_offset': 33},
                 get_data_path('fastq_writer_sanger_defaults')),
                ({'variant': 'illumina1.8'},
                 get_data_path('fastq_writer_sanger_defaults')),
                ({'variant': 'illumina1.3'},
                 get_data_path('fastq_writer_illumina1.3_defaults')),
                ({'variant': 'sanger', 'id_whitespace_replacement': '%',
                  'description_newline_replacement': '^'},
                 get_data_path('fastq_writer_sanger_non_defaults'))
            ]),
        ]

    def test_generator_to_fastq_kwargs_passed(self):
        # Writer output from a generator must match the pre-computed file
        # byte-for-byte for every kwargs combination.
        for components, kwargs_expected_fp in self.valid_files:
            for kwargs, expected_fp in kwargs_expected_fp:
                def gen():
                    for c in components:
                        yield BiologicalSequence(
                            c[2], id=c[0], description=c[1], quality=c[3])

                fh = StringIO()
                _generator_to_fastq(gen(), fh, **kwargs)
                observed = fh.getvalue()
                fh.close()

                with open(expected_fp, 'U') as f:
                    expected = f.read()

                self.assertEqual(observed, expected)

    def test_sequence_to_fastq_kwargs_passed(self):
        # Writing each sequence individually through the high-level write()
        # API must concatenate to the same expected file, for every
        # sequence type.
        for constructor in [BiologicalSequence, NucleotideSequence,
                            DNASequence, RNASequence, ProteinSequence]:
            for components, kwargs_expected_fp in self.valid_files:
                for kwargs, expected_fp in kwargs_expected_fp:
                    fh = StringIO()
                    for c in components:
                        obj = constructor(c[2], id=c[0], description=c[1],
                                          quality=c[3])
                        write(obj, into=fh, format='fastq', **kwargs)

                    observed = fh.getvalue()
                    fh.close()

                    with open(expected_fp, 'U') as f:
                        expected = f.read()

                    self.assertEqual(observed, expected)

    def test_sequence_collection_to_fastq_kwargs_passed(self):
        for components, kwargs_expected_fp in self.valid_files:
            for kwargs, expected_fp in kwargs_expected_fp:
                obj = SequenceCollection([
                    NucleotideSequence(c[2], id=c[0], description=c[1],
                                       quality=c[3]) for c in components])

                fh = StringIO()
                _sequence_collection_to_fastq(obj, fh, **kwargs)
                observed = fh.getvalue()
                fh.close()

                with open(expected_fp, 'U') as f:
                    expected = f.read()

                self.assertEqual(observed, expected)

    def test_alignment_to_fastq_kwargs_passed(self):
        for components, kwargs_expected_fp in self.valid_files:
            for kwargs, expected_fp in kwargs_expected_fp:
                obj = Alignment([
                    ProteinSequence(c[2], id=c[0], description=c[1],
                                    quality=c[3]) for c in components])

                fh = StringIO()
                _alignment_to_fastq(obj, fh, **kwargs)
                observed = fh.getvalue()
                fh.close()

                with open(expected_fp, 'U') as f:
                    expected = f.read()

                self.assertEqual(observed, expected)

    def test_generator_to_fastq_no_qual(self):
        # A sequence without quality scores cannot be serialized to FASTQ;
        # the error message identifies which sequence (the 2nd) is at fault.
        def gen():
            yield BiologicalSequence('ACGT', id='foo', description='bar',
                                     quality=range(4))
            yield BiologicalSequence('ACG', id='foo', description='bar')

        with self.assertRaisesRegexp(ValueError, '2nd.*quality scores'):
            _generator_to_fastq(gen(), StringIO(), variant='illumina1.8')
+
+
class TestConversions(unittest.TestCase):
    """Round-trip conversion tests: read a FASTQ file under one encoding and
    re-write it under another, comparing against a pre-computed file."""

    def setUp(self):
        # Each entry: (input file, expected output file,
        #              [(reader kwargs, writer kwargs), ...]).
        # Variant names and their equivalent raw phred offsets are exercised
        # interchangeably on both the read and write side.
        self.conversions = [
            (get_data_path('empty'),
             get_data_path('empty'), [
                 ({'variant': 'sanger'}, {'phred_offset': 42}),
            ]),

            (get_data_path('longreads_original_sanger.fastq'),
             get_data_path('longreads_as_sanger.fastq'), [
                 ({'variant': 'sanger'}, {'variant': 'sanger'}),
                 ({'phred_offset': 33}, {'variant': 'sanger'}),
                 ({'variant': 'sanger'}, {'phred_offset': 33})
            ]),
            (get_data_path('longreads_original_sanger.fastq'),
             get_data_path('longreads_as_illumina.fastq'), [
                 ({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
                 ({'phred_offset': 33}, {'variant': 'illumina1.3'}),
                 ({'variant': 'sanger'}, {'phred_offset': 64})
            ]),

            (get_data_path('wrapping_original_sanger.fastq'),
             get_data_path('wrapping_as_sanger.fastq'), [
                 ({'variant': 'sanger'}, {'variant': 'sanger'}),
                 ({'phred_offset': 33}, {'variant': 'sanger'}),
                 ({'variant': 'sanger'}, {'phred_offset': 33})
            ]),
            (get_data_path('wrapping_original_sanger.fastq'),
             get_data_path('wrapping_as_illumina.fastq'), [
                 ({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
                 ({'phred_offset': 33}, {'variant': 'illumina1.3'}),
                 ({'variant': 'sanger'}, {'phred_offset': 64})
            ]),

            (get_data_path('sanger_full_range_original_sanger.fastq'),
             get_data_path('sanger_full_range_as_sanger.fastq'), [
                 ({'variant': 'sanger'}, {'variant': 'sanger'}),
                 ({'phred_offset': 33}, {'variant': 'sanger'}),
                 ({'variant': 'sanger'}, {'phred_offset': 33})
            ]),
            (get_data_path('sanger_full_range_original_sanger.fastq'),
             get_data_path('sanger_full_range_as_illumina.fastq'), [
                 ({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
                 ({'phred_offset': 33}, {'variant': 'illumina1.3'}),
                 ({'variant': 'sanger'}, {'phred_offset': 64})
            ]),

            (get_data_path('illumina_full_range_original_illumina.fastq'),
             get_data_path('illumina_full_range_as_illumina.fastq'), [
                 ({'variant': 'illumina1.3'}, {'variant': 'illumina1.3'}),
                 ({'phred_offset': 64}, {'variant': 'illumina1.3'}),
                 ({'variant': 'illumina1.3'}, {'phred_offset': 64})
            ]),
            (get_data_path('illumina_full_range_original_illumina.fastq'),
             get_data_path('illumina_full_range_as_sanger.fastq'), [
                 ({'variant': 'illumina1.3'}, {'variant': 'sanger'}),
                 ({'phred_offset': 64}, {'variant': 'sanger'}),
                 ({'variant': 'illumina1.3'}, {'phred_offset': 33})
            ]),

            (get_data_path('misc_dna_original_sanger.fastq'),
             get_data_path('misc_dna_as_sanger.fastq'), [
                 ({'variant': 'sanger'}, {'variant': 'sanger'}),
                 ({'phred_offset': 33}, {'variant': 'sanger'}),
                 ({'variant': 'sanger'}, {'phred_offset': 33})
            ]),
            (get_data_path('misc_dna_original_sanger.fastq'),
             get_data_path('misc_dna_as_illumina.fastq'), [
                 ({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
                 ({'phred_offset': 33}, {'variant': 'illumina1.3'}),
                 ({'variant': 'sanger'}, {'phred_offset': 64})
            ]),

            (get_data_path('misc_rna_original_sanger.fastq'),
             get_data_path('misc_rna_as_sanger.fastq'), [
                 ({'variant': 'sanger'}, {'variant': 'sanger'}),
                 ({'phred_offset': 33}, {'variant': 'sanger'}),
                 ({'variant': 'sanger'}, {'phred_offset': 33})
            ]),
            (get_data_path('misc_rna_original_sanger.fastq'),
             get_data_path('misc_rna_as_illumina.fastq'), [
                 ({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
                 ({'phred_offset': 33}, {'variant': 'illumina1.3'}),
                 ({'variant': 'sanger'}, {'phred_offset': 64})
            ]),

            (get_data_path('fastq_wrapping_original_sanger_no_description'),
             get_data_path('fastq_wrapping_as_sanger_no_description'), [
                 ({'variant': 'sanger'}, {'variant': 'sanger'}),
                 ({'phred_offset': 33}, {'variant': 'sanger'}),
                 ({'variant': 'sanger'}, {'phred_offset': 33})
            ]),
            (get_data_path('fastq_wrapping_original_sanger_no_description'),
             get_data_path('fastq_wrapping_as_illumina_no_description'), [
                 ({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
                 ({'phred_offset': 33}, {'variant': 'illumina1.3'}),
                 ({'variant': 'sanger'}, {'phred_offset': 64})
            ]),
        ]

    def test_conversion(self):
        for from_fp, to_fp, kwargs in self.conversions:
            for from_kwargs, to_kwargs in kwargs:
                read_gen = _fastq_to_generator(from_fp, **from_kwargs)
                fh = StringIO()

                # will issue warning when truncating quality scores
                with warnings.catch_warnings(record=True):
                    warnings.simplefilter("ignore")
                    _generator_to_fastq(read_gen, fh, **to_kwargs)

                obs = fh.getvalue()
                fh.close()

                with open(to_fp, 'U') as fh:
                    exp = fh.read()
                self.assertEqual(obs, exp)
+
+
# Allow running this test module directly (in addition to via a test runner).
if __name__ == '__main__':
    unittest.main()
diff --git a/skbio/io/tests/test_lsmat.py b/skbio/io/tests/test_lsmat.py
new file mode 100644
index 0000000..f66f06e
--- /dev/null
+++ b/skbio/io/tests/test_lsmat.py
@@ -0,0 +1,251 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from six import StringIO
+
+from unittest import TestCase, main
+
+from skbio import DistanceMatrix
+from skbio.io import LSMatFormatError
+from skbio.io.lsmat import (
+    _lsmat_to_dissimilarity_matrix, _lsmat_to_distance_matrix,
+    _dissimilarity_matrix_to_lsmat, _distance_matrix_to_lsmat, _lsmat_sniffer)
+from skbio.stats.distance import DissimilarityMatrix, DistanceMatrixError
+
+
class LSMatTestData(TestCase):
    """Shared fixtures: in-memory file handles for valid and invalid LSMat
    files, used by the reader/writer and sniffer test cases below."""

    def setUp(self):
        self.lsmat_1x1_fh = StringIO(LSMat_1x1)
        self.lsmat_2x2_fh = StringIO(LSMat_2x2)
        self.lsmat_2x2_asym_fh = StringIO(LSMat_2x2_ASYM)
        self.lsmat_3x3_fh = StringIO(LSMat_3x3)
        self.lsmat_3x3_whitespace_fh = StringIO(LSMat_3x3_WHITESPACE)
        self.lsmat_3x3_csv_fh = StringIO(LSMat_3x3_CSV)

        self.valid_fhs = [
            self.lsmat_1x1_fh,
            self.lsmat_2x2_fh,
            self.lsmat_2x2_asym_fh,
            self.lsmat_3x3_fh,
            self.lsmat_3x3_whitespace_fh
        ]

        self.empty_fh = StringIO()
        self.invalid_1_fh = StringIO(INVALID_1)
        self.invalid_2_fh = StringIO(INVALID_2)
        self.invalid_3_fh = StringIO(INVALID_3)
        self.invalid_4_fh = StringIO(INVALID_4)
        self.invalid_5_fh = StringIO(INVALID_5)
        self.invalid_6_fh = StringIO(INVALID_6)

        # Each invalid file is paired with a regex matching its expected
        # error message. Raw strings throughout so regex escapes such as
        # \( are not misread as (invalid) string escape sequences.
        self.invalid_fhs = [
            (self.empty_fh, 'empty'),
            (self.invalid_1_fh, r'1 value\(s\).*2.*\(2\)'),
            (self.invalid_2_fh, "'b'.*'a'"),
            (self.invalid_3_fh, r'extra row\(s\)'),
            (self.invalid_4_fh, r'2 row\(s\).*found 1'),
            (self.invalid_5_fh, r'2 row\(s\).*found 0'),
            (self.invalid_6_fh, r"delimiter '\\t'")
        ]
+
+
class DissimilarityAndDistanceMatrixReaderWriterTests(LSMatTestData):
    """Reader/writer tests run in parallel for DissimilarityMatrix and
    DistanceMatrix, including round-trip and CSV-delimiter coverage."""

    def setUp(self):
        super(DissimilarityAndDistanceMatrixReaderWriterTests, self).setUp()

        self.lsmat_1x1_data = [[0.0]]
        self.lsmat_2x2_data = [[0.0, 0.123], [0.123, 0.0]]
        self.lsmat_2x2_asym_data = [[0.0, 1.0], [-2.0, 0.0]]
        self.lsmat_3x3_data = [[0.0, 0.01, 4.2], [0.01, 0.0, 12.0],
                               [4.2, 12.0, 0.0]]

        # We repeat the 3x3 example because there are two file format
        # representations of it, one that is messy and one that is not. Both
        # should be read into an equivalent object and written to an equivalent
        # format though, which is why we duplicate the 3x3 objects and strings.
        self.dissim_objs = [
            DissimilarityMatrix(self.lsmat_1x1_data, ['a']),
            DissimilarityMatrix(self.lsmat_2x2_data, ['a', 'b']),
            DissimilarityMatrix(self.lsmat_2x2_asym_data, ['a', 'b']),
            DissimilarityMatrix(self.lsmat_3x3_data, ['a', 'b', 'c']),
            DissimilarityMatrix(self.lsmat_3x3_data, ['a', 'b', 'c'])
        ]

        self.dissim_strs = [LSMat_1x1, LSMat_2x2, LSMat_2x2_ASYM, LSMat_3x3,
                            LSMat_3x3]

        self.dissim_fhs = [self.lsmat_1x1_fh, self.lsmat_2x2_fh,
                           self.lsmat_2x2_asym_fh, self.lsmat_3x3_fh,
                           self.lsmat_3x3_whitespace_fh]

        # DistanceMatrix requires symmetry, so the asymmetric 2x2 example is
        # intentionally absent from the dist_* fixtures.
        self.dist_objs = [
            DistanceMatrix(self.lsmat_1x1_data, ['a']),
            DistanceMatrix(self.lsmat_2x2_data, ['a', 'b']),
            DistanceMatrix(self.lsmat_3x3_data, ['a', 'b', 'c']),
            DistanceMatrix(self.lsmat_3x3_data, ['a', 'b', 'c'])
        ]

        self.dist_strs = [LSMat_1x1, LSMat_2x2, LSMat_3x3, LSMat_3x3]

        self.dist_fhs = [self.lsmat_1x1_fh, self.lsmat_2x2_fh,
                         self.lsmat_3x3_fh, self.lsmat_3x3_whitespace_fh]

    def test_read_valid_files(self):
        for fn, cls, objs, fhs in ((_lsmat_to_dissimilarity_matrix,
                                    DissimilarityMatrix, self.dissim_objs,
                                    self.dissim_fhs),
                                   (_lsmat_to_distance_matrix, DistanceMatrix,
                                    self.dist_objs, self.dist_fhs)):
            for fh, obj in zip(fhs, objs):
                fh.seek(0)
                obs = fn(fh)
                self.assertEqual(obs, obj)
                self.assertIsInstance(obs, cls)

        # Above files are TSV (default delimiter). Test that CSV works too.
        for fn, cls in ((_lsmat_to_dissimilarity_matrix, DissimilarityMatrix),
                        (_lsmat_to_distance_matrix, DistanceMatrix)):
            exp = cls(self.lsmat_3x3_data, ['a', 'b', 'c'])
            self.lsmat_3x3_csv_fh.seek(0)
            obs = fn(self.lsmat_3x3_csv_fh, delimiter=',')
            self.assertEqual(obs, exp)
            self.assertIsInstance(obs, cls)

    def test_read_invalid_files(self):
        for fn in _lsmat_to_dissimilarity_matrix, _lsmat_to_distance_matrix:
            for invalid_fh, error_msg_regexp in self.invalid_fhs:
                with self.assertRaisesRegexp(LSMatFormatError,
                                             error_msg_regexp):
                    invalid_fh.seek(0)
                    fn(invalid_fh)

        # Asymmetric data only raises an error for DistanceMatrix.
        with self.assertRaises(DistanceMatrixError):
            _lsmat_to_distance_matrix(self.lsmat_2x2_asym_fh)

    def test_write(self):
        for fn, objs, strs in ((_dissimilarity_matrix_to_lsmat,
                                self.dissim_objs, self.dissim_strs),
                               (_distance_matrix_to_lsmat, self.dist_objs,
                                self.dist_strs)):
            for obj, str_ in zip(objs, strs):
                fh = StringIO()
                fn(obj, fh)
                obs = fh.getvalue()
                fh.close()
                self.assertEqual(obs, str_)

        # Test writing CSV (TSV is written above).
        for fn, cls in ((_dissimilarity_matrix_to_lsmat, DissimilarityMatrix),
                        (_distance_matrix_to_lsmat, DistanceMatrix)):
            obj = cls(self.lsmat_3x3_data, ['a', 'b', 'c'])
            fh = StringIO()
            fn(obj, fh, delimiter=',')
            obs = fh.getvalue()
            fh.close()
            self.assertEqual(obs, LSMat_3x3_CSV)

    def test_roundtrip_read_write(self):
        # read -> write -> read must reproduce an equal object.
        for reader_fn, writer_fn, fhs in ((_lsmat_to_dissimilarity_matrix,
                                           _dissimilarity_matrix_to_lsmat,
                                           self.dissim_fhs),
                                          (_lsmat_to_distance_matrix,
                                           _distance_matrix_to_lsmat,
                                           self.dist_fhs)):
            for fh in fhs:
                # Read.
                fh.seek(0)
                lsmat1 = reader_fn(fh)

                # Write.
                out_fh = StringIO()
                writer_fn(lsmat1, out_fh)
                out_fh.seek(0)

                # Read.
                lsmat2 = reader_fn(out_fh)
                out_fh.close()

                self.assertEqual(lsmat1, lsmat2)
+
+
class SnifferTests(LSMatTestData):
    """Tests for the LSMat format sniffer."""

    def setUp(self):
        super(SnifferTests, self).setUp()

    def test_match_tsv(self):
        # The sniffer only inspects the start of a file, so besides all
        # valid files it also accepts some malformed ones whose defects
        # appear later in the data.
        sniffable = self.valid_fhs + [self.invalid_1_fh, self.invalid_3_fh,
                                      self.invalid_4_fh]
        expected = (True, {'delimiter': '\t'})
        for fh in sniffable:
            self.assertEqual(_lsmat_sniffer(fh), expected)

    def test_match_csv(self):
        obs = _lsmat_sniffer(self.lsmat_3x3_csv_fh)
        self.assertEqual(obs, (True, {'delimiter': ','}))

    def test_no_match(self):
        unmatched = (self.empty_fh, self.invalid_2_fh, self.invalid_5_fh,
                     self.invalid_6_fh)
        for fh in unmatched:
            self.assertEqual(_lsmat_sniffer(fh), (False, {}))
+
+
# Serialized LSMat fixtures (tab-delimited unless noted otherwise).
LSMat_1x1 = "\ta\na\t0.0\n"

LSMat_2x2 = "\ta\tb\na\t0.0\t0.123\nb\t0.123\t0.0\n"

LSMat_2x2_ASYM = "\ta\tb\na\t0.0\t1.0\nb\t-2.0\t0.0\n"

LSMat_3x3 = ("\ta\tb\tc\na\t0.0\t0.01\t4.2\nb\t0.01\t0.0\t12.0\nc\t4.2\t12.0\t"
             "0.0\n")

# Extra whitespace-only lines throughout. Also has comments before the header.
LSMat_3x3_WHITESPACE = '\n'.join(['# foo',
                                  '      \t \t ',
                                  ' #bar',
                                  '',
                                  '',
                                  '\ta\t b \tc',
                                  'a  \t0.0\t0.01\t4.2',
                                  '     \t',
                                  'b\t0.01\t0.0\t12.0',
                                  '',
                                  '\t     \t',
                                  '',
                                  'c\t4.2\t12.0\t0.0',
                                  '',
                                  '   \t ',
                                  '\t\t\t',
                                  ' '])

# Same matrix as above, but delimited by commas instead of tabs.
LSMat_3x3_CSV = ",a,b,c\na,0.0,0.01,4.2\nb,0.01,0.0,12.0\nc,4.2,12.0,0.0\n"

# missing data (row 'b' has one value instead of two)
INVALID_1 = '\ta\tb\na\t0\t1\nb\t1'

# mismatched IDs (row order disagrees with header order)
INVALID_2 = '\ta\tb\nb\t0\t1\na\t1\t0'

# extra data lines
INVALID_3 = '\ta\tb\na\t0\t1\nb\t1\t0\n  \nfoo\n\n\n'

# missing data lines
INVALID_4 = '\ta\tb\na\t0\t1\n  \n'

# no data lines
INVALID_5 = '\ta\tb\n'

# missing leading delimiter in header
INVALID_6 = "a\tb\na\t0.0\t0.123\nb\t0.123\t0.0\n"


# Allow running this test module directly.
if __name__ == '__main__':
    main()
diff --git a/skbio/io/tests/test_newick.py b/skbio/io/tests/test_newick.py
new file mode 100644
index 0000000..5d549c3
--- /dev/null
+++ b/skbio/io/tests/test_newick.py
@@ -0,0 +1,371 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from six import StringIO
+
+import unittest
+
+from skbio import TreeNode
+from skbio.io import NewickFormatError
+from skbio.io.newick import (_newick_to_tree_node, _tree_node_to_newick,
+                             _newick_sniffer)
+
+
+class TestNewick(unittest.TestCase):
+    def _assert_node_equal(self, n1, n2):
+        self.assertEqual(n1.name, n2.name)
+        self.assertEqual(n1.length, n2.length)
+        self.assertEqual(len(n1.children), len(n2.children))
+
+    def _assert_equal(self, n1, n2):
+        def name(x):
+            return (str(x.name),
+                    float(x.length) if x.length is not None else 0,
+                    len(x.children))
+        self._assert_node_equal(n1, n2)
+        for c1, c2 in zip(sorted(n1.children, key=name),
+                          sorted(n2.children, key=name)):
+            self.assertTrue(c1.parent is n1)
+            self.assertTrue(c2.parent is n2)
+            self._assert_equal(c1, c2)
+
+    def _setup_tree(self, kwargs_list):
+        trees = []
+        for kwargs in kwargs_list:
+            trees.append(TreeNode(**kwargs))
+
+        trees[4].extend([trees[2], trees[3]])
+        trees[5].extend([trees[0], trees[1], trees[4]])
+
+        return trees[5]
+
+    def _setup_linked_list(self, kwargs_list):
+        last_node = None
+        for idx, kwargs in enumerate(kwargs_list):
+            new_node = TreeNode(**kwargs)
+
+            if last_node is not None:
+                new_node.append(last_node)
+            last_node = new_node
+        return last_node
+
+    def _setup_balanced_binary(self, kwargs_list):
+        trees = []
+        for kwargs in kwargs_list:
+            trees.append(TreeNode(**kwargs))
+
+        trees[0].extend([trees[2], trees[3]])
+        trees[1].extend([trees[4], trees[5]])
+        trees[6].extend([trees[0], trees[1]])
+        return trees[6]
+
    def setUp(self):
        # Using the factory functions above, we will construct different tree
        # instances. Each tree is expected to serialize to the first newick
        # string in the list. Each string in the list is expected to
        # deserialize into an equivalent rotation of the constructed instance.
        tree_blank = (self._setup_tree([
            {}, {}, {}, {}, {}, {}
        ]), [
            "(,,(,));\n",
            "(,(,),);",
            "((,),,);",
            "   ((,[ this is a comment ])      ,    ,   )    ;  ",
            "((,[ i_can_do_this[0] or escape unmatched '[ ]),[more words],);",
        ])

        tree_leaves_named = (self._setup_tree([
            {'name': 'a_'},
            {'name': 'b'},
            {'name': 'c'},
            {'name': 'd'},
            {},
            {}
        ]), [
            "('a_',b,(c,d));\n",
            "(b,(c,d),'a_');",
            "(b\n,'a_'\n  ,(d \t,c) )  ;",
        ])

        tree_all_named = (self._setup_tree([
            {'name': 'a'},
            {'name': 'b'},
            {'name': 'c'},
            {'name': '[whaaat!\']'},
            {'name': 'e'},
            {'name': 'f'}
        ]), [
            "(a,b,(c,'[whaaat!'']')e)f;\n",
            "(b,(c,'[whaaat!'']')e,a)f;",
            "(b,[comment] \na,('[whaaat!'']',c)e)f;",
        ])

        tree_all_but_root_distances = (self._setup_tree([
            {'length': 0.1},
            {'length': 0.2},
            {'length': 0.3},
            {'length': 0.4},
            {'length': 0.5},
            {}
        ]), [
            "(:0.1,:0.2,(:0.3,:0.4):0.5);\n",
            "(:0.2,(:0.3,:0.4):0.5,:0.1);",
            "(:0.2,:0.1,(:0.4,:0.3):0.5);",
        ])

        tree_all_distances = (self._setup_tree([
            {'length': 0.1},
            {'length': 0.2},
            {'length': 0.3},
            {'length': 0.4},
            {'length': 0.5},
            {'length': 0.0}
        ]), [
            "(:0.1,:0.2,(:0.3,:0.4):0.5):0.0;\n",
            "(:0.2,(:0.3,:0.4):0.5,:0.1):0.0;",
            "(:0.2,\n:0.1,(:0.4,\n:0.3):0.5)\n:0.0;",
        ])

        tree_all_leaves_named_with_distances = (self._setup_tree([
            {'name': 'a', 'length': 0.1},
            {'name': 'b_a\'', 'length': 0.2},
            {'name': 'c', 'length': 0.3},
            {'name': 'de d', 'length': 0.4},
            {'length': 0.5},
            {'length': 0.0}
        ]), [
            "(a:0.1,'b_a''':0.2,(c:0.3,de_d:0.4):0.5):0.0;\n",
            "('b_a''':0.2,(c:0.3,'de d':0.4):0.5,a:0.1):0.0;",
            "('b_a''':0.2,a:0.1,('de d'[why not]:0.4,c:0.3):0.5):0.0;",
        ])

        tree_all_leaves_named_with_distances_no_root = (self._setup_tree([
            {'name': 'a', 'length': 0.1},
            {'name': 'b_a\'', 'length': 0.2},
            {'name': 'c', 'length': 0.3},
            {'name': 'de  d', 'length': 0.4},
            {'length': 0.5},
            {}
        ]), [
            "(a:0.1,'b_a''':0.2,(c:0.3,de__d:0.4):0.5);\n",
            "('b_a''':0.2\n[comment ahoy]\n,(c:0.3,'de  d':0.4):0.5,a:0.1);",
            "('b_a''':0.2,a:0.1,(de__d:0.4,c:0.3):0.5);"
        ])

        tree_all = (self._setup_tree([
            {'name': 'a', 'length': 0.1},
            {'name': 'b_a\'', 'length': 0.2},
            {'name': 'c', 'length': 0.3},
            {'name': 'de\' d', 'length': 0.4},
            {'name': 'e', 'length': 0.5},
            {'name': 'f', 'length': 0.0}
        ]), [
            "(a:0.1,'b_a''':0.2,(c:0.3,de''_d:0.4)e:0.5)f:0.0;\n",
            "('b_a''':0.2,(c:0.3,de''_d:0.4)e:0.5,a:0.1)f:0.0;",
            "((de''_d:0.4, c:0.3)e:0.5, 'b_a''':0.2, a:0.1)f:0.0;"
        ])

        balanced_blank = (self._setup_balanced_binary([
            {}, {}, {}, {}, {}, {}, {}
        ]), [
            "((,),(,));\n",
        ])

        balanced_named = (self._setup_balanced_binary([
            {'name': 'a'},
            {'name': 'b'},
            {'name': 'c'},
            {'name': 'd'},
            {'name': 'e'},
            {'name': 'f'},
            {'name': 'g'}
        ]), [
            "((c,d)a,(e,f)b)g;\n",
        ])

        balanced_distances = (self._setup_balanced_binary([
            {'length': 1.0},
            {'length': 2.0},
            {'length': 3.0},
            {'length': 4.0},
            {'length': 5.0},
            {'length': 6.0},
            {'length': 0.0}
        ]), [
            "((:3.0,:4.0):1.0,(:5.0,:6.0):2.0):0.0;\n",
        ])

        # NOTE(review): 'blanaced_all' is a typo for 'balanced_all'; harmless
        # since the name is only used locally in this method.
        blanaced_all = (self._setup_balanced_binary([
            {'name': 'a', 'length': 1.0},
            {'name': 'b', 'length': 2.0},
            {'name': 'c', 'length': 3.0},
            {'name': 'd', 'length': 4.0},
            {'name': 'e', 'length': 5.0},
            {'name': 'f:f\'f', 'length': 6.0},
            {'name': 'g', 'length': 0.0}
        ]), [
            "((c:3.0,d:4.0)a:1.0,(e:5.0,'f:f''f':6.0)b:2.0)g:0.0;\n",
        ])

        linked_list_blank = (self._setup_linked_list([
            {}, {}, {}, {}, {}
        ]), [
            "(((())));\n",
            "[(((())));](((())));",
            "[[(((())));](((())));](((())));\t\t\n"
        ])

        linked_list_named = (self._setup_linked_list([
            {'name': 'aaa'},
            {'name': 'b_a\''},
            {'name': 'c'},
            {'name': 'de d'},
            {'name': 'e'},
        ]), [
            "((((aaa)'b_a''')c)de_d)e;\n"
        ])

        # NOTE(review): 'inked_list_distances' is a typo for
        # 'linked_list_distances'; harmless, only used locally below.
        inked_list_distances = (self._setup_linked_list([
            {'length': 0.4},
            {'length': 0.3},
            {'length': 0.2},
            {'length': 0.1},
            {'length': 0.0},
        ]), [
            "((((:0.4):0.3):0.2):0.1):0.0;\n",
            "((((:0.4)[not a label]:0.3):0.2):0.1):0.0;\t\t\n"
        ])

        linked_list_all = (self._setup_linked_list([
            {'name': 'a', 'length': 0.4},
            {'name': 'b_a\'', 'length': 0.3},
            {'name': 'c', 'length': 0.2},
            {'name': 'de d', 'length': 0.1},
            {'name': 'eee', 'length': 0.0},
        ]), [
            "((((a:0.4)'b_a''':0.3)c:0.2)de_d:0.1)eee:0.0;\n"
        ])

        # Degenerate single-node trees.
        single_empty = (TreeNode(), [";\n", "[comment about the root"
                                     " and its properties];"])
        single_named = (TreeNode(name='athing'), ["athing;\n"])
        single_distance = (TreeNode(length=200.0), [":200.0;\n"])
        single_all = (TreeNode(name='[a]', length=200.0), ["'[a]':200.0;\n"])

        self.trees_newick_lists = [
            tree_blank,
            tree_leaves_named,
            tree_all_named,
            tree_all_but_root_distances,
            tree_all_distances,
            tree_all_leaves_named_with_distances,
            tree_all_leaves_named_with_distances_no_root,
            tree_all,
            balanced_blank,
            balanced_named,
            balanced_distances,
            blanaced_all,
            linked_list_blank,
            linked_list_named,
            inked_list_distances,
            linked_list_all,
            single_empty,
            single_named,
            single_distance,
            single_all
        ]

        # Invalid newick strings and list of error fragments that should be
        # a part of the error message when read.
        self.invalid_newicks = [
            ("", ['root']),
            ("This is not a newick file.", ['whitespace', 'label']),
            ("((();", ['Parenthesis', 'unbalanced']),
            ("(,,,)(,);\n", ['unnested', 'children']),
            ("(()());", ['unnested', 'children']),
            ("(():,,)", ['length']),
            ("[][[]('comment is the gotcha':0.2,,);", ['unbalanced', 'root']),
            ("#SampleID\tHeaderA\tHeaderB\n0\t'yellow'\t0.45;", ['whitespace',
                                                                 'label']),
            ("))();", ['Parenthesis', 'unbalanced']),
            ("((,,),((,,));", ['Parenthesis', 'unbalanced']),
            ("\n".join([",".join(str(i) for i in range(100))
                       for _ in range(100)]), ['whitespace', 'label'])
        ]
+
+    def test_newick_to_tree_node_valid_files(self):
+        for tree, newicks in self.trees_newick_lists:
+            for newick in newicks:
+                fh = StringIO(newick)
+                read_tree = _newick_to_tree_node(fh)
+
+                self._assert_equal(tree, read_tree)
+
+                fh.close()
+
+    def test_newick_to_tree_node_invalid_files(self):
+        for invalid, error_fragments in self.invalid_newicks:
+            fh = StringIO(invalid)
+            with self.assertRaises(NewickFormatError) as cm:
+                _newick_to_tree_node(fh)
+            for frag in error_fragments:
+                self.assertIn(frag, str(cm.exception))
+            fh.close()
+
+    def test_tree_node_to_newick(self):
+        for tree, newicks in self.trees_newick_lists:
+            newick = newicks[0]
+            fh = StringIO()
+            _tree_node_to_newick(tree, fh)
+
+            self.assertEqual(newick, fh.getvalue())
+
+            fh.close()
+
+    def test_roundtrip(self):
+        """Parse the canonical newick, re-serialize, and re-parse.
+
+        Both the serialized text and the re-parsed tree must match the
+        first pass exactly.
+        """
+        for tree, newicks in self.trees_newick_lists:
+            # The first serialization is the canonical writer output.
+            newick = newicks[0]
+            fh = StringIO(newick)
+            # NB: rebinds `tree` to the freshly-parsed copy; the fixture
+            # tree from the tuple is not used past this point.
+            tree = _newick_to_tree_node(fh)
+            fh2 = StringIO()
+            _tree_node_to_newick(tree, fh2)
+            fh2.seek(0)
+            tree2 = _newick_to_tree_node(fh2)
+
+            self.assertEqual(newick, fh2.getvalue())
+            self._assert_equal(tree, tree2)
+
+            fh.close()
+            fh2.close()
+
+    def test_newick_to_tree_node_convert_underscores(self):
+        fh = StringIO('(_:0.1, _a, _b)__;')
+        tree = _newick_to_tree_node(fh, convert_underscores=False)
+        fh2 = StringIO()
+        _tree_node_to_newick(tree, fh2)
+        self.assertEquals(fh2.getvalue(), "('_':0.1,'_a','_b')'__';\n")
+        fh2.close()
+        fh.close()
+
+    def test_newick_sniffer_valid_files(self):
+        for _, newicks in self.trees_newick_lists:
+            for newick in newicks:
+                fh = StringIO(newick)
+                self.assertEqual(_newick_sniffer(fh), (True, {}))
+                fh.close()
+
+    def test_newick_sniffer_invalid_files(self):
+        for invalid, _ in self.invalid_newicks:
+            fh = StringIO(invalid)
+            self.assertEqual(_newick_sniffer(fh), (False, {}))
+            fh.close()
+
+
+# Allow running this test module directly from the command line.
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/io/tests/test_ordination.py b/skbio/io/tests/test_ordination.py
new file mode 100644
index 0000000..5075cbc
--- /dev/null
+++ b/skbio/io/tests/test_ordination.py
@@ -0,0 +1,228 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from six import StringIO
+
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+
+from skbio.io import OrdinationFormatError
+from skbio.io.ordination import (
+    _ordination_to_ordination_results, _ordination_results_to_ordination,
+    _ordination_sniffer)
+from skbio.stats.ordination import (
+    OrdinationResults, assert_ordination_results_equal)
+from skbio.util import get_data_path
+
+
+class OrdinationTestData(TestCase):
+    def setUp(self):
+        self.valid_fps = map(
+            get_data_path,
+            ['ordination_L&L_CA_data_scores', 'ordination_example3_scores',
+             'ordination_PCoA_sample_data_3_scores',
+             'ordination_example2_scores'])
+
+        # Store filepath, regex for matching the error message that should be
+        # raised when reading the file, and whether the file should be matched
+        # by the sniffer (True) or not (False).
+        self.invalid_fps = map(lambda e: (get_data_path(e[0]), e[1], e[2]), [
+            ('empty', 'end of file.*Eigvals header', False),
+            ('whitespace_only', 'Eigvals header not found', False),
+            ('ordination_error1', 'Eigvals header not found', False),
+            ('ordination_error2',
+             'Proportion explained header not found', False),
+            ('ordination_error3', 'Species header not found', True),
+            ('ordination_error4', 'Site header not found', True),
+            ('ordination_error5', 'Biplot header not found', True),
+            ('ordination_error6', 'Site constraints header not found', True),
+            ('ordination_error7', 'empty line', False),
+            ('ordination_error8', '9.*Proportion explained.*8', True),
+            ('ordination_error9', '2 values.*1 in row 1', True),
+            ('ordination_error10', '2 values.*1 in row 1', True),
+            ('ordination_error11', 'Site constraints ids and site ids', True),
+            ('ordination_error12', '9.*Eigvals.*8', True),
+            ('ordination_error13', '9.*Proportion explained.*8', True),
+            ('ordination_error14', 'Site is 0: 9 x 0', True),
+            ('ordination_error15', '9 values.*8 in row 1', True),
+            ('ordination_error16', 'Biplot is 0: 3 x 0', True),
+            ('ordination_error17', '3 values.*2 in row 1', True),
+            ('ordination_error18',
+             'proportion explained.*eigvals: 8 != 9', True),
+            ('ordination_error19',
+             'coordinates.*species.*eigvals: 1 != 2', True),
+            ('ordination_error20', 'coordinates.*site.*eigvals: 1 != 2', True),
+            ('ordination_error21', 'one eigval', False),
+            ('ordination_error22', 'end of file.*blank line', False),
+            ('ordination_error23', 'end of file.*Proportion explained section',
+             True),
+            ('ordination_error24', 'end of file.*row 2.*Species section', True)
+        ])
+
+
+class OrdinationResultsReaderWriterTests(OrdinationTestData):
+    def setUp(self):
+        super(OrdinationResultsReaderWriterTests, self).setUp()
+
+        # define in-memory results, one for each of the valid files in
+        # self.valid_fps
+
+        # CA results
+        eigvals = np.array([0.0961330159181, 0.0409418140138])
+        species = np.array([[0.408869425742, 0.0695518116298],
+                            [-0.1153860437, -0.299767683538],
+                            [-0.309967102571, 0.187391917117]])
+        site = np.array([[-0.848956053187, 0.882764759014],
+                         [-0.220458650578, -1.34482000302],
+                         [1.66697179591, 0.470324389808]])
+        biplot = None
+        site_constraints = None
+        prop_explained = None
+        species_ids = ['Species1', 'Species2', 'Species3']
+        site_ids = ['Site1', 'Site2', 'Site3']
+        ca_scores = OrdinationResults(eigvals=eigvals, species=species,
+                                      site=site, biplot=biplot,
+                                      site_constraints=site_constraints,
+                                      proportion_explained=prop_explained,
+                                      species_ids=species_ids,
+                                      site_ids=site_ids)
+        # CCA results
+        eigvals = np.array([0.366135830393, 0.186887643052, 0.0788466514249,
+                            0.082287840501, 0.0351348475787, 0.0233265839374,
+                            0.0099048981912, 0.00122461669234,
+                            0.000417454724117])
+        species = np.loadtxt(
+            get_data_path('ordination_exp_Ordination_CCA_species'))
+        site = np.loadtxt(get_data_path('ordination_exp_Ordination_CCA_site'))
+        biplot = np.array([[-0.169746767979, 0.63069090084, 0.760769036049],
+                           [-0.994016563505, 0.0609533148724,
+                            -0.0449369418179],
+                           [0.184352565909, -0.974867543612, 0.0309865007541]])
+        site_constraints = np.loadtxt(
+            get_data_path('ordination_exp_Ordination_CCA_site_constraints'))
+        prop_explained = None
+        species_ids = ['Species0', 'Species1', 'Species2', 'Species3',
+                       'Species4', 'Species5', 'Species6', 'Species7',
+                       'Species8']
+        site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5',
+                    'Site6', 'Site7', 'Site8', 'Site9']
+        cca_scores = OrdinationResults(eigvals=eigvals, species=species,
+                                       site=site, biplot=biplot,
+                                       site_constraints=site_constraints,
+                                       proportion_explained=prop_explained,
+                                       species_ids=species_ids,
+                                       site_ids=site_ids)
+        # PCoA results
+        eigvals = np.array([0.512367260461, 0.300719094427, 0.267912066004,
+                            0.208988681078, 0.19169895326, 0.16054234528,
+                            0.15017695712, 0.122457748167, 0.0])
+        species = None
+        site = np.loadtxt(get_data_path('ordination_exp_Ordination_PCoA_site'))
+        biplot = None
+        site_constraints = None
+        prop_explained = np.array([0.267573832777, 0.15704469605,
+                                   0.139911863774, 0.109140272454,
+                                   0.100111048503, 0.0838401161912,
+                                   0.0784269939011, 0.0639511763509, 0.0])
+        species_ids = None
+        site_ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593',
+                    'PC.355', 'PC.607', 'PC.634']
+        pcoa_scores = OrdinationResults(eigvals=eigvals, species=species,
+                                        site=site, biplot=biplot,
+                                        site_constraints=site_constraints,
+                                        proportion_explained=prop_explained,
+                                        species_ids=species_ids,
+                                        site_ids=site_ids)
+        # RDA results
+        eigvals = np.array([25.8979540892, 14.9825779819, 8.93784077262,
+                            6.13995623072, 1.68070536498, 0.57735026919,
+                            0.275983624351])
+        species = np.loadtxt(
+            get_data_path('ordination_exp_Ordination_RDA_species'))
+        site = np.loadtxt(get_data_path('ordination_exp_Ordination_RDA_site'))
+        biplot = np.array([[0.422650019179, -0.559142585857, -0.713250678211],
+                           [0.988495963777, 0.150787422017, -0.0117848614073],
+                           [-0.556516618887, 0.817599992718, 0.147714267459],
+                           [-0.404079676685, -0.9058434809, -0.127150316558]])
+        site_constraints = np.loadtxt(
+            get_data_path('ordination_exp_Ordination_RDA_site_constraints'))
+        prop_explained = None
+        species_ids = ['Species0', 'Species1', 'Species2', 'Species3',
+                       'Species4', 'Species5']
+        site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5',
+                    'Site6', 'Site7', 'Site8', 'Site9']
+        rda_scores = OrdinationResults(eigvals=eigvals, species=species,
+                                       site=site, biplot=biplot,
+                                       site_constraints=site_constraints,
+                                       proportion_explained=prop_explained,
+                                       species_ids=species_ids,
+                                       site_ids=site_ids)
+
+        self.ordination_results_objs = [ca_scores, cca_scores, pcoa_scores,
+                                        rda_scores]
+
+    def test_read_valid_files(self):
+        for fp, obj in zip(self.valid_fps, self.ordination_results_objs):
+                obs = _ordination_to_ordination_results(fp)
+                assert_ordination_results_equal(obs, obj)
+
+    def test_read_invalid_files(self):
+        for invalid_fp, error_msg_regexp, _ in self.invalid_fps:
+            with self.assertRaisesRegexp(OrdinationFormatError,
+                                         error_msg_regexp):
+                _ordination_to_ordination_results(invalid_fp)
+
+    def test_write(self):
+        for fp, obj in zip(self.valid_fps, self.ordination_results_objs):
+            fh = StringIO()
+            _ordination_results_to_ordination(obj, fh)
+            obs = fh.getvalue()
+            fh.close()
+
+            with open(fp, 'U') as fh:
+                exp = fh.read()
+
+            npt.assert_equal(obs, exp)
+
+    def test_roundtrip_read_write(self):
+        for fp in self.valid_fps:
+            # Read.
+            obj1 = _ordination_to_ordination_results(fp)
+
+            # Write.
+            fh = StringIO()
+            _ordination_results_to_ordination(obj1, fh)
+            fh.seek(0)
+
+            # Read.
+            obj2 = _ordination_to_ordination_results(fh)
+            fh.close()
+
+            assert_ordination_results_equal(obj1, obj2)
+
+
+class SnifferTests(OrdinationTestData):
+    def setUp(self):
+        super(SnifferTests, self).setUp()
+
+    def test_matches_and_nonmatches(self):
+        # Sniffer should match all valid files, and will match some invalid
+        # ones too because it doesn't exhaustively check the entire file.
+        for fp in self.valid_fps:
+            self.assertEqual(_ordination_sniffer(fp), (True, {}))
+
+        for fp, _, expected_sniffer_match in self.invalid_fps:
+            self.assertEqual(_ordination_sniffer(fp),
+                             (expected_sniffer_match, {}))
+
+
+# Allow running this test module directly from the command line.
+if __name__ == '__main__':
+    main()
diff --git a/skbio/io/tests/test_phylip.py b/skbio/io/tests/test_phylip.py
new file mode 100644
index 0000000..3cf9ec0
--- /dev/null
+++ b/skbio/io/tests/test_phylip.py
@@ -0,0 +1,102 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from six import StringIO
+
+from unittest import TestCase, main
+
+from skbio.io import PhylipFormatError
+from skbio.io.phylip import _alignment_to_phylip
+from skbio import Alignment, DNA, RNA
+from skbio.util import get_data_path
+
+
+class AlignmentWriterTests(TestCase):
+    def setUp(self):
+        # ids all same length, seqs longer than 10 chars
+        dna_3_seqs = Alignment([
+            DNA('..ACC-GTTGG..', id="d1"),
+            DNA('TTACCGGT-GGCC', id="d2"),
+            DNA('.-ACC-GTTGC--', id="d3")])
+
+        # id lengths from 0 to 10, with mixes of numbers, characters, and
+        # spaces. sequence characters are a mix of cases and gap characters.
+        # sequences are shorter than 10 chars
+        variable_length_ids = Alignment([
+            RNA('.-ACGU'),
+            RNA('UGCA-.', id='a'),
+            RNA('.ACGU-', id='bb'),
+            RNA('ugca-.', id='1'),
+            RNA('AaAaAa', id='abcdefghij'),
+            RNA('GGGGGG', id='ab def42ij')])
+
+        # sequences with 20 chars = exactly two chunks of size 10
+        two_chunks = Alignment([
+            DNA('..ACC-GTTGG..AATGC.C', id='foo'),
+            DNA('TTACCGGT-GGCCTA-GCAT', id='bar')])
+
+        # single sequence with more than two chunks
+        single_seq_long = Alignment([
+            DNA('..ACC-GTTGG..AATGC.C----', id='foo')])
+
+        # single sequence with only a single character (minimal writeable
+        # alignment)
+        single_seq_short = Alignment([DNA('-')])
+
+        # alignments that can be written in phylip format
+        self.objs = [dna_3_seqs, variable_length_ids, two_chunks,
+                     single_seq_long, single_seq_short]
+        self.fps = map(get_data_path,
+                       ['phylip_dna_3_seqs', 'phylip_variable_length_ids',
+                        'phylip_two_chunks', 'phylip_single_seq_long',
+                        'phylip_single_seq_short'])
+
+        # alignments that cannot be written in phylip format, paired with their
+        # expected error message regexps
+        self.invalid_objs = [
+            # no seqs
+            (Alignment([]), 'one sequence'),
+
+            # no positions
+            (Alignment([DNA('', id="d1"),
+                        DNA('', id="d2")]), 'one position'),
+
+            # ids too long
+            (Alignment([RNA('ACGU', id="foo"),
+                        RNA('UGCA', id="alongsequenceid")]),
+             '10.*alongsequenceid')
+        ]
+
+    def test_write(self):
+        for fp, obj in zip(self.fps, self.objs):
+            fh = StringIO()
+            _alignment_to_phylip(obj, fh)
+            obs = fh.getvalue()
+            fh.close()
+
+            with open(fp, 'U') as fh:
+                exp = fh.read()
+
+            self.assertEqual(obs, exp)
+
+    def test_write_invalid_alignment(self):
+        for invalid_obj, error_msg_regexp in self.invalid_objs:
+            fh = StringIO()
+            with self.assertRaisesRegexp(PhylipFormatError, error_msg_regexp):
+                _alignment_to_phylip(invalid_obj, fh)
+
+            # ensure nothing was written to the file before the error was
+            # thrown. TODO remove this check when #674 is resolved
+            obs = fh.getvalue()
+            fh.close()
+            self.assertEqual(obs, '')
+
+
+# Allow running this test module directly from the command line.
+if __name__ == '__main__':
+    main()
diff --git a/skbio/io/tests/test_qseq.py b/skbio/io/tests/test_qseq.py
new file mode 100644
index 0000000..d98a82f
--- /dev/null
+++ b/skbio/io/tests/test_qseq.py
@@ -0,0 +1,294 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+
+from future.builtins import zip
+
+import unittest
+from skbio import (SequenceCollection, BiologicalSequence, NucleotideSequence,
+                   DNASequence, RNASequence, ProteinSequence)
+
+from skbio import read
+from skbio.util import get_data_path
+from skbio.io import QSeqFormatError
+from skbio.io.qseq import (_qseq_to_generator,
+                           _qseq_to_sequence_collection, _qseq_sniffer)
+
+
+def _drop_kwargs(kwargs, *args):
+    for arg in args:
+        if arg in kwargs:
+            kwargs.pop(arg)
+
+
+class TestQSeqBase(unittest.TestCase):
+    """Shared qseq fixtures.
+
+    ``valid_files`` entries are (filepath, list-of-reader-kwargs, expected
+    (id, sequence, quality-scores) tuples); every kwargs dict in the list
+    must yield the same expected records.  ``invalid_files`` entries are
+    (filepath, list-of-reader-kwargs, error-message fragments, expected
+    exception type).
+    """
+
+    def setUp(self):
+        self.valid_files = [
+            (get_data_path('qseq_single_seq_sanger'), [
+                {'variant': 'sanger'},
+                {'phred_offset': 33},
+            ], [
+                ('sanger_1:3:34:-30:30#0/2',
+                 'ACGTACGTACGTACGTACGTACGTACTTTTTTTTTTACGTACGTACGTACGT'
+                 'ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC', [
+                     26, 26, 29, 31, 33, 34, 36, 37, 38, 39, 41, 42,
+                     43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+                     55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+                     67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+                     79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+                     91, 92, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93,
+                     93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93,
+                     93, 93, 93, 93, 93, 93, 93, 93, 93, 93])
+            ]),
+
+            (get_data_path('qseq_multi_seq_illumina1.3'), [
+                {'variant': 'illumina1.3'},
+                {'phred_offset': 64}
+            ], [
+                ('illumina_1:3:34:-30:30#0/1', 'ACG....ACGTAC', [
+                    50, 53, 2, 2, 2, 2, 50, 2, 3, 5, 6, 7, 8]),
+                ('illumina_1:3:35:-30:30#0/2', 'ACGTA.AATAAAC', [
+                    39, 37, 20, 33, 1, 33, 38, 40, 55, 49, 1, 1, 38])
+            ]),
+
+            # seq_num selects individual records; filter=False keeps reads
+            # that failed the machine's quality filter.
+            (get_data_path('qseq_multi_seq_illumina1.3'), [
+                {'variant': 'illumina1.3', 'filter': False, 'seq_num': 1},
+                {'phred_offset': 64, 'filter': False, 'seq_num': 2},
+                {'variant': 'illumina1.3', 'filter': False, 'seq_num': 3,
+                 'constructor': ProteinSequence},
+                {'phred_offset': 64, 'filter': False, 'seq_num': 4,
+                 'constructor': DNASequence},
+            ], [
+                ('illumina_1:3:34:-30:30#0/1', 'ACG....ACGTAC', [
+                    50, 53, 2, 2, 2, 2, 50, 2, 3, 5, 6, 7, 8]),
+                ('illumina_1:3:34:30:-30#0/1', 'CGGGCATTGCA', [
+                    3, 7, 7, 7, 3, 33, 51, 36, 7, 3, 1]),
+                ('illumina_1:3:35:-30:30#0/2', 'ACGTA.AATAAAC', [
+                    39, 37, 20, 33, 1, 33, 38, 40, 55, 49, 1, 1, 38]),
+                ('illumina_1:3:35:30:-30#0/3', 'CATTTAGGA.TGCA', [
+                    52, 42, 38, 44, 43, 1, 6, 46, 43, 11, 39, 40, 54, 13])
+            ])
+        ]
+
+        self.invalid_files = [
+            (get_data_path('whitespace_only'), [
+                {},
+                {'variant': 'sanger'}
+            ], [
+                'blank line',
+            ], QSeqFormatError),
+
+            (get_data_path('tsv_10_fields'), [
+                {},
+                {'variant': 'sanger'},
+                {'variant': 'solexa'}
+            ], [
+                'read',
+                '[1, 3]'
+            ], QSeqFormatError),
+
+            (get_data_path('tsv_8_fields'), [
+                {},
+                {'variant': 'sanger'},
+                {'variant': 'solexa'}
+            ], [
+                '8',
+                '10 or 11'
+            ], QSeqFormatError),
+
+
+            (get_data_path('qseq_invalid_filter'), [
+                {},
+                {'phred_offset': 33},
+                {'variant': 'solexa'},
+                {'variant': 'illumina1.3'},
+                {'variant': 'illumina1.8'}
+            ], [
+                'filter',
+                '0 or 1',
+            ], QSeqFormatError),
+
+            (get_data_path('qseq_invalid_read'), [
+                {},
+                {'phred_offset': 33},
+                {'variant': 'solexa'},
+                {'variant': 'illumina1.3'},
+                {'variant': 'illumina1.8'}
+            ], [
+                'read',
+                '[1, 3]',
+            ], QSeqFormatError),
+
+            (get_data_path('qseq_invalid_x'), [
+                {},
+                {'phred_offset': 33},
+                {'variant': 'solexa'},
+                {'variant': 'illumina1.3'},
+                {'variant': 'illumina1.8'}
+            ], [
+                'x',
+                'integer',
+            ], QSeqFormatError),
+
+            (get_data_path('qseq_invalid_y'), [
+                {},
+                {'phred_offset': 33},
+                {'variant': 'solexa'},
+                {'variant': 'illumina1.3'},
+                {'variant': 'illumina1.8'}
+            ], [
+                'y',
+                'integer',
+            ], QSeqFormatError),
+
+            (get_data_path('qseq_invalid_lane'), [
+                {},
+                {'phred_offset': 33},
+                {'variant': 'solexa'},
+                {'variant': 'illumina1.3'},
+                {'variant': 'illumina1.8'}
+            ], [
+                'lane',
+                'positive integer',
+            ], QSeqFormatError),
+
+            (get_data_path('qseq_invalid_tile'), [
+                {},
+                {'phred_offset': 33},
+                {'variant': 'solexa'},
+                {'variant': 'illumina1.3'},
+                {'variant': 'illumina1.8'}
+            ], [
+                'tile',
+                'positive integer',
+            ], QSeqFormatError)
+        ]
+
+
+class TestQSeqToGenerator(TestQSeqBase):
+
+    def setUp(self):
+        super(TestQSeqToGenerator, self).setUp()
+        self.valid_files += [
+            (get_data_path('empty'), [{}, {'variant': 'sanger'}], [])
+        ]
+
+        self.invalid_files += [
+            (get_data_path('qseq_single_seq_sanger'), [
+                {'variant': 'illumina1.3'},
+                {'variant': 'illumina1.8'}
+            ], [
+                'out of range',
+                '[0, 62]'
+            ], ValueError)
+        ]
+
+    def test_invalid_files(self):
+        for invalid, kwargs, errors, etype in self.invalid_files:
+            with self.assertRaises(etype) as cm:
+                for kwarg in kwargs:
+                    _drop_kwargs(kwarg, 'seq_num')
+                    list(_qseq_to_generator(invalid, **kwarg))
+            for e in errors:
+                self.assertIn(e, str(cm.exception))
+
+    def test_valid_files(self):
+        for valid, kwargs, components in self.valid_files:
+            for kwarg in kwargs:
+                _drop_kwargs(kwarg, 'seq_num')
+                constructor = kwarg.get('constructor', BiologicalSequence)
+                expected = [constructor(c[1], id=c[0], quality=c[2]) for
+                            c in components]
+
+                observed = list(_qseq_to_generator(valid, **kwarg))
+                self.assertEqual(len(expected), len(observed))
+                for o, e in zip(observed, expected):
+                    self.assertTrue(o.equals(e))
+
+
+class TestQSeqToSequenceCollection(TestQSeqBase):
+    def setUp(self):
+        super(TestQSeqToSequenceCollection, self).setUp()
+        self.valid_files += [
+            (get_data_path('empty'), [{}, {'variant': 'sanger'}],
+             SequenceCollection([]))
+        ]
+
+    def test_invalid_files(self):
+        for invalid, kwargs, errors, etype in self.invalid_files:
+            with self.assertRaises(etype) as cm:
+                for kwarg in kwargs:
+                    _drop_kwargs(kwarg, 'seq_num')
+                    _qseq_to_sequence_collection(invalid, **kwarg)
+            for e in errors:
+                self.assertIn(e, str(cm.exception))
+
+    def test_valid_files(self):
+        for valid, kwargs, components in self.valid_files:
+            for kwarg in kwargs:
+                _drop_kwargs(kwarg, 'seq_num')
+                constructor = kwarg.get('constructor', BiologicalSequence)
+                expected = SequenceCollection([constructor(c[1], id=c[0],
+                                               quality=c[2]) for c in
+                                               components])
+
+                observed = _qseq_to_sequence_collection(valid, **kwarg)
+                # TODO remove when #656 is resolved
+                self.assertEqual(observed, expected)
+                for o, e in zip(observed, expected):
+                    self.assertTrue(o.equals(e))
+
+
+class TestQSeqToSequences(TestQSeqBase):
+    def test_invalid_files(self):
+        for constructor in [BiologicalSequence, NucleotideSequence,
+                            DNASequence, RNASequence, ProteinSequence]:
+            for invalid, kwargs, errors, etype in self.invalid_files:
+                with self.assertRaises(etype) as cm:
+                    for kwarg in kwargs:
+                        _drop_kwargs(kwarg, 'constructor', 'filter')
+
+                        read(invalid, format='qseq', verify=False,
+                             into=constructor, **kwarg)
+                for e in errors:
+                    self.assertIn(e, str(cm.exception))
+
+    def test_valid_files(self):
+        for constructor in [BiologicalSequence, NucleotideSequence,
+                            DNASequence, RNASequence, ProteinSequence]:
+            for valid, kwargs, components in self.valid_files:
+                for kwarg in kwargs:
+                    _drop_kwargs(kwarg, 'constructor', 'filter')
+
+                    seq_num = kwarg.get('seq_num', 1)
+                    c = components[seq_num - 1]
+                    expected = constructor(c[1], id=c[0], quality=c[2])
+
+                    observed = read(valid, into=constructor, format='qseq',
+                                    verify=False, **kwarg)
+                    self.assertTrue(observed.equals(expected))
+
+
+class TestQSeqSniffer(TestQSeqBase):
+
+    def setUp(self):
+        super(TestQSeqSniffer, self).setUp()
+        self.invalid_files += [
+            (get_data_path('empty'), None, None, None)
+        ]
+
+    def test_qseq_sniffer_valid_files(self):
+        for valid, _, _ in self.valid_files:
+            self.assertEqual(_qseq_sniffer(valid), (True, {}))
+
+    def test_qseq_sniffer_invalid_files(self):
+        for invalid, _, _, _ in self.invalid_files:
+            self.assertEqual(_qseq_sniffer(invalid), (False, {}))
+
+# Allow running this test module directly from the command line.
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/io/tests/test_registry.py b/skbio/io/tests/test_registry.py
new file mode 100644
index 0000000..0b7e4b5
--- /dev/null
+++ b/skbio/io/tests/test_registry.py
@@ -0,0 +1,1228 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+try:
+    # future >= 0.12
+    from future.backports.test.support import import_fresh_module
+except ImportError:
+    from future.standard_library.test.support import import_fresh_module
+from io import StringIO
+import os
+
+import unittest
+import warnings
+from tempfile import mkstemp
+
+from skbio.io import (DuplicateRegistrationError, FormatIdentificationWarning,
+                      InvalidRegistrationError, UnrecognizedFormatError,
+                      ArgumentOverrideWarning)
+from skbio.io._registry import empty_file_sniffer
+from skbio.util import TestingUtilError, get_data_path
+
+
class TestClass(object):
    """Minimal value object used to round-trip data through the registry."""

    def __init__(self, l):
        self.list = l

    def __eq__(self, other):
        # Equality requires the exact same class, not merely a compatible
        # one; readers must not hand back knockoff instances.
        return type(self) is type(other) and self.list == other.list

    def __repr__(self):
        return "{0}({1})".format(type(self).__name__, self.list)
+
+
class TestClassA(TestClass):
    # Distinct subclass: instances with equal contents but different classes
    # must compare unequal (see TestClass.__eq__).
    pass
+
+
class TestClassB(TestClass):
    # Second distinct subclass, used alongside TestClassA to verify that
    # registrations are keyed per class.
    pass
+
+
class RegistryTest(unittest.TestCase):
    """Base fixture: a fresh registry module plus two temporary files.

    ``self.module`` is a freshly imported ``skbio.io._registry``;
    ``self.fp1``/``self.fp2`` are paths to empty temp files and
    ``self.fd1``/``self.fd2`` their OS-level descriptors.
    """

    def setUp(self):
        # A fresh module needs to be imported for each test because the
        # registry stores its state in the module which is by default
        # only loaded once.
        self.module = import_fresh_module('skbio.io._registry')
        self.fd1, self.fp1 = mkstemp()
        self.fd2, self.fp2 = mkstemp()

    def tearDown(self):
        # Close each descriptor *before* removing its path: removing a file
        # that is still open fails on Windows, and the original
        # remove-then-close order leaked the descriptor whenever the remove
        # raised.
        os.close(self.fd1)
        os.remove(self.fp1)
        os.close(self.fd2)
        os.remove(self.fp2)
+
+
class TestRegisterAndGetReader(RegistryTest):
    """Tests for reader registration, lookup, and duplicate detection."""

    def test_get_reader_no_match(self):
        # Unknown formats yield None rather than raising.
        self.assertEqual(None, self.module.get_reader('not_a_format',
                                                      TestClass))

    def test_register_reader_on_generator(self):
        # Omitting the class argument (or passing None explicitly) registers
        # a generator reader; both lookup spellings must find it.
        @self.module.register_reader('format1')
        def format1_reader_generator(fh):
            yield

        self.assertEqual(format1_reader_generator,
                         self.module.get_reader('format1'))

        self.assertEqual(format1_reader_generator,
                         self.module.get_reader('format1', None))

        @self.module.register_reader('format2', None)
        def format2_reader_generator(fh):
            yield

        self.assertEqual(format2_reader_generator,
                         self.module.get_reader('format2'))

        self.assertEqual(format2_reader_generator,
                         self.module.get_reader('format2', None))

    def test_get_reader_when_only_writer_exists(self):
        # A writer registration must not satisfy a reader lookup.
        @self.module.register_writer('format', TestClass)
        def format_reader(fh):
            return

        self.assertEqual(None, self.module.get_reader('format', TestClass))

    def test_register_reader_on_many(self):
        # Lookups are keyed on the (format, class) pair; a reader registered
        # for one class must not be returned for another.
        @self.module.register_reader('format1', TestClassA)
        def format1_reader(fh):
            return

        @self.module.register_reader('format1', TestClassB)
        def format1_reader_b(fh):
            return

        @self.module.register_reader('format2', TestClassA)
        def format2_reader(fh):
            return

        @self.module.register_reader('format3', TestClassB)
        def format3_reader(fh):
            return

        self.assertEqual(format1_reader,
                         self.module.get_reader('format1', TestClassA))

        self.assertEqual(format1_reader_b,
                         self.module.get_reader('format1', TestClassB))

        self.assertEqual(format2_reader,
                         self.module.get_reader('format2', TestClassA))

        self.assertEqual(None,
                         self.module.get_reader('format2', TestClassB))

        self.assertEqual(None,
                         self.module.get_reader('format3', TestClassA))

        self.assertEqual(format3_reader,
                         self.module.get_reader('format3', TestClassB))

    def test_register_reader_over_existing(self):
        # Re-registering the same (format, class) pair must raise; the error
        # message names the format, the kind ('reader'), and the class.
        with self.assertRaises(DuplicateRegistrationError) as cm:
            @self.module.register_reader('format1', TestClassA)
            def format1_reader(fh):
                return

            @self.module.register_reader('format1', TestClassA)
            def duplicate_format1_reader(fh):
                return

        self.assertTrue('format1' in str(cm.exception))
        self.assertTrue('reader' in str(cm.exception))
        self.assertTrue(TestClassA.__name__ in str(cm.exception))

    def test_register_reader_generator_with_not_a_generator(self):
        # A class-less reader must be a generator. The failure surfaces
        # lazily: only when the returned iterator is advanced with next().
        @self.module.register_reader('format')
        def not_a_generator(fp):
            return 'oops'

        fh = StringIO()
        with self.assertRaises(InvalidRegistrationError):
            next(self.module.get_reader('format')(fh))
        fh.close()
+
+
class TestRegisterAndGetWriter(RegistryTest):
    """Tests for writer registration, lookup, and duplicate detection."""

    def test_get_writer_no_match(self):
        # Unknown formats yield None rather than raising.
        self.assertEqual(None, self.module.get_writer('not_a_format',
                                                      TestClass))

    def test_get_writer_when_only_reader_exists(self):
        # A reader registration must not satisfy a writer lookup.
        @self.module.register_reader('format', TestClass)
        def format_reader(fh):
            return

        self.assertEqual(None, self.module.get_writer('format', TestClass))

    def test_register_writer_on_generator(self):
        # Omitting the class argument (or passing None explicitly) registers
        # a generator writer; both lookup spellings must find it.
        @self.module.register_writer('format1')
        def format1_writer_generator(obj, fh):
            yield

        self.assertEqual(format1_writer_generator,
                         self.module.get_writer('format1'))

        self.assertEqual(format1_writer_generator,
                         self.module.get_writer('format1', None))

        @self.module.register_writer('format2', None)
        def format2_writer_generator(obj, fh):
            yield

        self.assertEqual(format2_writer_generator,
                         self.module.get_writer('format2'))

        self.assertEqual(format2_writer_generator,
                         self.module.get_writer('format2', None))

    def test_register_writer_on_many(self):
        # Lookups are keyed on the (format, class) pair; a writer registered
        # for one class must not be returned for another.
        @self.module.register_writer('format1', TestClassA)
        def format1_writer(obj, fh):
            return

        @self.module.register_writer('format1', TestClassB)
        def format1_writer_b(obj, fh):
            return

        @self.module.register_writer('format2', TestClassA)
        def format2_writer(obj, fh):
            return

        @self.module.register_writer('format3', TestClassB)
        def format3_writer(obj, fh):
            return

        self.assertEqual(format1_writer,
                         self.module.get_writer('format1', TestClassA))

        self.assertEqual(format1_writer_b,
                         self.module.get_writer('format1', TestClassB))

        self.assertEqual(format2_writer,
                         self.module.get_writer('format2', TestClassA))

        self.assertEqual(None,
                         self.module.get_writer('format2', TestClassB))

        self.assertEqual(None,
                         self.module.get_writer('format3', TestClassA))

        self.assertEqual(format3_writer,
                         self.module.get_writer('format3', TestClassB))

    def test_register_writer_over_existing(self):
        # Re-registering the same (format, class) pair must raise; the error
        # message names the format, the kind ('writer'), and the class.
        with self.assertRaises(DuplicateRegistrationError) as cm:
            @self.module.register_writer('format1', TestClassA)
            def format1_writer(obj, fh):
                return

            @self.module.register_writer('format1', TestClassA)
            def duplicate_format1_writer(obj, fh):
                return

        self.assertTrue('format1' in str(cm.exception))
        self.assertTrue('writer' in str(cm.exception))
        self.assertTrue(TestClassA.__name__ in str(cm.exception))

    def test_register_writer_over_existing_generator(self):
        # Duplicate generator (class-less) writers must also raise; the
        # message mentions 'generator' instead of a class name.
        with self.assertRaises(DuplicateRegistrationError) as cm:
            @self.module.register_writer('format1')
            def format1_writer(obj, fh):
                return

            @self.module.register_writer('format1')
            def duplicate_format1_writer(obj, fh):
                return

        self.assertTrue('format1' in str(cm.exception))
        self.assertTrue('writer' in str(cm.exception))
        self.assertTrue('generator' in str(cm.exception))
+
+
class TestRegisterAndGetSniffer(RegistryTest):
    """Tests for sniffer registration, lookup, and exception safety."""

    def test_get_sniffer_no_match(self):
        # Unknown formats yield None rather than raising.
        self.assertEqual(None, self.module.get_sniffer('not_a_format'))

    def test_register_sniffer_on_many(self):
        # Sniffers return a (matched, kwargs) tuple; each registration is
        # retrievable by its format name.
        @self.module.register_sniffer('format1')
        def format1_sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_sniffer('format2')
        def format2_sniffer(fh):
            return '2' in fh.readline(), {}

        @self.module.register_sniffer('format3')
        def format3_sniffer(fh):
            return '3' in fh.readline(), {}

        self.assertEqual(format1_sniffer,
                         self.module.get_sniffer('format1'))

        self.assertEqual(format2_sniffer,
                         self.module.get_sniffer('format2'))

        self.assertEqual(format3_sniffer,
                         self.module.get_sniffer('format3'))

    def test_register_sniffer_over_existing(self):
        # Re-registering a sniffer for the same format must raise, naming
        # the format in the message.
        with self.assertRaises(DuplicateRegistrationError) as cm:
            @self.module.register_sniffer('format1')
            def format1_sniffer(fh):
                return False, {}

            @self.module.register_sniffer('format1')
            def duplicate_format1_sniffer(fh):
                return False, {}

        self.assertTrue('format1' in str(cm.exception))

    def test_sniffer_warns_on_exception(self):
        # A sniffer that raises is converted into a (False, {}) result plus
        # a FormatIdentificationWarning instead of propagating the error.
        @self.module.register_sniffer('format')
        def format_sniffer(fh):
            raise TestingUtilError("Sniffer will return False and warn.")

        fh = StringIO()
        sniffer = self.module.get_sniffer('format')
        # With warnings escalated to errors, the warning is observable...
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("error")
            with self.assertRaises(FormatIdentificationWarning):
                sniffer(fh)

        # ...and with warnings suppressed, the fallback result comes back.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("ignore")
            result, kwargs = sniffer(fh)
            self.assertFalse(result)
            self.assertEqual({}, kwargs)

        fh.close()
+
+
class TestListReadFormats(RegistryTest):
    """list_read_formats must report exactly the formats that have a
    reader registered for the queried class."""

    def test_no_read_formats(self):
        @self.module.register_reader('format1', TestClassA)
        def this_isnt_on_clsB(fh):
            return

        self.assertEqual([], self.module.list_read_formats(TestClassB))

    def test_one_read_format(self):
        @self.module.register_reader('format1', TestClass)
        def format1_cls(fh):
            return

        self.assertEqual(['format1'], self.module.list_read_formats(TestClass))

    def test_many_read_formats(self):
        # Readers: formats 1-3 for TestClassA, formats 3-4 for TestClassB.
        @self.module.register_reader('format1', TestClassA)
        def reader1_a(fh):
            return

        @self.module.register_reader('format2', TestClassA)
        def reader2_a(fh):
            return

        @self.module.register_reader('format3', TestClassA)
        def reader3_a(fh):
            return

        @self.module.register_reader('format3', TestClassB)
        def reader3_b(fh):
            return

        @self.module.register_reader('format4', TestClassB)
        def reader4_b(fh):
            return

        # A writer registration must never surface as a readable format.
        @self.module.register_writer('format5', TestClassA)
        def writer5_a(fh):
            return

        formats = self.module.list_read_formats(TestClassA)
        for present in ('format1', 'format2', 'format3'):
            self.assertTrue(present in formats)
        for absent in ('format4', 'format5'):
            self.assertTrue(absent not in formats)
+
+
class TestListWriteFormats(RegistryTest):
    """list_write_formats must report exactly the formats that have a
    writer registered for the queried class."""

    def test_no_write_formats(self):
        @self.module.register_writer('format1', TestClassA)
        def this_isnt_on_clsB(fh):
            return

        self.assertEqual([], self.module.list_write_formats(TestClassB))

    def test_one_write_format(self):
        @self.module.register_writer('format1', TestClass)
        def format1_cls(fh):
            return

        self.assertEqual(['format1'],
                         self.module.list_write_formats(TestClass))

    def test_many_write_formats(self):
        # Writers: formats 1-3 for TestClassA, formats 3-4 for TestClassB.
        @self.module.register_writer('format1', TestClassA)
        def writer1_a(fh):
            return

        @self.module.register_writer('format2', TestClassA)
        def writer2_a(fh):
            return

        @self.module.register_writer('format3', TestClassA)
        def writer3_a(fh):
            return

        @self.module.register_writer('format3', TestClassB)
        def writer3_b(fh):
            return

        @self.module.register_writer('format4', TestClassB)
        def writer4_b(fh):
            return

        # A reader registration must never surface as a writable format.
        @self.module.register_reader('format5', TestClassA)
        def reader5_a(fh):
            return

        formats = self.module.list_write_formats(TestClassA)
        for present in ('format1', 'format2', 'format3'):
            self.assertTrue(present in formats)
        for absent in ('format4', 'format5'):
            self.assertTrue(absent not in formats)
+
+
class TestSniff(RegistryTest):
    """Tests for format sniffing: matching, ambiguity, class filtering,
    open mode, and stream-position preservation."""

    def setUp(self):
        super(TestSniff, self).setUp()

        # Four sniffers keyed on a digit in the first line; only format3
        # and format4 also have a reader for TestClass, which matters for
        # the cls-filtered tests below.
        @self.module.register_sniffer('format1')
        def format1_sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_sniffer('format2')
        def format2_sniffer(fh):
            return '2' in fh.readline(), {}

        @self.module.register_sniffer('format3')
        def format3_sniffer(fh):
            return '3' in fh.readline(), {}

        @self.module.register_sniffer('format4')
        def format4_sniffer(fh):
            return '4' in fh.readline(), {}

        @self.module.register_reader('format3', TestClass)
        def reader3(fh):
            return

        @self.module.register_reader('format4', TestClass)
        def reader4(fh):
            return

    def test_no_matches(self):
        fh = StringIO(u"no matches here")
        # The unrecognized-format error must identify the offending file.
        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.module.sniff(fh)
        self.assertTrue(str(fh) in str(cm.exception))

        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.module.sniff(fh, cls=TestClass)

        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.module.sniff(fh, cls=TestClassB)

        fh.close()

    def test_one_match(self):
        fh = StringIO(u"contains a 3")
        # sniff returns a (format_name, kwargs) tuple; index 0 is the name.
        self.assertEqual('format3', self.module.sniff(fh)[0])

    def test_many_matches(self):
        fh = StringIO(u"1234 will match all")
        # An ambiguous sniff raises and lists every candidate format.
        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.module.sniff(fh)
        self.assertTrue("format1" in str(cm.exception))
        self.assertTrue("format2" in str(cm.exception))
        self.assertTrue("format3" in str(cm.exception))
        self.assertTrue("format4" in str(cm.exception))
        fh.close()

    def test_no_matches_w_cls(self):
        fh = StringIO(u"no matches here")
        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.module.sniff(fh, cls=TestClass)
        self.assertTrue(str(fh) in str(cm.exception))
        fh.close()

    def test_one_match_w_cls(self):
        fh = StringIO(u"contains a 3")
        self.assertEqual('format3',
                         self.module.sniff(fh, cls=TestClass)[0])

    def test_many_matches_w_cls(self):
        fh = StringIO(u"1234 will only format3 and format4 w/ class")
        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.module.sniff(fh, cls=TestClass)
        self.assertTrue("format1" not in str(cm.exception))
        self.assertTrue("format2" not in str(cm.exception))
        # Only format3 and format4 have a definition for the provided class.
        self.assertTrue("format3" in str(cm.exception))
        self.assertTrue("format4" in str(cm.exception))
        fh.close()

    def test_that_mode_is_used(self):
        fp = self.fp1
        with open(fp, 'w') as fh:
            fh.write('@\n#\n')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            self.assertEqual(self.expected_mode, fh.mode)
            return '@' in fh.readline(), {}

        # When given a filepath, sniff apparently opens it in universal
        # newline mode ('U') by default; an explicit mode overrides that.
        self.expected_mode = 'U'
        self.module.sniff(fp)

        self.expected_mode = 'r'
        self.module.sniff(fp, mode='r')

    def test_position_not_mutated_real_file(self):
        @self.module.register_sniffer('format')
        def sniffer(fh):
            return True, {}

        # Sniffing an already-open file must restore its read position.
        with open(get_data_path('real_file')) as fh:
            fh.seek(2)
            self.module.sniff(fh)
            self.assertEqual('b\n', next(fh))

    def test_position_not_mutated_fileish(self):
        @self.module.register_sniffer('format')
        def sniffer(fh):
            return True, {}

        # Same guarantee for in-memory file-like objects.
        fh = StringIO(u'a\nb\nc\nd\n')
        fh.seek(2)
        self.module.sniff(fh)
        self.assertEqual('b\n', next(fh))
+
+
class TestRead(RegistryTest):
    """Tests for the top-level read() entry point: format/into resolution,
    sniffer verification, kwarg forwarding, open mode, and FileSentinel
    handling for auxiliary files."""

    def test_format_and_into_are_none(self):
        # At least one of 'format' or 'into' must be supplied.
        fh = StringIO()
        with self.assertRaises(ValueError):
            self.module.read(fh)

        fh.close()

    def test_format_is_none(self):
        # With no explicit format, read() sniffs to discover it.
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_reader('format', TestClass)
        def reader(fh):
            return TestClass([int(x) for x in fh.read().split('\n')])

        instance = self.module.read(fh, into=TestClass)
        self.assertEqual(TestClass([1, 2, 3, 4]), instance)
        fh.close()

    def test_into_is_none(self):
        # With no 'into' target, read() returns the reader's generator.
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_reader('format')
        def reader(fh):
            for value in [int(x) for x in fh.read().split('\n')]:
                yield value

        generator = self.module.read(fh, format='format')
        first_run = True
        for a, b in zip(generator, [1, 2, 3, 4]):
            if first_run:
                fh.seek(3)
                first_run = False
            self.assertEqual(a, b)
            # The position set by the caller must stay untouched while the
            # generator keeps yielding.
            self.assertEqual(3, fh.tell())
        fh.close()

    def test_into_is_none_real_file(self):
        # When read() opened the file itself, exhausting the generator
        # must close the underlying handle.
        fp = self.fp1
        with open(fp, 'w') as fh:
            fh.write('1\n2\n3\n4')

        self._test_fh = None

        @self.module.register_reader('format')
        def reader(fh):
            self._test_fh = fh
            for value in [int(x) for x in fh.read().split('\n')]:
                yield value

        generator = self.module.read(fp, format='format')
        for a, b in zip(generator, [1, 2, 3, 4]):
            self.assertEqual(a, b)
        self.assertTrue(self._test_fh.closed)

    def test_reader_does_not_exist(self):
        # Missing readers raise; the message names the target (class or
        # 'generator') and the requested format.
        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.module.read(None, format='not_a_format', into=TestClass)

        self.assertTrue(TestClass.__name__ in str(cm.exception))
        self.assertTrue('not_a_format' in str(cm.exception))

        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.module.read(None, format='not_a_format2')

        self.assertTrue('generator' in str(cm.exception))
        self.assertTrue('not_a_format2' in str(cm.exception))

    def test_reader_is_not_generator(self):
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_reader('format')
        def reader(fh):
            # Not a generator!
            return TestClass([int(x) for x in fh.read().split('\n')])

        # The failure surfaces lazily, when the result is advanced.
        with self.assertRaises(InvalidRegistrationError):
            next(self.module.read(fh, format='format'))

        fh.close()

    def test_reader_empty_file(self):
        # Sniffing an empty file raises with the '<emptyfile>' marker.
        fh = StringIO()

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return False, {}

        @self.module.register_reader('format', TestClass)
        def reader(fh):
            return

        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.module.read(fh, into=TestClass)
        self.assertIn('<emptyfile>', str(cm.exception))

        fh.close()

    def test_reader_exists_with_verify_true(self):
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            self.was_verified = True
            return '1' in fh.readline(), {}

        @self.module.register_reader('format', TestClass)
        def reader(fh):
            return TestClass([int(x) for x in fh.read().split('\n')])

        # verify=True runs the sniffer even when the format is explicit...
        self.was_verified = False
        instance = self.module.read(fh, format='format', into=TestClass,
                                    verify=True)
        self.assertEqual(TestClass([1, 2, 3, 4]), instance)
        self.assertTrue(self.was_verified)

        # Remove if read-context management is supported in the future.
        fh.seek(0)

        # ...and verification is the default behavior.
        self.was_verified = False
        instance = self.module.read(fh, format='format', into=TestClass)
        self.assertEqual(TestClass([1, 2, 3, 4]), instance)
        self.assertTrue(self.was_verified)

        fh.close()

    def test_warning_raised(self):
        # A sniffer that rejects the data while the format was forced
        # produces a FormatIdentificationWarning (escalated to an error
        # here by the 'error' filter).
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            self.was_verified = True
            return False, {}

        @self.module.register_reader('format', TestClass)
        def reader(fh):
            return TestClass([int(x) for x in fh.read().split('\n')])

        with warnings.catch_warnings(record=True):
            warnings.simplefilter("error")
            with self.assertRaises(FormatIdentificationWarning):
                self.was_verified = False
                instance = self.module.read(fh, format='format',
                                            into=TestClass, verify=True)
                self.assertEqual(TestClass([1, 2, 3, 4]), instance)
                self.assertTrue(self.was_verified)

        with warnings.catch_warnings(record=True):
            warnings.simplefilter("error")
            with self.assertRaises(FormatIdentificationWarning):
                self.was_verified = False
                instance = self.module.read(fh, format='format',
                                            into=TestClass)
                self.assertEqual(TestClass([1, 2, 3, 4]), instance)
                self.assertTrue(self.was_verified)

        fh.close()

    def test_reader_exists_with_verify_false(self):
        # verify=False skips the sniffer entirely.
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            self.was_verified = True
            return '1' in fh.readline(), {}

        @self.module.register_reader('format', TestClass)
        def reader(fh):
            return TestClass([int(x) for x in fh.read().split('\n')])

        self.was_verified = False
        instance = self.module.read(fh, format='format', into=TestClass,
                                    verify=False)
        self.assertEqual(TestClass([1, 2, 3, 4]), instance)
        self.assertFalse(self.was_verified)
        fh.close()

    def test_reader_exists_real_file(self):
        # read() accepts a filepath as well as a file-like object.
        fp = self.fp1
        with open(fp, 'w') as fh:
            fh.write('1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_reader('format', TestClass)
        def reader(fh):
            return TestClass([int(x) for x in fh.read().split('\n')])

        instance = self.module.read(fp, format='format', into=TestClass)
        self.assertEqual(TestClass([1, 2, 3, 4]), instance)

    def test_read_kwargs_passed_generator(self):
        # Sniffer-suggested kwargs and caller kwargs are merged and
        # forwarded to a generator reader.
        @self.module.register_sniffer('format')
        def sniffer(fh):
            return True, {'arg1': 15, 'arg2': 'abc'}

        @self.module.register_reader('format')
        def reader(fh, **kwargs):
            self.assertEqual(kwargs['arg1'], 15)
            self.assertEqual(kwargs['arg2'], 'abc')
            self.assertEqual(kwargs['arg3'], [1])
            yield

        next(self.module.read(StringIO(), format='format', arg3=[1]))

    def test_read_kwargs_passed_and_override(self):
        @self.module.register_sniffer('format')
        def sniffer(fh):
            return True, {'arg1': 15, 'arg2': 'abc', 'override': 30}

        @self.module.register_reader('format', TestClass)
        def reader(fh, **kwargs):
            self.assertEqual(kwargs['arg1'], 15)
            self.assertEqual(kwargs['arg2'], 'abc')
            self.assertEqual(kwargs['arg3'], [1])
            return

        self.module.read(StringIO(u'notempty'), into=TestClass, arg3=[1])

        with warnings.catch_warnings(record=True):
            warnings.simplefilter("error")
            # Should raise no warning and thus no error.
            self.module.read(StringIO(u'notempty'), into=TestClass, arg3=[1],
                             override=30)
            # Should raise a warning and thus an error.
            with self.assertRaises(ArgumentOverrideWarning):
                self.module.read(StringIO(u'notempty'), into=TestClass,
                                 arg3=[1], override=100)

    def test_that_mode_is_used(self):
        fp = self.fp1
        with open(fp, 'w') as fh:
            fh.write('1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_reader('format', TestClass)
        def reader(fh):
            self.assertEqual(self.expected_mode, fh.mode)
            return TestClass([int(x) for x in fh.read().split('\n')])

        # Filepaths are apparently opened in 'U' mode by default; an
        # explicit mode= overrides that.
        self.expected_mode = 'U'
        instance = self.module.read(fp, format='format', into=TestClass)
        self.assertEqual(TestClass([1, 2, 3, 4]), instance)

        self.expected_mode = 'r'
        instance = self.module.read(fp, format='format', into=TestClass,
                                    mode='r')
        self.assertEqual(TestClass([1, 2, 3, 4]), instance)

    def test_file_sentinel_many(self):
        # FileSentinel defaults mark reader parameters that accept
        # auxiliary filepaths; read() opens them and passes in handles.
        extra = get_data_path('real_file')
        extra_2 = get_data_path('real_file_2')
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_reader('format', TestClass)
        def reader(fh, extra=self.module.FileSentinel, other=2,
                   extra_2=self.module.FileSentinel):
            self.assertEqual('a\nb\nc\nd\ne\n', extra.read())
            self.assertEqual('!\n@\n#\n$\n%\nThe realest.\n', extra_2.read())
            return TestClass([int(x) for x in fh.read().split('\n')])

        instance = self.module.read(fh, format='format', into=TestClass,
                                    extra=extra, extra_2=extra_2)
        self.assertEqual(TestClass([1, 2, 3, 4]), instance)

        fh.close()

    def test_file_sentinel_converted_to_none(self):
        # Unsupplied FileSentinel parameters arrive as None.
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_reader('format', TestClass)
        def reader(fh, extra=self.module.FileSentinel, other=2,
                   extra_2=self.module.FileSentinel):
            self.assertIsNone(extra)
            self.assertIsNone(extra_2)
            return TestClass([int(x) for x in fh.read().split('\n')])

        instance = self.module.read(fh, format='format', into=TestClass)
        self.assertEqual(TestClass([1, 2, 3, 4]), instance)

        fh.close()

    def test_file_sentinel_pass_none(self):
        # Explicitly passing None behaves the same as omitting the kwarg.
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_reader('format', TestClass)
        def reader(fh, extra=self.module.FileSentinel, other=2,
                   extra_2=self.module.FileSentinel):
            self.assertIsNone(extra)
            self.assertIsNone(extra_2)
            return TestClass([int(x) for x in fh.read().split('\n')])

        instance = self.module.read(fh, format='format', into=TestClass,
                                    extra=None)
        self.assertEqual(TestClass([1, 2, 3, 4]), instance)

        fh.close()

    def test_file_sentinel_generator_many(self):
        # Same FileSentinel behavior for generator readers.
        extra = get_data_path('real_file')
        extra_2 = get_data_path('real_file_2')
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_reader('format')
        def reader(fh, extra=self.module.FileSentinel, other=2,
                   extra_2=self.module.FileSentinel):
            self.assertEqual('a\nb\nc\nd\ne\n', extra.read())
            self.assertEqual('!\n@\n#\n$\n%\nThe realest.\n', extra_2.read())
            yield TestClass([int(x) for x in fh.read().split('\n')])

        gen = self.module.read(fh, format='format', extra=extra,
                               extra_2=extra_2)
        self.assertEqual(TestClass([1, 2, 3, 4]), next(gen))

        fh.close()

    def test_file_sentinel_converted_to_none_generator(self):
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_reader('format')
        def reader(fh, extra=self.module.FileSentinel, other=2,
                   extra_2=self.module.FileSentinel):
            self.assertIsNone(extra)
            self.assertIsNone(extra_2)
            yield TestClass([int(x) for x in fh.read().split('\n')])

        gen = self.module.read(fh, format='format')
        self.assertEqual(TestClass([1, 2, 3, 4]), next(gen))

        fh.close()

    def test_file_sentinel_pass_none_generator(self):
        fh = StringIO(u'1\n2\n3\n4')

        @self.module.register_sniffer('format')
        def sniffer(fh):
            return '1' in fh.readline(), {}

        @self.module.register_reader('format')
        def reader(fh, extra=self.module.FileSentinel, other=2,
                   extra_2=self.module.FileSentinel):
            self.assertIsNone(extra)
            self.assertIsNone(extra_2)
            yield TestClass([int(x) for x in fh.read().split('\n')])

        gen = self.module.read(fh, format='format', extra=None)
        self.assertEqual(TestClass([1, 2, 3, 4]), next(gen))

        fh.close()
+
+
class TestWrite(RegistryTest):
    """Tests for the registry-level write() entry point.

    Relies on fixtures provided by RegistryTest.setUp (self.module,
    self.fp1, self.fp2).
    """

    def test_writer_does_not_exist(self):
        fh = StringIO()
        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.module.write({}, format='not_a_format', into=fh)

        self.assertTrue('not_a_format' in str(cm.exception))
        self.assertTrue(str(fh) in str(cm.exception))
        fh.close()

    def test_writer_exists(self):
        obj = TestClass(['1', '2', '3', '4'])
        fh = StringIO()

        @self.module.register_writer('format', TestClass)
        def writer(obj, fh):
            fh.write(u'\n'.join(obj.list))

        self.module.write(obj, format='format', into=fh)
        fh.seek(0)
        self.assertEqual("1\n2\n3\n4", fh.read())
        fh.close()

    def test_writer_exists_real_file(self):
        obj = TestClass(['1', '2', '3', '4'])
        fp = self.fp1

        @self.module.register_writer('format', TestClass)
        def writer(obj, fh):
            fh.write('\n'.join(obj.list))

        self.module.write(obj, format='format', into=fp)

        # Mode 'U' was deprecated and removed in Python 3.11; plain 'r'
        # already performs universal-newline translation on Python 3.
        with open(fp, 'r') as fh:
            self.assertEqual("1\n2\n3\n4", fh.read())

    def test_writer_passed_kwargs(self):
        @self.module.register_reader('format')
        def reader(fh):
            yield

        @self.module.register_writer('format')
        def writer(obj, fh, **kwargs):
            # Extra keyword arguments to write() must reach the writer.
            self.assertEqual(kwargs['passed'], True)

        generator = self.module.get_reader('format')(None)
        self.module.write(generator, format='format',
                          into=StringIO(), passed=True)

    def test_that_mode_is_used(self):
        """write() must open the destination with the requested mode."""
        obj = TestClass(['1', '2', '3', '4'])
        fp = self.fp1

        @self.module.register_writer('format', TestClass)
        def writer(obj, fh):
            fh.write('\n'.join(obj.list))
            self.assertEqual(self.expected_mode, fh.mode)

        self.expected_mode = 'w'
        self.module.write(obj, format='format', into=fp)

        with open(fp, 'r') as fh:
            self.assertEqual("1\n2\n3\n4", fh.read())

        fp = self.fp2
        self.expected_mode = 'a'
        self.module.write(obj, format='format', into=fp, mode='a')

        with open(fp, 'r') as fh:
            self.assertEqual("1\n2\n3\n4", fh.read())

    def test_file_sentinel_many(self):
        """FileSentinel kwargs on a writer resolve to writable handles."""
        fh = StringIO()

        @self.module.register_writer('format', TestClass)
        def writer(obj, fh, extra=self.module.FileSentinel, other=2,
                   extra_2=self.module.FileSentinel):
            extra.write('oh yeah...')
            extra_2.write('oh no...')

        self.module.write(TestClass([]), format='format', into=fh,
                          extra=self.fp1, extra_2=self.fp2)
        with open(self.fp1) as f1:
            self.assertEqual('oh yeah...', f1.read())

        with open(self.fp2) as f2:
            self.assertEqual('oh no...', f2.read())

        fh.close()

    def test_file_sentinel_converted_to_none(self):
        """FileSentinel defaults become None when no file kwargs are given."""
        fh = StringIO()

        @self.module.register_writer('format', TestClass)
        def writer(obj, fh, extra=self.module.FileSentinel, other=2,
                   extra_2=self.module.FileSentinel):
            self.assertIsNone(extra)
            self.assertIsNone(extra_2)

        self.module.write(TestClass([]), format='format', into=fh)

        fh.close()

    def test_file_sentinel_pass_none(self):
        """Explicitly passing extra=None behaves like omitting the kwarg."""
        fh = StringIO()

        @self.module.register_writer('format', TestClass)
        def writer(obj, fh, extra=self.module.FileSentinel, other=2,
                   extra_2=self.module.FileSentinel):
            self.assertIsNone(extra)
            self.assertIsNone(extra_2)

        self.module.write(TestClass([]), format='format', into=fh, extra=None)

        fh.close()
+
+
class TestInitializeOOPInterface(RegistryTest):
    """Tests that initialize_oop_interface attaches read/write methods.

    `read` should appear on any class with a registered reader; `write`
    only on classes that both have a registered writer and declare
    `default_write_format`.
    """

    def setUp(self):
        super(TestInitializeOOPInterface, self).setUp()

        class UnassumingClass(object):
            pass

        class ClassWithDefault(object):
            # Declares the format write() uses when none is specified.
            default_write_format = 'favfmt'

        self.unassuming_class = UnassumingClass
        self.class_with_default = ClassWithDefault

    def test_no_readers_writers(self):
        # Nothing registered: neither class gains read/write.
        self.module.initialize_oop_interface()
        self.assertFalse(hasattr(self.unassuming_class, 'read'))
        self.assertFalse(hasattr(self.unassuming_class, 'write'))
        self.assertFalse(hasattr(self.class_with_default, 'read'))
        self.assertFalse(hasattr(self.class_with_default, 'write'))

    def test_readers_only(self):
        @self.module.register_reader('favfmt', self.unassuming_class)
        def fvfmt_to_unasumming_class(fh):
            return

        @self.module.register_reader('favfmt')
        def fvfmt_to_gen(fh):
            yield

        @self.module.register_reader('favfmt2', self.unassuming_class)
        def fvfmt2_to_unasumming_class(fh):
            return

        self.module.initialize_oop_interface()

        # Only the class with registered readers gains `read`.
        self.assertTrue(hasattr(self.unassuming_class, 'read'))
        self.assertFalse(hasattr(self.unassuming_class, 'write'))
        self.assertFalse(hasattr(self.class_with_default, 'read'))
        self.assertFalse(hasattr(self.class_with_default, 'write'))

        # The generated docstring lists every readable format.
        self.assertIn('favfmt', self.unassuming_class.read.__doc__)
        self.assertIn('favfmt2', self.unassuming_class.read.__doc__)

    def test_writers_only(self):
        @self.module.register_writer('favfmt', self.class_with_default)
        def favfmt(fh):
            pass

        @self.module.register_writer('favfmt')
        def gen_to_favfmt(fh):
            pass

        @self.module.register_writer('favfmt2', self.class_with_default)
        def favfmt2(fh):
            pass

        self.module.initialize_oop_interface()

        # Only the class with registered writers gains `write`.
        self.assertFalse(hasattr(self.unassuming_class, 'read'))
        self.assertFalse(hasattr(self.unassuming_class, 'write'))
        self.assertFalse(hasattr(self.class_with_default, 'read'))
        self.assertTrue(hasattr(self.class_with_default, 'write'))

        # The generated docstring lists every writable format.
        self.assertIn('favfmt', self.class_with_default.write.__doc__)
        self.assertIn('favfmt2', self.class_with_default.write.__doc__)

    def test_writers_no_default_format(self):
        # A writable class without `default_write_format` is an error.
        @self.module.register_writer('favfmt', self.unassuming_class)
        def favfmt(fh):
            pass

        @self.module.register_writer('favfmt')
        def gen_to_favfmt(fh):
            pass

        @self.module.register_writer('favfmt2', self.unassuming_class)
        def favfmt2(fh):
            pass
        with self.assertRaises(NotImplementedError) as cm:
            self.module.initialize_oop_interface()

        self.assertIn('default_write_format', str(cm.exception))

    def test_readers_writers(self):
        # Both readers and writers registered across both classes.
        @self.module.register_reader('favfmt', self.unassuming_class)
        def fvfmt_to_unasumming_class(fh):
            return

        @self.module.register_reader('favfmt', self.class_with_default)
        def fvfmt_to_class_w_default(fh):
            return

        @self.module.register_reader('favfmt')
        def fvfmt_to_gen(fh):
            yield

        @self.module.register_reader('favfmt2', self.unassuming_class)
        def fvfmt2_to_unasumming_class(fh):
            return

        @self.module.register_reader('favfmt2', self.class_with_default)
        def fvfmt2_to_class_w_default(fh):
            return

        @self.module.register_writer('favfmt', self.class_with_default)
        def favfmt(fh):
            pass

        @self.module.register_writer('favfmt')
        def gen_to_favfmt(fh):
            pass

        @self.module.register_writer('favfmt2', self.class_with_default)
        def favfmt2(fh):
            pass

        self.module.initialize_oop_interface()

        self.assertTrue(hasattr(self.unassuming_class, 'read'))
        self.assertFalse(hasattr(self.unassuming_class, 'write'))

        self.assertTrue(hasattr(self.class_with_default, 'read'))
        self.assertTrue(hasattr(self.class_with_default, 'write'))

        self.assertIn('favfmt', self.unassuming_class.read.__doc__)
        self.assertIn('favfmt2', self.unassuming_class.read.__doc__)

        self.assertIn('favfmt', self.class_with_default.read.__doc__)
        self.assertIn('favfmt2', self.class_with_default.read.__doc__)

        self.assertIn('favfmt', self.class_with_default.write.__doc__)
        self.assertIn('favfmt2', self.class_with_default.write.__doc__)

    def test_read_kwargs_passed(self):
        self.was_called = False

        @self.module.register_sniffer('favfmt')
        def fvfmt_sniffer(fh):
            return True, {}

        @self.module.register_reader('favfmt', self.class_with_default)
        def fvfmt_to_class_w_default(fh, **kwargs):
            # kwargs given to the class-level read() must reach the reader.
            self.assertEqual('a', kwargs['a'])
            self.assertEqual(123, kwargs['b'])
            self.was_called = True

        self.module.initialize_oop_interface()
        fh = StringIO(u'notempty')
        self.class_with_default.read(fh, a='a', b=123)

        self.assertTrue(self.was_called)
        fh.close()

    def test_write_kwargs_passed(self):
        self.was_called = False

        @self.module.register_writer('favfmt', self.class_with_default)
        def favfmt(obj, fh, **kwargs):
            # kwargs given to the instance-level write() must reach the
            # writer.
            self.assertEqual('a', kwargs['a'])
            self.assertEqual(123, kwargs['b'])
            self.was_called = True

        self.module.initialize_oop_interface()
        fh = StringIO()
        self.class_with_default().write(fh, a='a', b=123)

        self.assertTrue(self.was_called)
        fh.close()
+
+
class TestEmptyFileSniffer(unittest.TestCase):
    """Checks that empty_file_sniffer flags files with no real content."""

    def _check(self, contents, expected):
        """Run the sniffer on `contents` and compare its verdict."""
        fh = StringIO(contents)
        try:
            verdict = empty_file_sniffer(fh)[0]
            if expected:
                self.assertTrue(verdict)
            else:
                self.assertFalse(verdict)
        finally:
            fh.close()

    def test_blank_file(self):
        self._check(u'', True)

    def test_whitespace_file(self):
        for contents in (u' ', u'\n', u'\t'):
            self._check(contents, True)

    def test_mixed_whitespace_file(self):
        self._check(u'\n\n\t\n \t \t \n \n \n\n', True)

    def test_not_empty_file(self):
        # A single non-whitespace character makes the file non-empty.
        self._check(u'\n\n\t\n a\t \t \n \n \n\n', False)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/io/tests/test_util.py b/skbio/io/tests/test_util.py
new file mode 100644
index 0000000..797cb9d
--- /dev/null
+++ b/skbio/io/tests/test_util.py
@@ -0,0 +1,123 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from six import StringIO, BytesIO
+
+import unittest
+import tempfile
+
+from skbio.io.util import open_file, open_files, _is_string_or_bytes
+
+
class TestFilePathOpening(unittest.TestCase):
    """Behavior of open_file with paths, handles and in-memory buffers."""

    def test_is_string_or_bytes(self):
        for value in ('foo', u'foo', b'foo'):
            self.assertTrue(_is_string_or_bytes(value))
        for value in (StringIO('bar'), [1]):
            self.assertFalse(_is_string_or_bytes(value))

    def test_file_closed(self):
        """File gets closed in decorator"""
        named = tempfile.NamedTemporaryFile('r')
        with open_file(named.name) as fh:
            pass
        self.assertTrue(fh.closed)

    def test_file_closed_harder(self):
        """File gets closed in decorator, even if exceptions happen."""
        named = tempfile.NamedTemporaryFile('r')
        try:
            with open_file(named.name) as fh:
                raise TypeError
        except TypeError:
            self.assertTrue(fh.closed)
        else:
            # Reaching here means the context manager swallowed the
            # exception instead of propagating it. No good.
            raise Exception("`open_file` didn't propagate exceptions")

    def test_filehandle(self):
        """Filehandles slip through untouched"""
        with tempfile.TemporaryFile('r') as fh:
            with open_file(fh) as ffh:
                self.assertTrue(fh is ffh)
            # And it doesn't close the file-handle
            self.assertFalse(fh.closed)

    def test_StringIO(self):
        """StringIO (useful e.g. for testing) slips through."""
        buf = StringIO("File contents")
        with open_file(buf) as fh:
            self.assertTrue(fh is buf)

    def test_BytesIO(self):
        """BytesIO (useful e.g. for testing) slips through."""
        buf = BytesIO(b"File contents")
        with open_file(buf) as fh:
            self.assertTrue(fh is buf)
+
+
class TestFilePathsOpening(unittest.TestCase):
    """Behavior of open_files (plural) with paths, handles and buffers."""

    def test_files_closed(self):
        """File gets closed in decorator"""
        f = tempfile.NamedTemporaryFile('r')
        f2 = tempfile.NamedTemporaryFile('r')
        filepath = f.name
        filepath2 = f2.name
        with open_files([filepath, filepath2]) as fhs:
            pass
        for fh in fhs:
            self.assertTrue(fh.closed)

    def test_files_closed_harder(self):
        """File gets closed in decorator, even if exceptions happen."""
        f = tempfile.NamedTemporaryFile('r')
        f2 = tempfile.NamedTemporaryFile('r')
        filepath = f.name
        filepath2 = f2.name
        try:
            with open_files([filepath, filepath2]) as fhs:
                raise TypeError
        except TypeError:
            for fh in fhs:
                self.assertTrue(fh.closed)
        else:
            # If we're here, no exceptions have been raised inside the
            # try clause, so the context manager swallowed them. No
            # good.
            raise Exception("`open_file` didn't propagate exceptions")

    def test_filehandle(self):
        """Filehandles slip through untouched"""
        with tempfile.TemporaryFile('r') as fh:
            with tempfile.TemporaryFile('r') as fh2:
                # Bug fix: this class tests open_files, but the original
                # called open_file (singular), which only passed the list
                # through untouched by accident.
                with open_files([fh, fh2]) as fhs:
                    self.assertTrue(fh is fhs[0])
                    self.assertTrue(fh2 is fhs[1])
                # And it doesn't close the file-handles
                for fh in fhs:
                    self.assertFalse(fh.closed)

    def test_StringIO(self):
        """StringIO (useful e.g. for testing) slips through."""
        f = StringIO("File contents")
        with open_files([f]) as fhs:
            self.assertTrue(fhs[0] is f)

    def test_BytesIO(self):
        """BytesIO (useful e.g. for testing) slips through."""
        f = BytesIO(b"File contents")
        with open_files([f]) as fhs:
            self.assertTrue(fhs[0] is f)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/io/util.py b/skbio/io/util.py
new file mode 100644
index 0000000..362d19d
--- /dev/null
+++ b/skbio/io/util.py
@@ -0,0 +1,99 @@
+r"""
+I/O utils (:mod:`skbio.io.util`)
+================================
+
+.. currentmodule:: skbio.io.util
+
+This module provides utility functions to deal with files and I/O in
+general.
+
+Functions
+---------
+
+.. autosummary::
+    :toctree: generated/
+
+    open_file
+    open_files
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from future.builtins import bytes, str
+
+from contextlib import contextmanager
+
+
def _is_string_or_bytes(s):
    """Return True if `s` is a text string (unicode or not) or a byte string.
    """
    return isinstance(s, (str, bytes))
+
+
def _get_filehandle(filepath_or, *args, **kwargs):
    """Return (handle, owned) for `filepath_or`.

    Strings/bytes are opened via ``open`` (owned=True); anything else is
    assumed to already be file-like and is passed through (owned=False).
    """
    if not _is_string_or_bytes(filepath_or):
        return filepath_or, False
    return open(filepath_or, *args, **kwargs), True
+
+
@contextmanager
def open_file(filepath_or, *args, **kwargs):
    """Context manager, like ``open``, but lets file handles and file like
    objects pass untouched.

    It is useful when implementing a function that can accept both
    strings and file-like objects (like numpy.loadtxt, etc).

    Parameters
    ----------
    filepath_or : str/bytes/unicode string or file-like
         If string, file to be opened using ``open``. Else, it is returned
         untouched.

    Other parameters
    ----------------
    args, kwargs : tuple, dict
        When `filepath_or` is a string, any extra arguments are passed
        on to the ``open`` builtin.

    Examples
    --------
    >>> with open_file('filename') as f:  # doctest: +SKIP
    ...     pass
    >>> fh = open('filename')             # doctest: +SKIP
    >>> with open_file(fh) as f:          # doctest: +SKIP
    ...     pass
    >>> fh.closed                         # doctest: +SKIP
    False
    >>> fh.close()                        # doctest: +SKIP

    """
    fh, own_fh = _get_filehandle(filepath_or, *args, **kwargs)
    try:
        yield fh
    finally:
        # Only close handles we opened ourselves; caller-supplied handles
        # stay open.
        if own_fh:
            fh.close()
+
+
@contextmanager
def open_files(fp_list, *args, **kwargs):
    """Context manager like `open_file`, but for a sequence of inputs.

    Each string/bytes entry in `fp_list` is opened with ``open`` (receiving
    `args`/`kwargs`) and closed on exit; file-like entries pass through
    untouched and are left open.
    """
    # Build (handle, owned) pairs explicitly instead of zip(*...) so an
    # empty fp_list yields an empty sequence rather than raising on unpack.
    pairs = [_get_filehandle(f, *args, **kwargs) for f in fp_list]
    try:
        yield tuple(fh for fh, _ in pairs)
    finally:
        for fh, is_own in pairs:
            if is_own:
                fh.close()
diff --git a/skbio/parse/__init__.py b/skbio/parse/__init__.py
new file mode 100644
index 0000000..18cec47
--- /dev/null
+++ b/skbio/parse/__init__.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
# Expose numpy's test runner so ``skbio.parse.test()`` runs this
# subpackage's test suite.
from numpy.testing import Tester
test = Tester().test
diff --git a/skbio/parse/record.py b/skbio/parse/record.py
new file mode 100644
index 0000000..03195e7
--- /dev/null
+++ b/skbio/parse/record.py
@@ -0,0 +1,491 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from future.utils import viewitems
+
+from numbers import Integral
+from copy import deepcopy
+
+from skbio.io import FieldError
+
+
def string_and_strip(*items):
    """Convert every item to a string and strip surrounding whitespace."""
    return [text.strip() for text in map(str, items)]
+
+
def DelimitedSplitter(delimiter=None, max_splits=1):
    """Returns function that returns stripped fields split by delimiter.

    Unlike the default behavior of split, max_splits can be negative, in
    which case it counts from the end instead of the start (i.e. splits
    at the _last_ delimiter, last two delimiters, etc. for -1, -2, etc.)
    However, if the delimiter is None (the default) and max_splits is
    negative, will not preserve internal spaces.

    Note: leaves empty fields in place.
    """
    is_int = isinstance(max_splits, Integral)
    if is_int and (max_splits > 0):
        def parser(line):
            return [i.strip() for i in line.split(delimiter, max_splits)]
    elif is_int and (max_splits < 0):
        def parser(line):
            to_insert = delimiter or ' '  # re-join fields w/ space if None
            fields = line.split(delimiter)
            if (fields == []) or (fields == ['']):
                return []  # empty string or only delimiter: return nothing
            # If there are fewer fields than requested splits, count from
            # the start instead of the end.  Bug fix: the comparison was
            # previously ``len(fields) < max_splits``, which is never true
            # for a negative max_splits, and the branch assigned
            # ``fields[0]`` (a string), which would have been joined
            # character by character had it ever run.
            if len(fields) < -max_splits:
                first_fields = fields[:1]
                last_fields = fields[1:]
            # otherwise, count off the last n fields and join the remainder
            else:
                first_fields = fields[:max_splits]
                last_fields = fields[max_splits:]
            pieces = []
            # if first_fields is empty, don't make up an extra empty string
            if first_fields:
                pieces.append(to_insert.join(first_fields))
            pieces.extend(last_fields)
            return [i.strip() for i in pieces]

    else:  # ignore max_splits if it was 0 or not an integer
        def parser(line):
            return [i.strip() for i in line.split(delimiter)]
    return parser
+
# The following provide examples of the kinds of functions DelimitedSplitter
# returns.
semi_splitter = DelimitedSplitter(';', None)  # split on every semicolon
space_pairs = DelimitedSplitter(None)  # split at first whitespace run
equal_pairs = DelimitedSplitter('=')  # split at first '='
last_colon = DelimitedSplitter(':', -1)  # split at last colon only
+
+
class GenericRecord(dict):

    """Holds data for a generic field -> value mapping.

    Override Required with a {name: prototype} mapping. Each required name
    gets a deepcopy of its prototype, so e.g. an empty-list prototype gives
    every instance its own appendable list.

    Raises AttributeError on attempt to delete a required item, but fails
    silently on attempt to delete an absent item.

    This class explicitly does _not_ override __getitem__ or __setitem__ for
    performance reasons: if you need to transform keys on get/set or to
    access items as attributes and vice versa, use MappedRecord instead.

    Note: iteration uses dict.items() directly rather than the py2-compat
    ``future.utils.viewitems`` shim; the behavior is identical on Python 3
    and this removes the block's only third-party dependency.
    """
    # Subclasses override with {name: prototype}.
    Required = {}

    def __init__(self, *args, **kwargs):
        """Initializes from dict-style args, then fills in required fields."""
        # Build a plain dict first to preserve dict's init interface, then
        # load it through update() so subclasses can hook key handling.
        incoming = {}
        dict.__init__(incoming, *args, **kwargs)
        self.update(incoming)
        for name, prototype in self.Required.items():
            if name not in self:
                # deepcopy so instances never share a mutable prototype
                self[name] = deepcopy(prototype)

    def __delitem__(self, item):
        """Deletes item or raises AttributeError if item is required.

        Note: Fails silently if item absent.
        """
        if item in self.Required:
            raise AttributeError("%s is a required item" % (item,))
        try:
            super(GenericRecord, self).__delitem__(item)
        except KeyError:
            pass

    def copy(self):
        """Returns a copy coerced to this class, deep-copying attributes."""
        duplicate = self.__class__(super(GenericRecord, self).copy())
        # don't forget to copy instance attributes as well as items!
        for attr, val in self.__dict__.items():
            duplicate.__dict__[attr] = deepcopy(val)
        return duplicate
+
+
class MappedRecord(GenericRecord):

    """GenericRecord that maps names of fields onto standardized names.

    Override Aliases in subclass for new mapping of OldName->NewName. Each
    OldName can have only one NewName, but it's OK if several OldNames map
    to the same NewName.

    Note: can access fields either as items or as attributes. In addition,
    can access either using nonstandard names or using standard names.

    Implementation note: currently, just a dict with appropriate get/set
    overrides and ability to access items as attributes. Attribute access
    is about 10x slower than in GenericRecord, so make sure you need the
    additional capabilities if you use MappedRecord instead of GenericRecord.

    WARNING: MappedRecord pretends to have every attribute, so will never raise
    AttributeError when trying to find an unknown attribute. This feature can
    cause surprising interactions when a Delegator is delegating its
    attributes to a MappedRecord, since any attributes defined in __init__ will
    be set in the MappedRecord and not in the object itself. The solution is
    to use the self.__dict__['AttributeName'] = foo syntax to force the
    attributes to be set in the object and not the MappedRecord to which it
    forwards.
    """
    # {OldName: NewName} translation table; subclasses override.
    Aliases = {}

    # Value handed back (after _copy) for absent fields/items.
    DefaultValue = None

    def _copy(self, prototype):
        """Returns a copy of item, avoiding deepcopy where it can."""
        if hasattr(prototype, 'copy'):
            return prototype.copy()
        elif isinstance(prototype, list):
            return prototype[:]
        elif (isinstance(prototype, str) or isinstance(prototype, int) or
              isinstance(prototype, tuple) or isinstance(prototype, complex) or
              prototype is None):
            return prototype  # immutable type: use directly
        else:
            return deepcopy(prototype)

    def __init__(self, *args, **kwargs):
        """Reads kwargs as properties of self, translating aliased keys."""
        # perform init on temp dict to preserve interface: will then translate
        # aliased keys when loading into self
        temp = {}
        unalias = self.unalias
        dict.__init__(temp, *args, **kwargs)
        for key, val in viewitems(temp):
            self[unalias(key)] = val
        for name, prototype in viewitems(self.Required):
            new_name = unalias(name)
            if new_name not in self:
                self[new_name] = self._copy(prototype)

    def unalias(self, key):
        """Returns dealiased name for key, or key if not in alias."""
        # TypeError covers unhashable keys, which can't be alias entries.
        try:
            return self.Aliases.get(key, key)
        except TypeError:
            return key

    def __getattr__(self, attr):
        """Returns default if field is absent, rather than raising exception.

        Lookup order: own items, instance dict, class attributes; dunder
        names are refused so protocol probes still see AttributeError.
        """
        if attr in self:
            return self[attr]
        elif attr in self.__dict__:
            return self.__dict__[attr]
        elif attr.startswith('__'):  # don't retrieve private class attrs
            raise AttributeError
        elif hasattr(self.__class__, attr):
            return getattr(self.__class__, attr)
        else:
            return self._copy(self.DefaultValue)

    def __setattr__(self, attr, value):
        """Sets attribute in self if absent, converting name if necessary."""
        normal_attr = self.unalias(attr)
        # we overrode __getattr__, so have to simulate getattr(self, attr) by
        # calling superclass method and checking for AttributeError.
        # BEWARE: dict defines __getattribute__, not __getattr__!
        try:
            super(MappedRecord, self).__getattribute__(normal_attr)
            super(MappedRecord, self).__setattr__(normal_attr, value)
        except AttributeError:
            # not a real attribute: store it as an item instead
            self[normal_attr] = value

    def __delattr__(self, attr):
        """Deletes attribute, converting name if necessary. Fails silently."""
        normal_attr = self.unalias(attr)
        if normal_attr in self.Required:
            raise AttributeError("%s is a required attribute" % (attr,))
        else:
            try:
                super(MappedRecord, self).__delattr__(normal_attr)
            except AttributeError:
                # not a real attribute: delete the item of the same name
                del self[normal_attr]

    def __getitem__(self, item):
        """Returns default if item is absent, rather than raising exception."""
        normal_item = self.unalias(item)
        return self.get(normal_item, self._copy(self.DefaultValue))

    def __setitem__(self, item, val):
        """Sets item, converting name if necessary."""
        super(MappedRecord, self).__setitem__(self.unalias(item), val)

    def __delitem__(self, item):
        """Deletes item, converting name if necessary. Fails silently."""
        normal_item = self.unalias(item)
        super(MappedRecord, self).__delitem__(normal_item)

    def __contains__(self, item):
        """Tests membership, converting name if necessary."""
        return super(MappedRecord, self).__contains__(self.unalias(item))

    def get(self, item, default):
        """Returns self[item] or default if not present. Silent on unhashable.
        """
        try:
            return super(MappedRecord, self).get(self.unalias(item), default)
        except TypeError:
            return default

    def setdefault(self, key, default=None):
        """Returns self[key] or default (and sets self[key]=default)"""
        return super(MappedRecord, self).setdefault(self.unalias(key), default)

    def update(self, *args, **kwargs):
        """Updates self with items in other, translating aliased keys."""
        temp = {}
        unalias = self.unalias
        temp.update(*args, **kwargs)
        for key, val in viewitems(temp):
            self[unalias(key)] = val
+
+# The following methods are useful for handling particular types of fields in
+# line-oriented parsers
+
+
def TypeSetter(constructor=None):
    """Return a function (obj, field, val) that sets obj.field = val.

    If `constructor` is supplied (any callable returning an object), the
    value is passed through it before being stored; otherwise the value is
    stored unchanged.
    """
    if not constructor:
        def setter(obj, field, val):
            setattr(obj, field, val)
    else:
        def setter(obj, field, val):
            setattr(obj, field, constructor(val))
    return setter
+
# Ready-made setters for the common built-in types; identity_setter stores
# the value unchanged.
int_setter = TypeSetter(int)
str_setter = TypeSetter(str)
list_setter = TypeSetter(list)
tuple_setter = TypeSetter(tuple)
dict_setter = TypeSetter(dict)
float_setter = TypeSetter(float)
complex_setter = TypeSetter(complex)
bool_setter = TypeSetter(bool)
identity_setter = TypeSetter()
+
+
def list_adder(obj, field, val):
    """Append `val` to the list in obj.field, creating the list if absent."""
    try:
        target = getattr(obj, field)
        target.append(val)
    except AttributeError:
        # field missing (or not appendable): start a fresh list
        setattr(obj, field, [val])
+
+
def dict_adder(obj, field, val):
    """Add to the dict in obj.field: a 2-sequence becomes a key/value pair,
    anything else becomes a key mapped to None.
    """
    try:
        key, value = val
    except (ValueError, TypeError):
        key, value = val, None
    try:
        mapping = getattr(obj, field)
        mapping[key] = value
    except AttributeError:
        # field missing: start a fresh dict
        setattr(obj, field, {key: value})
+
+
+class LineOrientedConstructor(object):
+
+    """Constructs a MappedRecord from a sequence of lines."""
+
    def __init__(self, Lines=None, LabelSplitter=space_pairs, FieldMap=None,
                 Constructor=MappedRecord, Strict=False):
        """Returns new LineOrientedConstructor.

        Fields:
            Lines: set of lines to construct record from (for convenience).
            Default is None.

            LabelSplitter: function that returns (label, data) tuple.
            Default is to split on first space and strip components.

            FieldMap: dict of {fieldname:handler} functions. Each function
            has the signature (obj, field, val) and performs an inplace
            action like setting field to val or appending val to field.
            Default is empty dict.

            Constructor: constructor for the resulting object.
            Default is MappedRecord: beware of using constructors that don't
            subclass MappedRecord.

            Strict: boolean controlling whether to raise error on unrecognized
            field. Default is False.
        """
        # `or []` / `or {}` also replace any falsy argument with a fresh
        # default, so instances never share a mutable default container.
        self.Lines = Lines or []
        self.LabelSplitter = LabelSplitter
        self.FieldMap = FieldMap or {}
        self.Constructor = Constructor
        self.Strict = Strict
+
+    def __call__(self, Lines=None):
+        """Returns the record constructed from Lines, or self.Lines"""
+        if Lines is None:
+            Lines = self.Lines
+        result = self.Constructor()
+        fieldmap = self.FieldMap
+        aka = result.unalias
+
+        splitter = self.LabelSplitter
+        for line in Lines:
+            # find out how many items we got, setting key and val appropiately
+            items = list(splitter(line))
+            num_items = len(items)
+            if num_items == 2:  # typical case: key-value pair
+                raw_field, val = items
+            elif num_items > 2:
+                raw_field = items[0]
+                val = items[1:]
+            elif len(items) == 1:
+                raw_field, val = items[0], None
+            elif not items:  # presumably had line with just a delimiter?
+                continue
+            # figure out if we know the field under its original name or as
+            # an alias
+            if raw_field in fieldmap:
+                field, mapper = raw_field, fieldmap[raw_field]
+            else:
+                new_field = aka(raw_field)
+                if new_field in fieldmap:
+                    field, mapper = new_field, fieldmap[new_field]
+                else:
+                    if self.Strict:
+                        raise FieldError(
+                            "Got unrecognized field %s" %
+                            (raw_field,))
+                    else:
+                        identity_setter(result, raw_field, val)
+                    continue
+            # if we found the field in the fieldmap, apply the correct function
+            try:
+                mapper(result, field, val)
+            except:  # Warning: this is a catchall for _any_ exception,
+                        # and may mask what's actually going wrong.
+                if self.Strict:
+                    raise FieldError("Could not handle line %s" % (line,))
+        return result
+
+
def FieldWrapper(fields, splitter=None, constructor=None):
    """Returns a one-line parser producing a field->val dict.

    fields is an ordered list of field names. splitter (default: a
    DelimitedSplitter splitting on whitespace) converts a line into a
    sequence of values. constructor, if supplied, is applied to the dict
    after construction.

    Call the result on a _single_ line, not a list of lines.

    Note: zip silently truncates at the shorter sequence, so it is the
    constructor's job to complain about missing fields; this is useful
    when trailing fields are optional.
    """
    if splitter is None:
        splitter = DelimitedSplitter(None, None)

    def parser(line):
        mapping = dict(zip(fields, splitter(line)))
        if constructor:
            return constructor(mapping)
        return mapping
    return parser
+
+
def StrictFieldWrapper(fields, splitter=None, constructor=None):
    """Returns dict containing field->val mapping, one level.

    fields should be list of fields, in order.
    splitter should be something like a DelimitedSplitter that converts the
        line into a sequence of fields.
    constructor is a callable applied to the dict after construction.

    Call result on a _single_ line, not a list of lines.

    Note that the constructor should take a dict and return an object of some
    useful type. Raises FieldError if the wrong number of fields are returned
    from the split.
    """
    if splitter is None:
        splitter = DelimitedSplitter(None, None)
    # single parser closure instead of two near-identical copies; the
    # constructor check is cheap enough to do per call
    n_fields = len(fields)

    def parser(line):
        items = splitter(line)
        if len(items) != n_fields:
            raise FieldError("Expected %s items but got %s: %s" %
                             (n_fields, len(items), items))
        result = dict(zip(fields, items))
        return constructor(result) if constructor else result
    return parser
+
+
def raise_unknown_field(field, data):
    """Raise FieldError reporting the offending field and its data."""
    msg = "Got unknown field %s with data %s" % (field, data)
    raise FieldError(msg)
+
+
class FieldMorpher(object):

    """When called, applies appropriate constructors to each value of dict.

    Initialize using a dict of fieldname:constructor pairs.
    """

    def __init__(self, Constructors, Default=raise_unknown_field):
        """Returns a new FieldMorpher, using appropriate constructors.

        Default handles unknown fields: it receives (key, value) and must
        return a (key, value) pair. The built-in Default raises a FieldError;
        other useful choices are returning the pair unchanged, or stripping
        both components before adding them.
        """
        self.Constructors = Constructors
        self.Default = Default

    def __call__(self, data):
        """Returns a new dict containing information converted from data."""
        cons = self.Constructors
        result = {}
        for key, val in viewitems(data):
            try:
                converter = cons[key]
            except KeyError:
                pass
            else:
                result[key] = converter(val)
                continue
            # unknown field: let the default handler remap it
            new_key, new_val = self.Default(key, val)
            if new_key in cons:
                # the remapped key is known, so convert the original value
                result[new_key] = cons[new_key](val)
            else:
                result[new_key] = new_val
        return result
diff --git a/skbio/parse/record_finder.py b/skbio/parse/record_finder.py
new file mode 100644
index 0000000..15b4951
--- /dev/null
+++ b/skbio/parse/record_finder.py
@@ -0,0 +1,193 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+"""Provides some classes for treating files as sequences of records.
+
+Typically more useful as subclasses. Covers the three main types of records:
+
+    DelimitedRecordFinder:  Records demarcated by an end line, e.g. '\\'
+    LabeledRecordFinder:    Records demarcated by a start line, e.g. '>label'
+    LineGrouper:            Records consisting of a certain number of lines.
+    TailedRecordFinder:     Records demarcated by an end mark, e.g. 'blah.'
+
+All the first classes ignore/delete blank lines and strip leading and trailing
whitespace.  TailedRecordFinder is functionally similar to
DelimitedRecordFinder except that it accepts an is_tail function instead of
a str.  Note that its default constructor is rstrip instead of strip.
+"""
+
+from skbio.io import RecordError
+from skbio.io.util import open_file
+
+
def is_empty(line):
    """Return True for empty lines and lines containing only whitespace."""
    if line:
        return line.isspace()
    return True


def DelimitedRecordFinder(delimiter, constructor=str.strip, ignore=is_empty,
                          keep_delimiter=True, strict=True):
    """Returns a parser yielding successive delimited records from lines.

    Each record is returned as a list of lines; the delimiter line is kept
    at the end of a record when keep_delimiter is True (default), otherwise
    discarded.

    constructor (default str.strip) transforms each line before use; pass
    None to leave lines untouched. Lines for which ignore(line) is True
    (default: blank/whitespace lines) are skipped.

    strict: if True (default), raise RecordError when lines remain after
    the final delimiter; otherwise yield them silently as a final record.
    """
    def parser(lines):
        record = []
        for raw in lines:
            line = raw if constructor is None else constructor(raw)
            if ignore(line):
                continue
            if line != delimiter:
                record.append(line)
                continue
            # hit the delimiter: emit the accumulated record
            if keep_delimiter:
                record.append(line)
            yield record
            record = []
        if not record:
            return
        if strict:
            raise RecordError("Found additional data after records: %s" %
                              (record))
        yield record
    return parser

# Example of the sort of parser DelimitedRecordFinder returns: GenBank
# records are terminated by a '//' line.
GbFinder = DelimitedRecordFinder('//')
+
+
def TailedRecordFinder(is_tail_line, constructor=str.rstrip, ignore=is_empty,
                       strict=True):
    """Returns a parser yielding successive tailed records from lines.

    Each record is a list of lines ending with the tail line (included).

    constructor (default str.rstrip, removing trailing whitespace/newlines)
    modifies each line before use; pass None to leave lines untouched.
    Lines for which ignore(line) is True (default: blank lines, tested
    after the constructor runs) are skipped.

    strict: if True (default), raise RecordError when lines remain after
    the last tail line (or no tail line exists); otherwise yield them.
    """
    def parser(lines):
        record = []
        for raw in lines:
            line = raw if constructor is None else constructor(raw)
            if ignore(line):
                continue
            record.append(line)
            if is_tail_line(line):
                # tail reached: emit the accumulated record
                yield record
                record = []

        leftover = record
        if not leftover:
            return
        if not strict:
            yield leftover
            return
        raise RecordError('lines exist after the last tail_line '
                          'or no tail_line at all')

    return parser
+
+
def LabeledRecordFinder(is_label_line, constructor=str.strip, ignore=is_empty):
    """Returns a parser yielding successive labeled records from a file.

    Each record is a list of lines beginning with its label line.

    constructor (default str.strip) transforms each line before use; pass
    None to leave lines untouched. Lines for which ignore(line) is True
    (default: blank lines) are skipped.

    NOTE: Does _not_ raise an exception if the last line is a label line:
    for some formats this is acceptable. Whatever parses the returned line
    groups into records must complain about incomplete records itself.
    """
    def parser(lines):
        with open_file(lines) as lines:
            record = []
            for raw in lines:
                # binary file handles yield bytes; decode them to text
                try:
                    raw = str(raw.decode('utf-8'))
                except AttributeError:
                    pass

                line = raw if constructor is None else constructor(raw)
                if ignore(line):
                    continue
                # a new label closes out the previous record, if any
                if is_label_line(line) and record:
                    yield record
                    record = []
                record.append(line)
            # emit the final record in the file
            if record:
                yield record
    return parser
+
+
def LineGrouper(num, constructor=str.strip, ignore=is_empty):
    """Returns a parser yielding lines in groups of num.

    constructor (default str.strip) transforms each line before use; pass
    None to leave lines untouched. Lines for which ignore(line) is True
    (default: blank/whitespace lines) are skipped.

    Raises RecordError when the number of non-blank lines is not an even
    multiple of num.
    """
    def parser(lines):
        group = []
        for raw in lines:
            line = raw if constructor is None else constructor(raw)
            if ignore(line):
                continue
            group.append(line)
            if len(group) == num:
                yield group
                group = []
        if group:
            raise RecordError("Non-blank lines not even multiple of %s" % num)
    return parser
diff --git a/skbio/parse/sequences/__init__.py b/skbio/parse/sequences/__init__.py
new file mode 100644
index 0000000..2afdfd3
--- /dev/null
+++ b/skbio/parse/sequences/__init__.py
@@ -0,0 +1,201 @@
+r"""
+Parse biological sequences (:mod:`skbio.parse.sequences`)
+=========================================================
+
+.. currentmodule:: skbio.parse.sequences
+
+This module provides functions for parsing sequence files in a variety of
+different formats. Two interfaces are provided for parsing sequence files:
+sequence iterators (high-level, recommended interface) and parsing functions
+(lower-level interface).
+
+Sequence iterator interface
+---------------------------
+The sequence iterator interface is the recommended way to parse sequence files.
+The ``load`` function provides a standard, high-level interface to iterate over
+sequence files regardless of file type or whether they are compressed. The
+method accepts single or multiple file paths and employs the correct file
+handlers, iterator objects, and parsers for the user.
+
+The benefit of the sequence iterator interface is that the type of the file and
+any file format details are abstracted away from the user. In this manner, the
+user does not need to worry about whether they're operating on FASTA or FASTQ
+files or any differences in the returns from their respective parsers.
+
+Classes
+^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   SequenceIterator
+   FastaIterator
+   FastqIterator
+
+Functions
+^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+    load
+
+Examples
+^^^^^^^^
+For the first set of sequence iterator examples, we're going to use the
+``load`` function. The ``load`` function is intended to operate on file paths,
+so let's create two files for it to use. The first one will be a regular FASTA
+file, and the second will be a gzip'd FASTQ file:
+
+>>> import os
+>>> import gzip
+>>> out = open('test_seqs.fna', 'w')
+>>> out.write(">s1\nATGC\n>s2\nATGGC\n")
+>>> out.close()
+>>> outgz = gzip.open('test_seqs.fq.gz', 'w')
>>> _ = outgz.write("@s3\nAATTGG\n+\nghghgh\n@s4\nAAA\n+\nfgh\n")
+>>> outgz.close()
+
+Now let's see what ``load`` can do:
+
+>>> it = load(['test_seqs.fna', 'test_seqs.fq.gz'], phred_offset=64)
+>>> for rec in it:
+...     print rec['SequenceID']
+...     print rec['Sequence']
+...     print rec['Qual']
+s1
+ATGC
+None
+s2
+ATGGC
+None
+s3
+AATTGG
+[39 40 39 40 39 40]
+s4
+AAA
+[38 39 40]
+
+To be polite, let's remove the files we just created:
+
+>>> os.remove('test_seqs.fna')
+>>> os.remove('test_seqs.fq.gz')
+
+In the following examples, we'll see how to use the sequence iterators directly
+instead of using ``load``.
+
+>>> from StringIO import StringIO
+>>> from skbio.parse.sequences import FastaIterator, FastqIterator
+
+In this first example, we're going to construct a FASTA iterator that is also
+paired with quality scores (e.g., as in 454 fasta/qual files).
+
+>>> seqs = StringIO(">seq1\n"
+...                 "ATGC\n"
+...                 ">seq2\n"
+...                 "TTGGCC\n")
+>>> qual = StringIO(">seq1\n"
+...                 "10 20 30 40\n"
+...                 ">seq2\n"
+...                 "1 2 3 4 5 6\n")
+>>> it = FastaIterator(seq=[seqs], qual=[qual])
+>>> for record in it:
+...     print record['Sequence']
+...     print record['Qual']
+ATGC
+[10 20 30 40]
+TTGGCC
+[1 2 3 4 5 6]
+
+In the next example, we're going to iterate over multiple FASTQ files at once.
+
+>>> seqs1 = StringIO("@seq1\n"
+...                  "ATGC\n"
+...                  "+\n"
+...                  "hhhh\n")
+>>> seqs2 = StringIO("@seq2\n"
+...                 "AATTGGCC\n"
+...                 ">seq2\n"
+...                 "abcdefgh\n")
+>>> it = FastqIterator(seq=[seqs1, seqs2], phred_offset=64)
+>>> for record in it:
+...     print record['Sequence']
+...     print record['Qual']
+ATGC
+[40 40 40 40]
+AATTGGCC
+[33 34 35 36 37 38 39 40]
+
+Finally, we can apply arbitrary transforms to the sequences during iteration.
+
+>>> seqs1 = StringIO("@seq1\n"
+...                  "ATGC\n"
+...                  "+\n"
+...                  "hhhh\n")
+>>> seqs2 = StringIO("@seq2\n"
+...                 "AATTGGCC\n"
+...                 ">seq2\n"
+...                 "abcdefgh\n")
+>>> def rev_f(st):
+...     st['Sequence'] = st['Sequence'][::-1]
+...     st['Qual'] = st['Qual'][::-1] if st['Qual'] is not None else None
+>>> it = FastqIterator(seq=[seqs1, seqs2], transform=rev_f, phred_offset=64)
+>>> for record in it:
+...     print record['Sequence']
+...     print record['Qual']
+CGTA
+[40 40 40 40]
+CCGGTTAA
+[40 39 38 37 36 35 34 33]
+
+Low-level parsing functions
+---------------------------
+Lower-level parsing functions are also made available in addition to the
+sequence iterator interface. These functions can be used to directly parse a
+single sequence file. They accept file paths, file handles, or file-like
+objects.
+
+Functions
+^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   parse_fasta
+   parse_fastq
+   parse_qual
+   write_clustal
+   parse_clustal
+
+Exceptions
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   FastqParseError
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from .fasta import parse_fasta, parse_qual
+from .fastq import parse_fastq
+from .clustal import parse_clustal, write_clustal
+from .iterator import FastaIterator, FastqIterator, SequenceIterator
+from .factory import load
+from ._exception import FastqParseError
+
+__all__ = ['write_clustal', 'parse_clustal',
+           'parse_fasta', 'parse_fastq', 'parse_qual', 'FastqIterator',
+           'FastaIterator', 'SequenceIterator', 'load', 'FastqParseError']
+
+test = Tester().test
diff --git a/skbio/parse/sequences/_exception.py b/skbio/parse/sequences/_exception.py
new file mode 100644
index 0000000..2258a93
--- /dev/null
+++ b/skbio/parse/sequences/_exception.py
@@ -0,0 +1,16 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from skbio.io import FileFormatError
+
+
class FastqParseError(FileFormatError):
    """Raised when a FASTQ formatted file cannot be parsed."""
diff --git a/skbio/parse/sequences/clustal.py b/skbio/parse/sequences/clustal.py
new file mode 100644
index 0000000..3160c8f
--- /dev/null
+++ b/skbio/parse/sequences/clustal.py
@@ -0,0 +1,100 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+
+from skbio.io import RecordError
+from skbio.parse.record import DelimitedSplitter
+import warnings
+
+
+def _label_line_parser(record, splitter, strict=True):
+    """Returns dict mapping list of data to labels, plus list with field order.
+
+    Field order contains labels in order encountered in file.
+
+    NOTE: doesn't care if lines are out of order in different blocks. This
+    should never happen anyway, but it's possible that this behavior should
+    be changed to tighten up validation.
+    """
+    labels = []
+    result = {}
+    for line in record:
+        try:
+            key, val = splitter(line.rstrip())
+        except:
+            if strict:
+                raise RecordError(
+                    "Failed to extract key and value from line %s" %
+                    line)
+            else:
+                continue  # just skip the line if not strict
+
+        if key in result:
+            result[key].append(val)
+        else:
+            result[key] = [val]
+            labels.append(key)
+    return result, labels
+
+
+def _is_clustal_seq_line(line):
+    """Returns True if line starts with a non-blank character but not 'CLUSTAL'
+
+    Useful for filtering other lines out of the file.
+    """
+    return line and (not line[0].isspace()) and\
+        (not line.startswith('CLUSTAL')) and (not line.startswith('MUSCLE'))
+
+last_space = DelimitedSplitter(None, -1)
+
+
+def _delete_trailing_number(line):
+    """Deletes trailing number from a line.
+
+    WARNING: does not preserve internal whitespace when a number is removed!
+    (converts each whitespace run to a single space). Returns the original
+    line if it didn't end in a number.
+    """
+    pieces = line.split()
+    try:
+        int(pieces[-1])
+        return ' '.join(pieces[:-1])
+    except ValueError:  # no trailing numbers
+        return line
+
+
def write_clustal(records, fh):
    """Write (label, sequence) pairs to fh in CLUSTAL format.

    Deprecated: use Alignment.write instead.
    """
    warnings.warn(
        "write_clustal is deprecated and will be removed in "
        "scikit-bio 0.3.0. Please update your code to use Alignment.write.",
        DeprecationWarning)
    chunk = 60  # residues per alignment block
    records = list(records)
    labels, sequences = zip(*records)
    label_width = max(len(label) for label in labels)
    longest = max(len(seq) for seq in sequences)
    fh.write('CLUSTAL\n\n')
    for start in range(0, longest, chunk):
        for label, seq in records:
            padded = '{:<{}}'.format(label, label_width)
            fh.write("%s\t%s\t\n" % (padded, seq[start:start + chunk]))
        fh.write("\n")
+
+
def parse_clustal(record, strict=True):
    """Yield (label, sequence) pairs from clustal-formatted lines.

    Deprecated: use Alignment.read instead.
    """
    warnings.warn(
        "parse_clustal is deprecated and will be removed in "
        "scikit-bio 0.3.0. Please update your code to use Alignment.read.",
        DeprecationWarning)

    # keep only sequence lines, then strip any trailing position numbers
    seq_lines = (_delete_trailing_number(line)
                 for line in record if _is_clustal_seq_line(line))
    data, labels = _label_line_parser(seq_lines, last_space, strict)

    for label in labels:
        yield label, ''.join(data[label])
diff --git a/skbio/parse/sequences/factory.py b/skbio/parse/sequences/factory.py
new file mode 100644
index 0000000..16e4667
--- /dev/null
+++ b/skbio/parse/sequences/factory.py
@@ -0,0 +1,147 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import os
+from gzip import open as gzip_open
+from itertools import chain
+
+from .iterator import FastaIterator, FastqIterator
+
+
# Maps recognized file extensions to (iterator class, file opener) pairs.
# Plain extensions use the builtin open; '.gz' variants use gzip.open so
# compressed files are transparently decompressed.
FILEEXT_MAP = {'fna': (FastaIterator, open),
               'fna.gz': (FastaIterator, gzip_open),
               'fasta': (FastaIterator, open),
               'fasta.gz': (FastaIterator, gzip_open),
               'qual': (FastaIterator, open),
               'qual.gz': (FastaIterator, gzip_open),
               'fastq': (FastqIterator, open),
               'fastq.gz': (FastqIterator, gzip_open),
               'fq': (FastqIterator, open),
               'fq.gz': (FastqIterator, gzip_open)}
+
+
def _determine_types_and_openers(files):
    """Attempt to determine the appropriate iterators and openers"""
    if files is None:
        return [], []

    iters = []
    openers = []
    for fpath in files:
        # a gzipped file keeps its inner extension, e.g. 'fasta.gz'
        if fpath.endswith('.gz'):
            ext = '.'.join(fpath.rsplit('.', 2)[-2:])
        else:
            ext = fpath.rsplit('.', 1)[-1]

        try:
            iter_cls, opener = FILEEXT_MAP[ext]
        except KeyError:
            raise IOError("Unknown filetype for %s" % fpath)

        iters.append(iter_cls)
        openers.append(opener)

    return iters, openers
+
+
+def _is_single_iterator_type(iters):
+    """Determine if there is a single or multiple type of iterator
+
+    If iters is [], this method returns True it considers the null case to be
+    a single iterator type.
+    """
+    if iters:
+        return len(set(iters)) == 1
+    else:
+        return True
+
+
+def _open_or_none(opener, f):
+    """Open a file or returns None"""
+    if not opener:
+        return None
+    else:
+        name = opener.__name__
+
+        if not os.path.exists(f):
+            raise IOError("%s does not appear to exist!" % f)
+        try:
+            opened = opener(f)
+        except IOError:
+            raise IOError("Could not open %s with %s!" % (f, name))
+
+        return opened
+
+
def load(seqs, qual=None, constructor=None, **kwargs):
    """Construct the appropriate iterator for all your processing needs

    This method will attempt to open all files correctly and to feed the
    appropriate objects into the correct iterators.

    Seqs can list multiple types of files (e.g., FASTA and FASTQ), but if
    multiple file types are specified, qual must be None

    Parameters
    ----------
    seqs : str or list of sequence file paths
    qual : str or list of qual file paths or None
    constructor : force a constructor on seqs
    kwargs : dict
        passed into the subsequent generators.

    Returns
    -------
    SequenceIterator
        the return is ``Iterable``

    See Also
    --------
    SequenceIterator
    FastaIterator
    FastqIterator

    """
    if not seqs:
        raise ValueError("Must supply sequences.")

    # normalize single paths to lists
    if isinstance(seqs, str):
        seqs = [seqs]
    if isinstance(qual, str):
        qual = [qual]

    if constructor is None:
        seq_iters, seq_openers = _determine_types_and_openers(seqs)
    else:
        seq_iters = [constructor] * len(seqs)
        seq_openers = [open] * len(seqs)

    qual_iters, qual_openers = _determine_types_and_openers(qual)

    seqs = [_open_or_none(op, path) for path, op in zip(seqs, seq_openers)]
    qual = [_open_or_none(op, path)
            for path, op in zip(qual or [], qual_openers or [])]

    if not qual:
        qual = None

    single_type = _is_single_iterator_type(seq_iters)
    if not single_type and qual is not None:
        # chaining Fasta/Fastq for sequence is easy, but it gets nasty quick
        # if seqs is a mix of fasta/fastq, with qual coming in as there
        # aren't 1-1 mappings. This could be addressed if necessary, but
        # seems like an unnecessary block of code right now
        raise ValueError("Cannot handle multiple sequence file types and qual "
                         "file(s) at the same time.")

    if single_type:
        return seq_iters[0](seq=seqs, qual=qual, **kwargs)
    return chain(*[iter_cls(seq=[fh], **kwargs)
                   for iter_cls, fh in zip(seq_iters, seqs)])
diff --git a/skbio/parse/sequences/fasta.py b/skbio/parse/sequences/fasta.py
new file mode 100644
index 0000000..747fc7d
--- /dev/null
+++ b/skbio/parse/sequences/fasta.py
@@ -0,0 +1,240 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+
+import warnings
+
+import numpy as np
+
+from skbio.io import RecordError
+from skbio.parse.record_finder import LabeledRecordFinder
+
+
def is_fasta_label(x):
    """Return True when *x* begins a FASTA record label (header) line."""
    # FASTA headers are marked by a leading '>' character.
    return x.startswith('>')
+
+
def is_blank_or_comment(x):
    """Return True when *x* is empty, whitespace-only, or a '#' comment."""
    if not x or x.isspace():
        return True
    return x.startswith('#')
+
+
# Record finder that chunks a line stream into FASTA records: each record
# starts at a '>' label line; blank lines and '#' comments are skipped.
FastaFinder = LabeledRecordFinder(is_fasta_label, ignore=is_blank_or_comment)
+
+
def parse_fasta(infile, strict=True, label_to_name=None, finder=FastaFinder,
                label_characters='>', ignore_comment=False):
    r"""Yield (label, sequence) pairs from a fasta-formatted file.

    .. note:: Deprecated in scikit-bio 0.2.0-dev
       ``parse_fasta`` will be removed in scikit-bio 0.3.0. It is replaced by
       ``read``, a more general deserializer for FASTA-formatted files that
       supports multiple file formats, automatic format detection, etc. via
       scikit-bio's I/O registry. See :mod:`skbio.io` for details.

    Parameters
    ----------
    infile : open file object or str
        An open fasta file or a path to a fasta file.
    strict : bool
        When ``True``, a ``RecordError`` is raised for any partial record: a
        label line with no associated sequence, or sequence data with no
        label line. When ``False``, partial records are silently skipped.
    label_to_name : function
        Optional callable applied to each sequence label (str -> str) before
        it is yielded. By default the label is yielded unmodified.
    finder : function
        The record finder used to split the input into records. In general
        this should not need to be changed.
    label_characters : str
        Characters that may begin a record's label line. In general this
        should not need to be changed.
    ignore_comment : bool
        When ``True``, only the first whitespace-separated field of the
        label (the sequence identifier) is yielded. Ignored when
        ``label_to_name`` is also supplied, since both operate on the label.

    Returns
    -------
    two-item tuple of str
        Yields the label and sequence for each entry.

    Raises
    ------
    RecordError
        When ``strict`` is ``True`` and a partial record is encountered.

    Examples
    --------
    >>> from StringIO import StringIO
    >>> fasta_f = StringIO('>seq1 db-accession-149855\n'
    ...                    'CGATGTCGATCGATCGATCGATCAG\n'
    ...                    '>seq2 db-accession-34989\n'
    ...                    'CATCGATCGATCGATGCATGCATGCATG\n')
    >>> from skbio.parse.sequences import parse_fasta
    >>> for label, seq in parse_fasta(fasta_f):
    ...     print(label, seq)
    seq1 db-accession-149855 CGATGTCGATCGATCGATCGATCAG
    seq2 db-accession-34989 CATCGATCGATCGATGCATGCATGCATG

    With ``ignore_comment=True`` only the sequence identifier (text before
    the first space on the header line) is yielded:

    >>> fasta_f = StringIO('>seq1 db-accession-149855\n'
    ...                    'CGATGTCGATCGATCGATCGATCAG\n')
    >>> for label, seq in parse_fasta(fasta_f, ignore_comment=True):
    ...     print(label, seq)
    seq1 CGATGTCGATCGATCGATCGATCAG

    """
    warnings.warn(
        "`parse_fasta` is deprecated and will be removed in scikit-bio 0.3.0. "
        "Please update your code to use `skbio.io.read(fh, format='fasta')` "
        "to obtain a generator of `BiologicalSequence` objects (or "
        "subclasses, see the `constructor` parameter).", DeprecationWarning)

    for record in finder(infile):
        header = record[0]

        # a well-formed record opens with a label line...
        if header[0] not in label_characters:
            if strict:
                raise RecordError(
                    "Found Fasta record without label line: %s" % record)
            continue

        # ...and carries at least one line of sequence data
        if len(record) < 2:
            if strict:
                raise RecordError(
                    "Found label line without sequences: %s" % record)
            continue

        # drop the label character and surrounding whitespace
        label = header[1:].strip()
        if label_to_name is not None:
            label = label_to_name(label)
        elif ignore_comment:
            # keep only the sequence identifier (first space-separated field)
            label = label.split()[0]

        # sequence data may span several lines; concatenate them
        yield label, ''.join(record[1:])
+
+
def parse_qual(infile, full_header=False):
    r"""Yield (label, quality scores) pairs from a qual file.

    .. note:: Deprecated in scikit-bio 0.2.0-dev
       ``parse_qual`` will be removed in scikit-bio 0.3.0. It is replaced by
       ``read``, a more general deserializer for FASTA/QUAL-formatted files
       that supports multiple file formats, automatic format detection,
       etc. via scikit-bio's I/O registry. See :mod:`skbio.io` for details.

    Parameters
    ----------
    infile : open file object or str
        An open qual file or a path to it.
    full_header : bool
        When ``True``, yield the entire header line as the label; otherwise
        yield only the first whitespace-separated field (the id).

    Returns
    -------
    label : str
        The quality label
    qual : np.ndarray of int
        The quality score at each position

    Raises
    ------
    RecordError
        When a quality score cannot be converted to an integer.

    Examples
    --------
    >>> from StringIO import StringIO
    >>> from skbio.parse.sequences import parse_qual
    >>> qual_f = StringIO('>seq1\n'
    ...                   '10 20 30 40\n'
    ...                   '>seq2\n'
    ...                   '1 2 3 4\n')
    >>> for label, qual in parse_qual(qual_f):
    ...     print(label)
    ...     print(qual)
    seq1
    [10 20 30 40]
    seq2
    [1 2 3 4]

    """
    warnings.warn(
        "`parse_qual` is deprecated and will be removed in scikit-bio 0.3.0. "
        "Please update your code to use "
        "`skbio.io.read(fasta_fh, qual=qual_fh, format='fasta')` to obtain a "
        "generator of `BiologicalSequence` objects (or subclasses, see the "
        "`constructor` parameter) with quality scores.", DeprecationWarning)

    for record in FastaFinder(infile):
        # header line minus the leading '>' marker
        header = record[0][1:]
        # all score lines of the record form one whitespace-separated list
        joined_scores = ' '.join(record[1:])
        try:
            scores = np.asarray(joined_scores.split(), dtype=int)
        except ValueError:
            raise RecordError(
                "Invalid qual file. Check the format of the qual file: each "
                "quality score must be convertible to an integer.")
        label = header if full_header else header.split()[0]
        yield (label, scores)
diff --git a/skbio/parse/sequences/fastq.py b/skbio/parse/sequences/fastq.py
new file mode 100644
index 0000000..4d2736d
--- /dev/null
+++ b/skbio/parse/sequences/fastq.py
@@ -0,0 +1,176 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+from future.standard_library import hooks
+
+import warnings
+
+import numpy as np
+
+from skbio.io.util import open_file
+from ._exception import FastqParseError
+
+with hooks():
+    from itertools import zip_longest
+
+
+def _ascii_to_phred(s, offset):
+    """Convert ascii to Phred quality score with specified ASCII offset."""
+    return np.fromstring(s, dtype='|S1').view(np.int8) - offset
+
+
def ascii_to_phred33(s):
    """Convert an ascii string to Phred scores using an ASCII offset of 33.

    This is the standard "Sanger" encoding, used by Illumina in CASAVA
    versions after 1.8.0 and by most other tools. Note that internal
    Illumina files still use an offset of 64.
    """
    return _ascii_to_phred(s, 33)
+
+
def ascii_to_phred64(s):
    """Convert an ascii string to Phred scores using an ASCII offset of 64.

    This Illumina-specific encoding was used by CASAVA versions prior to
    1.8.0 and in Illumina internal formats (e.g., export.txt files).
    """
    return _ascii_to_phred(s, 64)
+
+
+def _drop_id_marker(s):
+    """Drop the first character and decode bytes to text"""
+    id_ = s[1:]
+    try:
+        return str(id_.decode('utf-8'))
+    except AttributeError:
+        return id_
+
+
def parse_fastq(data, strict=False, enforce_qual_range=True, phred_offset=33):
    r"""yields label, seq, and qual from a fastq file.

    .. note:: Deprecated in scikit-bio 0.2.0-dev
       ``parse_fastq`` will be removed in scikit-bio 0.3.0. It is replaced by
       ``read``, which is a more general method for deserializing
       FASTQ-formatted files. ``read`` supports multiple file formats,
       automatic file format detection, etc. by taking advantage of
       scikit-bio's I/O registry system. See :mod:`skbio.io` for more details.

    Parameters
    ----------
    data : open file object or str
        An open fastq file (opened in binary mode) or a path to it.
    strict : bool, optional
        Defaults to ``False``. If strict is true a FastqParseError will be
        raised if the seq and qual labels don't match.
    enforce_qual_range : bool, optional
        Defaults to ``True``. If ``True``, an exception will be raised if a
        quality score outside the range [0, 62] is detected
    phred_offset : {33, 64}, optional
        What Phred offset to use when converting qual score symbols to
        integers

    Returns
    -------
    label, seq, qual : (str, bytes, np.array)
        yields the label, sequence and quality for each entry

    Raises
    ------
    FastqParseError
        If an incomplete record is found at the end of the file, if the seq
        and qual ids mismatch while ``strict`` is on, or if a converted
        quality score falls outside [0, 62] while ``enforce_qual_range`` is
        on.
    ValueError
        If ``phred_offset`` is neither 33 nor 64.
    """
    warnings.warn(
        "`parse_fastq` is deprecated and will be removed in scikit-bio 0.3.0. "
        "Please update your code to use `skbio.io.read(fh, format='fastq')` "
        "to obtain a generator of `BiologicalSequence` objects (or "
        "subclasses, see the `constructor` parameter).", DeprecationWarning)

    # select the symbol -> score converter for the requested offset
    if phred_offset == 33:
        phred_f = ascii_to_phred33
    elif phred_offset == 64:
        phred_f = ascii_to_phred64
    else:
        raise ValueError("Unknown PHRED offset of %s" % phred_offset)

    with open_file(data, 'rb') as data:
        # zip_longest over four copies of the same iterator groups the
        # stream into chunks of 4 lines (one FASTQ record per chunk)
        iters = [iter(data)] * 4
        for seqid, seq, qualid, qual in zip_longest(*iters):
            seqid = seqid.strip()
            # If the file simply ended in a blank line, do not error.
            # BUG FIX: this was ``seqid is ''``, an identity comparison that
            # is never True for the bytes yielded in 'rb' mode, so a
            # trailing blank line spuriously raised FastqParseError.
            if not seqid:
                continue
            # Error if an incomplete record is found
            # Note: seqid cannot be None, because if all 4 values were None,
            # then the loop condition would be false, and we could not have
            # gotten to this point
            if seq is None or qualid is None or qual is None:
                raise FastqParseError("Incomplete FASTQ record found at end "
                                      "of file")

            seq = seq.strip()
            qualid = qualid.strip()
            qual = qual.strip()

            seqid = _drop_id_marker(seqid)

            # decode the sequence to text when reading binary data; text
            # input (no .decode) passes through unchanged
            try:
                seq = str(seq.decode("utf-8"))
            except AttributeError:
                pass

            qualid = _drop_id_marker(qualid)
            if strict:
                if seqid != qualid:
                    raise FastqParseError('ID mismatch: {} != {}'.format(
                        seqid, qualid))

            # bounds based on illumina limits, see:
            # http://nar.oxfordjournals.org/content/38/6/1767/T1.expansion.html
            qual = phred_f(qual)
            if enforce_qual_range and ((qual < 0).any() or (qual > 62).any()):
                raise FastqParseError("Failed qual conversion for seq id: %s. "
                                      "This may be because you passed an "
                                      "incorrect value for phred_offset." %
                                      seqid)

            yield (seqid, seq, qual)
diff --git a/skbio/parse/sequences/iterator.py b/skbio/parse/sequences/iterator.py
new file mode 100644
index 0000000..6dd4cb1
--- /dev/null
+++ b/skbio/parse/sequences/iterator.py
@@ -0,0 +1,206 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from itertools import chain
+
+from future.builtins import zip
+
+from skbio.workflow import Workflow, not_none, method, requires
+from .fasta import parse_fasta, parse_qual
+from .fastq import parse_fastq
+
+
+def _has_qual(item):
+    """Return True if it appears that there is qual data"""
+    return (item['QualID'] is not None) and (item['Qual'] is not None)
+
+
class SequenceIterator(Workflow):
    """Standard API for iterating over sequence data.

    Provides a common interface for iterating over sequence data, including
    support for quality scores and transforms. Subclasses are expected to
    populate the shared ``state`` dict with:

        * SequenceID : str, the sequence identifier
        * Sequence   : str, the sequence itself
        * QualID     : str or None, the quality ID (for completeness)
        * Qual       : np.array or None, the quality scores

    ``state`` is allocated a single time and mutated in place for every
    record, avoiding repeated allocations. The object yielded during
    iteration is therefore reused: copy it if a record must outlive its
    iteration step.

    *WARNING*: for the same reason, the yielded object is not safe for use
    with Python 2.7's builtin ``zip``.

    A transform is a callable that receives ``state`` and modifies it in
    place. For instance, to reverse sequences (the primary intention is to
    support reverse complementing):

    >>> def reverse(st):
    ...    st['Sequence']= st['Sequence'][::-1]
    ...    st['Qual'] = st['Qual'][::-1] if st['Qual'] is not None else None

    Parameters
    ----------
    seq : list of open file-like objects
    qual : list of open file-like objects or None
    transform : function or None
        If provided, this function will be passed ``state``
    valid_id : bool
        If true, verify sequence and qual IDs are identical (if relevant)
    valid_length : bool
        If true, verify the length of the sequence and qual are the same
        (if relevant)

    Attributes
    ----------
    seq
    qual
    state
    options

    """
    def __init__(self, seq, qual=None, transform=None, valid_id=True,
                 valid_length=True, **kwargs):
        if seq is None:
            raise ValueError("SequenceIterator requires sequences!")

        self.seq = seq
        self.qual = qual
        self._transform = transform

        # preallocated once; subclasses mutate these keys in place
        initial_state = {'SequenceID': None,
                         'Sequence': None,
                         'QualID': None,
                         'Qual': None}
        run_options = {'transform': self._transform,
                       'valid_id': valid_id,
                       'valid_length': valid_length}

        super(SequenceIterator, self).__init__(initial_state,
                                               options=run_options)

    def _gen(self):
        """Yield a populated record (must be implemented by subclasses)."""
        raise NotImplementedError("Must be implemented by subclass")

    def __call__(self):
        return super(SequenceIterator, self).__call__(self._gen())

    def __iter__(self):
        return self()

    def initialize_state(self, item):
        """No-op: the subclassed iterators write into ``state`` directly."""
        pass

    @method(priority=100)
    @requires(option='valid_id', values=True, state=_has_qual)
    def validate_ids(self):
        """Flag the record as failed when sequence and qual IDs differ."""
        ids_match = self.state['SequenceID'] == self.state['QualID']
        self.failed = not ids_match

    @method(priority=90)
    @requires(option='valid_length', values=True, state=_has_qual)
    def valid_lengths(self):
        """Flag the record as failed when seq and qual lengths differ."""
        lengths_match = len(self.state['Sequence']) == len(self.state['Qual'])
        self.failed = not lengths_match

    @method(priority=80)
    @requires(option='transform', values=not_none)
    def transform(self):
        """Apply the user-supplied transform to ``state``, if any."""
        self._transform(self.state)
+
+
class FastaIterator(SequenceIterator):
    """Fill ``state`` from fasta data, pairing qual scores when supplied."""
    def _gen(self):
        """Construct the internal record generator."""
        # lazily chain all fasta sources into one stream of records
        seq_iter = chain.from_iterable(parse_fasta(f) for f in self.seq)

        if self.qual is None:
            return self._fasta_gen(seq_iter)

        # qual files were supplied: walk them in lockstep with the fasta
        qual_iter = chain.from_iterable(parse_qual(f) for f in self.qual)
        return self._fasta_qual_gen(seq_iter, qual_iter)

    def _fasta_gen(self, fasta_gens):
        """Yield fasta data (no qual)."""
        state = self.state
        for seq_id, seq in fasta_gens:
            state['SequenceID'] = seq_id
            state['Sequence'] = seq

            # state is updated in place (circumventing
            # Workflow.initialize_state), so nothing needs to be yielded
            yield None

    def _fasta_qual_gen(self, fasta_gen, qual_gen):
        """Yield fasta and qual together."""
        state = self.state
        for (seq_id, seq), (qual_id, qual) in zip(fasta_gen, qual_gen):
            state['SequenceID'] = seq_id
            state['Sequence'] = seq
            state['QualID'] = qual_id
            state['Qual'] = qual

            # state is updated in place (circumventing
            # Workflow.initialize_state), so nothing needs to be yielded
            yield None
+
+
class FastqIterator(SequenceIterator):
    """Fill ``state`` from fastq data.

    Note: the 'qual' keyword argument is ignored by this object, as fastq
    records carry their own quality scores.
    """
    def __init__(self, *args, **kwargs):
        # default to the modern Sanger/Illumina ASCII offset of 33
        self._fpo = kwargs.pop('phred_offset', 33)
        super(FastqIterator, self).__init__(*args, **kwargs)

    def _gen(self):
        """Construct the internal record generator."""
        parsers = (parse_fastq(f, phred_offset=self._fpo) for f in self.seq)
        return self._fastq_gen(chain.from_iterable(parsers))

    def _fastq_gen(self, fastq_gens):
        """Yield fastq data."""
        state = self.state
        for seq_id, seq, qual in fastq_gens:
            state['SequenceID'] = seq_id
            state['Sequence'] = seq
            # fastq shares one id between sequence and quality
            state['QualID'] = seq_id
            state['Qual'] = qual

            # state is updated in place (circumventing
            # Workflow.initialize_state), so nothing needs to be yielded
            yield None
diff --git a/skbio/parse/sequences/tests/__init__.py b/skbio/parse/sequences/tests/__init__.py
new file mode 100644
index 0000000..774824a
--- /dev/null
+++ b/skbio/parse/sequences/tests/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
diff --git a/skbio/parse/sequences/tests/data/fna1.fasta b/skbio/parse/sequences/tests/data/fna1.fasta
new file mode 100644
index 0000000..40db637
--- /dev/null
+++ b/skbio/parse/sequences/tests/data/fna1.fasta
@@ -0,0 +1,4 @@
+>s1
+ATGC
+>s2
+AATTGG
diff --git a/skbio/parse/sequences/tests/data/fna1.fna.gz b/skbio/parse/sequences/tests/data/fna1.fna.gz
new file mode 100644
index 0000000..79b4f8b
Binary files /dev/null and b/skbio/parse/sequences/tests/data/fna1.fna.gz differ
diff --git a/skbio/parse/sequences/tests/data/fna1.qual b/skbio/parse/sequences/tests/data/fna1.qual
new file mode 100644
index 0000000..ece3e81
--- /dev/null
+++ b/skbio/parse/sequences/tests/data/fna1.qual
@@ -0,0 +1,4 @@
+>s1
+10 10 10 10
+>s2
+10 20 30 40 30 20
diff --git a/skbio/parse/sequences/tests/data/fq1.fastq.gz b/skbio/parse/sequences/tests/data/fq1.fastq.gz
new file mode 100644
index 0000000..e4ed599
Binary files /dev/null and b/skbio/parse/sequences/tests/data/fq1.fastq.gz differ
diff --git a/skbio/parse/sequences/tests/data/fq1.fq b/skbio/parse/sequences/tests/data/fq1.fq
new file mode 100644
index 0000000..a15ea63
--- /dev/null
+++ b/skbio/parse/sequences/tests/data/fq1.fq
@@ -0,0 +1,8 @@
+ at s1
+ATGC
++
+hhhh
+ at s2
+AATTGG
++
+gggghh
diff --git a/skbio/parse/sequences/tests/data/noextensionfasta b/skbio/parse/sequences/tests/data/noextensionfasta
new file mode 100644
index 0000000..d262c0f
--- /dev/null
+++ b/skbio/parse/sequences/tests/data/noextensionfasta
@@ -0,0 +1,4 @@
+>seq1
+AATTGG
+>seq2
+ATATA
diff --git a/skbio/parse/sequences/tests/data/qs1.qseq.gz b/skbio/parse/sequences/tests/data/qs1.qseq.gz
new file mode 100644
index 0000000..c97ed7a
Binary files /dev/null and b/skbio/parse/sequences/tests/data/qs1.qseq.gz differ
diff --git a/skbio/parse/sequences/tests/test_clustal.py b/skbio/parse/sequences/tests/test_clustal.py
new file mode 100644
index 0000000..b2a9c7e
--- /dev/null
+++ b/skbio/parse/sequences/tests/test_clustal.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+
+from unittest import TestCase, main
+
+from skbio.parse.sequences import parse_clustal, write_clustal
+from skbio.parse.sequences.clustal import (_is_clustal_seq_line, last_space,
+                                           _delete_trailing_number)
+from skbio.io import RecordError
+
+
class ClustalTests(TestCase):

    """Tests of the top-level clustal helper functions."""

    def test_is_clustal_seq_line(self):
        """_is_clustal_seq_line should reject blanks and 'CLUSTAL'"""
        accepted = ['abc', 'abc  def']
        rejected = ['CLUSTAL', 'CLUSTAL W fsdhicjkjsdk', '  *   *',
                    ' abc def', 'MUSCLE (3.41) multiple sequence alignment']
        for line in accepted:
            self.assertTrue(_is_clustal_seq_line(line))
        for line in rejected:
            self.assertFalse(_is_clustal_seq_line(line))

    def test_last_space(self):
        """last_space should split on last whitespace"""
        self.assertEqual(last_space('a\t\t\t  b    c'), ['a b', 'c'])
        self.assertEqual(last_space('xyz'), ['xyz'])
        self.assertEqual(last_space('  a b'), ['a', 'b'])

    def test_delete_trailing_number(self):
        """Should delete the trailing number if present"""
        cases = [('abc', 'abc'),
                 ('a b c', 'a b c'),
                 ('a \t  b  \t  c', 'a \t  b  \t  c'),
                 ('a b 3', 'a b'),
                 ('a b c \t 345', 'a b c')]
        for given, expected in cases:
            self.assertEqual(_delete_trailing_number(given), expected)
+
+
class ClustalParserTests(TestCase):

    """Tests of the parse_clustal function"""

    def test_null(self):
        """Should return empty dict and list on null input"""
        result = parse_clustal([])
        self.assertEqual(dict(result), {})

    def test_minimal(self):
        """Should handle single-line input correctly"""
        result = parse_clustal([MINIMAL])  # expects seq of lines
        self.assertEqual(dict(result), {'abc': 'ucag'})

    def test_two(self):
        """Should handle two-sequence input correctly"""
        result = parse_clustal(TWO)
        self.assertEqual(dict(result), {'abc': 'uuuaaa', 'def': 'cccggg'})

    def test_real(self):
        """Should handle real Clustal output"""
        data = parse_clustal(REAL)
        self.assertEqual(dict(data), {
            'abc':
            'GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA'
            'GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC'
            'UGACUAGUCAGCUAGCAUCGAUCAGU',
            'def':
            '------------------------------------------------------------'
            '-----------------------------------------CGCGAUGCAUGCAU-CGAU'
            'CGAUCAGUCAGUCGAU----------',
            'xyz':
            '------------------------------------------------------------'
            '-------------------------------------CAUGCAUCGUACGUACGCAUGAC'
            'UGCUGCAUCA----------------'
        })

    def test_bad(self):
        """Should reject bad data if strict"""
        result = parse_clustal(BAD, strict=False)
        self.assertEqual(dict(result), {})
        # should fail unless we turned strict processing off
        with self.assertRaises(RecordError):
            dict(parse_clustal(BAD))

    def test_space_labels(self):
        """Should tolerate spaces in labels"""
        result = parse_clustal(SPACE_LABELS)
        self.assertEqual(dict(result), {'abc': 'uca', 'def ggg': 'ccc'})

    def test_write(self):
        """Should write real Clustal output"""
        import os
        fname = "test.aln"
        seqs = [('abc',
                 'GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA'
                 'GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC'
                 'UGACUAGUCAGCUAGCAUCGAUCAGU'),
                ('def',
                 '------------------------------------------------------------'
                 '-----------------------------------------CGCGAUGCAUGCAU-CGAU'
                 'CGAUCAGUCAGUCGAU----------'),
                ('xyz',
                 '------------------------------------------------------------'
                 '-------------------------------------CAUGCAUCGUACGUACGCAUGAC'
                 'UGCUGCAUCA----------------')]
        records = (x for x in seqs)
        # FIX: the original leaked file handles (an unclosed ``open().read()``
        # plus a redundant second close) and skipped cleanup on assertion
        # failure. Context managers close the handles deterministically and
        # the ``finally`` removes the temp file even if an assertion fails.
        with open(fname, 'w') as testfile:
            write_clustal(records, testfile)
        try:
            with open(fname, 'r') as result_f:
                raw = result_f.read()
            data = list(parse_clustal(raw.split('\n')))
            self.assertEqual(len(data), len(seqs))
            self.assertEqual(set(data), set(seqs))
        finally:
            os.remove(fname)
+
# Single-record input: one tab-separated label/sequence line.
MINIMAL = 'abc\tucag'
# Two sequences split across two blocks, including a consensus line
# ('***') and a label containing a space ('def ggg').
TWO = 'abc\tuuu\ndef\tccc\n\n    ***\n\ndef ggg\nabc\taaa\n'.split('\n')

# Realistic CLUSTAL W output: three sequences over three blocks, with
# trailing residue counts on some lines and consensus lines.
REAL = """CLUSTAL W (1.82) multiple sequence alignment


abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA 60
def             ------------------------------------------------------------
xyz             ------------------------------------------------------------


abc             GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC 11
def             -----------------------------------------CGCGAUGCAUGCAU-CGAU 18
xyz             -------------------------------------CAUGCAUCGUACGUACGCAUGAC 23
                                                         *    * * * *    **

abc             UGACUAGUCAGCUAGCAUCGAUCAGU 145
def             CGAUCAGUCAGUCGAU---------- 34
xyz             UGCUGCAUCA---------------- 33
                *     ***""".split('\n')

# Lines with no recognizable clustal structure; strict parsing should fail.
BAD = ['dshfjsdfhdfsj', 'hfsdjksdfhjsdf']

# Labels that themselves contain spaces.
SPACE_LABELS = ['abc uca', 'def ggg ccc']


if __name__ == '__main__':
    main()
diff --git a/skbio/parse/sequences/tests/test_factory.py b/skbio/parse/sequences/tests/test_factory.py
new file mode 100644
index 0000000..19f222f
--- /dev/null
+++ b/skbio/parse/sequences/tests/test_factory.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+from numpy import array
+
+from skbio import FastaIterator
+from skbio.parse.sequences import load
+from skbio.parse.sequences.factory import (
+    _open_or_none, _is_single_iterator_type)
+from skbio.util import get_data_path
+
+
+class SequenceLoadTests(TestCase):
+    """Tests for the `load` sequence-file factory and its private helpers."""
+
+    def setUp(self):
+        # Fixture paths: plain and gzipped FASTA/FASTQ, a qual file, and a
+        # fasta file whose name carries no recognizable extension.
+        self.fna1 = get_data_path('fna1.fasta')
+        self.fna1gz = get_data_path('fna1.fna.gz')
+        self.fq1 = get_data_path('fq1.fq')
+        self.fq1gz = get_data_path('fq1.fastq.gz')
+        self.qual1 = get_data_path('fna1.qual')
+        self.noext = get_data_path('noextensionfasta')
+
+    def test_single_files(self):
+        """load should handle a single file, and can be gzipped"""
+        it = load(self.fna1)
+        obs = [rec.copy() for rec in it]
+        exp = [{'Sequence': 'ATGC', 'SequenceID': 's1',
+                'QualID': None, 'Qual': None},
+               {'Sequence': 'AATTGG', 'SequenceID': 's2',
+                'QualID': None, 'Qual': None}]
+        self.assertEqual(obs, exp)
+        it = load(self.fq1, phred_offset=64)
+        obs = [rec.copy() for rec in it]
+        exp = [{'Sequence': 'ATGC', 'SequenceID': 's1',
+                'QualID': 's1', 'Qual': array([40, 40, 40, 40])},
+               {'Sequence': 'AATTGG', 'SequenceID': 's2',
+                'QualID': 's2', 'Qual': array([39, 39, 39, 39, 40, 40])}]
+        # Qual is a numpy array, so records are compared field by field.
+        for o, e in zip(obs, exp):
+            self.assertEqual(o['Sequence'], e['Sequence'])
+            self.assertEqual(o['SequenceID'], e['SequenceID'])
+            self.assertEqual(o['QualID'], e['QualID'])
+            self.assertTrue((o['Qual'] == e['Qual']).all())
+
+        it = load(self.fna1gz)
+        obs = [rec.copy() for rec in it]
+        exp = [{'Sequence': 'ATGC', 'SequenceID': 's1',
+                'QualID': None, 'Qual': None},
+               {'Sequence': 'AATTGG', 'SequenceID': 's2',
+                'QualID': None, 'Qual': None}]
+        self.assertEqual(obs, exp)
+
+        it = load(self.fq1gz, phred_offset=64)
+        obs = [rec.copy() for rec in it]
+        exp = [{'Sequence': 'ATGC', 'SequenceID': 's1',
+                'QualID': 's1', 'Qual': array([40, 40, 40, 40])},
+               {'Sequence': 'AATTGG', 'SequenceID': 's2',
+                'QualID': 's2', 'Qual': array([39, 39, 39, 39, 40, 40])}]
+        for o, e in zip(obs, exp):
+            self.assertEqual(o['Sequence'], e['Sequence'])
+            self.assertEqual(o['SequenceID'], e['SequenceID'])
+            self.assertEqual(o['QualID'], e['QualID'])
+            self.assertTrue((o['Qual'] == e['Qual']).all())
+
+    def test_multiple_files(self):
+        """load should handle multiple files of different types"""
+        # Records are expected in file order: fastq records first, then fasta.
+        it = load([self.fq1, self.fna1], phred_offset=64)
+        obs = [rec.copy() for rec in it]
+        exp = [{'Sequence': 'ATGC', 'SequenceID': 's1',
+                'QualID': 's1', 'Qual': array([40, 40, 40, 40])},
+               {'Sequence': 'AATTGG', 'SequenceID': 's2',
+                'QualID': 's2', 'Qual': array([39, 39, 39, 39, 40, 40])},
+               {'Sequence': 'ATGC', 'SequenceID': 's1',
+                'QualID': None, 'Qual': None},
+               {'Sequence': 'AATTGG', 'SequenceID': 's2',
+                'QualID': None, 'Qual': None}]
+
+        o = obs[0]
+        e = exp[0]
+        self.assertEqual(o['Sequence'], e['Sequence'])
+        self.assertEqual(o['SequenceID'], e['SequenceID'])
+        self.assertEqual(o['QualID'], e['QualID'])
+        self.assertTrue((o['Qual'] == e['Qual']).all())
+
+        o = obs[1]
+        e = exp[1]
+        self.assertEqual(o['Sequence'], e['Sequence'])
+        self.assertEqual(o['SequenceID'], e['SequenceID'])
+        self.assertEqual(o['QualID'], e['QualID'])
+        self.assertTrue((o['Qual'] == e['Qual']).all())
+
+        o = obs[2]
+        e = exp[2]
+        self.assertEqual(o['Sequence'], e['Sequence'])
+        self.assertEqual(o['SequenceID'], e['SequenceID'])
+        self.assertEqual(o['QualID'], e['QualID'])
+        self.assertEqual(o['Qual'], e['Qual'])
+
+        o = obs[3]
+        e = exp[3]
+        self.assertEqual(o['Sequence'], e['Sequence'])
+        self.assertEqual(o['SequenceID'], e['SequenceID'])
+        self.assertEqual(o['QualID'], e['QualID'])
+        self.assertEqual(o['Qual'], e['Qual'])
+
+    def test_transform(self):
+        """load should pass transform methods to the iterators"""
+        def rev_f(st):
+            # Reverse the sequence, and the qual vector when one exists.
+            st['Sequence'] = st['Sequence'][::-1]
+            st['Qual'] = st['Qual'][::-1] if st['Qual'] is not None else None
+
+        it = load([self.fq1gz, self.fna1], transform=rev_f, phred_offset=64)
+        obs = [rec.copy() for rec in it]
+        exp = [{'Sequence': 'CGTA', 'SequenceID': 's1',
+                'QualID': 's1', 'Qual': array([40, 40, 40, 40])},
+               {'Sequence': 'GGTTAA', 'SequenceID': 's2',
+                'QualID': 's2', 'Qual': array([40, 40, 39, 39, 39, 39])},
+               {'Sequence': 'CGTA', 'SequenceID': 's1',
+                'QualID': None, 'Qual': None},
+               {'Sequence': 'GGTTAA', 'SequenceID': 's2',
+                'QualID': None, 'Qual': None}]
+
+        o = obs[0]
+        e = exp[0]
+        self.assertEqual(o['Sequence'], e['Sequence'])
+        self.assertEqual(o['SequenceID'], e['SequenceID'])
+        self.assertEqual(o['QualID'], e['QualID'])
+        self.assertTrue((o['Qual'] == e['Qual']).all())
+
+        o = obs[1]
+        e = exp[1]
+        self.assertEqual(o['Sequence'], e['Sequence'])
+        self.assertEqual(o['SequenceID'], e['SequenceID'])
+        self.assertEqual(o['QualID'], e['QualID'])
+        self.assertTrue((o['Qual'] == e['Qual']).all())
+
+        o = obs[2]
+        e = exp[2]
+        self.assertEqual(o['Sequence'], e['Sequence'])
+        self.assertEqual(o['SequenceID'], e['SequenceID'])
+        self.assertEqual(o['QualID'], e['QualID'])
+        self.assertEqual(o['Qual'], e['Qual'])
+
+        o = obs[3]
+        e = exp[3]
+        self.assertEqual(o['Sequence'], e['Sequence'])
+        self.assertEqual(o['SequenceID'], e['SequenceID'])
+        self.assertEqual(o['QualID'], e['QualID'])
+        self.assertEqual(o['Qual'], e['Qual'])
+
+    def test_force_constructor(self):
+        """An explicit constructor should override extension sniffing."""
+        it = load([self.noext], constructor=FastaIterator)
+        obs = [rec.copy() for rec in it]
+        exp = [{'Sequence': 'AATTGG', 'SequenceID': 'seq1',
+                'Qual': None, 'QualID': None},
+               {'Sequence': 'ATATA', 'SequenceID': 'seq2',
+                'Qual': None, 'QualID': None}]
+        self.assertEqual(obs, exp)
+
+    def test_no_seqs(self):
+        """Empty/None input is rejected rather than yielding nothing."""
+        for null in ('', [], (), None):
+            with self.assertRaises(ValueError):
+                load(null)
+
+    def test_unknown_filetype(self):
+        """An unrecognized extension raises IOError."""
+        with self.assertRaises(IOError):
+            load('seqs.mpeg')
+
+    def test_file_path_does_not_exist(self):
+        """A missing file raises IOError."""
+        with self.assertRaises(IOError):
+            load('this-seqs-file-had-better-not-exist-or-this-test-will-'
+                 'fail.fna')
+
+    def test_multiple_types_fasta_fastq_qual(self):
+        """qual= cannot be combined with mixed fasta/fastq inputs."""
+        with self.assertRaises(ValueError):
+            load([self.fna1, self.fq1], qual=self.qual1)
+
+    def test_open_or_none_no_opener(self):
+        """_open_or_none returns None when no opener is supplied."""
+        obs = _open_or_none(None, self.fna1)
+        self.assertTrue(obs is None)
+
+    def test_open_or_none_opener_error(self):
+        """An opener's IOError propagates to the caller."""
+        def bogus_opener(f):
+            raise IOError('hahaha')
+
+        with self.assertRaises(IOError):
+            _open_or_none(bogus_opener, self.fna1)
+
+    def test_is_single_iterator_type_null_case(self):
+        """An empty collection trivially counts as a single iterator type."""
+        self.assertTrue(_is_single_iterator_type([]))
+
+
+if __name__ == '__main__':  # allow running this test module directly
+    main()
diff --git a/skbio/parse/sequences/tests/test_fasta.py b/skbio/parse/sequences/tests/test_fasta.py
new file mode 100644
index 0000000..36a2196
--- /dev/null
+++ b/skbio/parse/sequences/tests/test_fasta.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+
+import tempfile
+from unittest import TestCase, main
+
+import numpy.testing as npt
+
+from skbio import parse_fasta, parse_qual
+from skbio.io import RecordError
+
+
+# Raw fasta/qual fixtures keyed by the attribute name the test mixins expose
+# them under (IterableData splits each into lines; FileData writes each to a
+# temporary file and exposes the file name instead).
+FASTA_PARSERS_DATA = {
+    'labels': '>abc\n>def\n>ghi\n',
+    'oneseq': '>abc\nUCAG\n',
+    'multiline': '>xyz\nUUUU\nCC\nAAAAA\nG',
+    'threeseq': '>123\na\n> \t abc  \t \ncag\ngac\n>456\nc\ng',
+    'twogood': '>123\n\n> \t abc  \t \ncag\ngac\n>456\nc\ng',
+    'oneX': '>123\nX\n> \t abc  \t \ncag\ngac\n>456\nc\ng',
+    'nolabels': 'GJ>DSJGSJDF\nSFHKLDFS>jkfs\n',
+    'empty': '',
+    'qualscores': '>x\n5 10 5\n12\n>y foo bar\n30 40\n>a   \n5 10 5\n12\n'
+                  '>b  baz\n30 40',
+    'invalidqual': '>x\n5 10 5\n12\n>y\n30 40\n>a\n5 10 5\n12 brofist 42'
+    }
+
+
+class IterableData(object):
+    """Store fasta data as lists of strings."""
+    def setUp(self):
+        # Each fixture becomes an attribute holding the data split on '\n'.
+        for attr, val in FASTA_PARSERS_DATA.items():
+            setattr(self, attr, val.split('\n'))
+
+
+class FileData(object):
+    """Store fasta data as file names pointing to the data."""
+    def setUp(self):
+        # Write each fixture to its own NamedTemporaryFile and expose the
+        # file *name*; the file objects are kept so they stay alive (and on
+        # disk) for the duration of the test.
+        tmp_files = []
+        for attr, val in FASTA_PARSERS_DATA.items():
+            tmp_file = tempfile.NamedTemporaryFile('r+')
+            tmp_file.write(val)
+            tmp_file.flush()
+            tmp_file.seek(0)
+            setattr(self, attr, tmp_file.name)
+            tmp_files.append(tmp_file)
+        self._tmp_files = tmp_files
+
+    def tearDown(self):
+        # Closing a NamedTemporaryFile also deletes it from disk.
+        for tmp_file in self._tmp_files:
+            tmp_file.close()
+
+
+class ParseFastaTests(object):
+
+    """Tests of parse_fasta: returns (label, seq) tuples."""
+
+    def test_empty(self):
+        """parse_fasta should return empty list from 'file' w/o labels
+        """
+        self.assertEqual(list(parse_fasta(self.empty)), [])
+        self.assertEqual(list(parse_fasta(self.nolabels, strict=False)),
+                         [])
+        # strict mode raises on data without any label lines
+        self.assertRaises(RecordError, list, parse_fasta(self.nolabels))
+
+    def test_no_labels(self):
+        """parse_fasta should return empty list from file w/o seqs"""
+        # should fail if strict (the default)
+        self.assertRaises(RecordError, list,
+                          parse_fasta(self.labels, strict=True))
+        # if not strict, should skip the records
+        self.assertEqual(list(parse_fasta(self.labels, strict=False)),
+                         [])
+
+    def test_single(self):
+        """parse_fasta should read single record as (label, seq) tuple
+        """
+        f = list(parse_fasta(self.oneseq))
+        self.assertEqual(len(f), 1)
+        a = f[0]
+        self.assertEqual(a, ('abc', 'UCAG'))
+
+        # multi-line sequences are concatenated into one string
+        f = list(parse_fasta(self.multiline))
+        self.assertEqual(len(f), 1)
+        a = f[0]
+        self.assertEqual(a, ('xyz', 'UUUUCCAAAAAG'))
+
+    def test_gt_bracket_in_seq(self):
+        """parse_fasta handles alternate finder function
+
+            this test also illustrates how to use the parse_fasta
+            to handle "sequences" that start with a > symbol, which can
+            happen when we abuse the parse_fasta to parse
+            fasta-like sequence quality files.
+        """
+        oneseq_w_gt = '>abc\n>CAG\n'.split('\n')
+
+        def get_two_line_records(infile):
+            # Pair up consecutive lines as (label, seq) records, ignoring
+            # whether the second line starts with '>'.
+            line1 = None
+            for line in infile:
+                if line1 is None:
+                    line1 = line
+                else:
+                    yield (line1, line)
+                    line1 = None
+        f = list(parse_fasta(oneseq_w_gt, finder=get_two_line_records))
+        self.assertEqual(len(f), 1)
+        a = f[0]
+        self.assertEqual(a, ('abc', '>CAG'))
+
+    def test_parse_fasta_ignore_comment(self):
+        """parse_fasta correct ignores label comments when requested
+        """
+        in_ = '>1\nCAG\n>2 some other info\nCCAG\n>3 \nA'.split('\n')
+        # ignore_comment = False
+        actual = list(parse_fasta(in_))
+        expected = [('1', 'CAG'), ('2 some other info', 'CCAG'), ('3', 'A')]
+        self.assertEqual(actual, expected)
+        # ignore_comment = True
+        actual = list(parse_fasta(in_, ignore_comment=True))
+        expected = [('1', 'CAG'), ('2', 'CCAG'), ('3', 'A')]
+        self.assertEqual(actual, expected)
+
+    def test_parse_fasta_label_to_name(self):
+        """label_to_name is applied to every record label."""
+        exp = [('brofist', 'a'), ('brofist', 'caggac'), ('brofist', 'cg')]
+
+        # the most powerful fasta label converter known to mankind
+        obs = list(parse_fasta(self.threeseq,
+                   label_to_name=lambda _: 'brofist'))
+
+        self.assertEqual(obs, exp)
+
+    def test_multiple(self):
+        """parse_fasta should read multiline records correctly"""
+        f = list(parse_fasta(self.threeseq))
+        self.assertEqual(len(f), 3)
+        a, b, c = f
+        self.assertEqual(a, ('123', 'a'))
+        self.assertEqual(b, ('abc', 'caggac'))
+        self.assertEqual(c, ('456', 'cg'))
+
+    def test_multiple_bad_strict(self):
+        """A record with an empty sequence raises in strict mode."""
+        with self.assertRaises(RecordError):
+            list(parse_fasta(self.twogood))
+
+    def test_multiple_bad_not_strict(self):
+        """Non-strict mode drops the bad record and keeps the rest."""
+        f = list(parse_fasta(self.twogood, strict=False))
+        self.assertEqual(len(f), 2)
+        a, b = f
+        self.assertEqual(a, ('abc', 'caggac'))
+
+    def test_parse_qual(self):
+        """parse_qual yields (id, scores) with trailing comments stripped."""
+        exp = [('x', [5, 10, 5, 12]), ('y', [30, 40]), ('a', [5, 10, 5, 12]),
+               ('b', [30, 40])]
+        obs = parse_qual(self.qualscores)
+
+        for o, e in zip(obs, exp):
+            npt.assert_equal(o, e)
+
+    def test_parse_qual_invalid_qual_file(self):
+        """Non-numeric scores raise RecordError."""
+        with self.assertRaises(RecordError):
+            list(parse_qual(self.invalidqual))
+
+    def test_parse_qual_full_header(self):
+        """full_header=True keeps the whole label, comments included."""
+        exp = [('x', [5, 10, 5, 12]), ('y foo bar', [30, 40]),
+               ('a', [5, 10, 5, 12]), ('b  baz', [30, 40])]
+        obs = parse_qual(self.qualscores, full_header=True)
+
+        for o, e in zip(obs, exp):
+            npt.assert_equal(o, e)
+
+
+class ParseFastaTestsInputIsIterable(IterableData, ParseFastaTests, TestCase):
+    """Mixin: `parse_fasta` and `parse_qual` in ParseFastaTests gets lists
+    of strings.
+
+    """
+    pass  # all behavior comes from the mixins
+
+
+class ParseFastaTestsInputIsFileNames(FileData, ParseFastaTests, TestCase):
+    """Mixin: `parse_fasta` and `parse_qual` in ParseFastaTests gets a
+    file name.
+
+    """
+    pass  # all behavior comes from the mixins
+
+if __name__ == "__main__":  # allow running this test module directly
+    main()
diff --git a/skbio/parse/sequences/tests/test_fastq.py b/skbio/parse/sequences/tests/test_fastq.py
new file mode 100644
index 0000000..0d9f74e
--- /dev/null
+++ b/skbio/parse/sequences/tests/test_fastq.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+
+from unittest import TestCase, main
+import tempfile
+
+from numpy import array
+
+from skbio import parse_fastq
+from skbio.parse.sequences import FastqParseError
+
+
+class IterableData(object):
+    def setUp(self):
+        """ Initialize variables to be used by the tests as lists of strings"""
+        self.FASTQ_EXAMPLE = FASTQ_EXAMPLE.split('\n')
+        self.FASTQ_EXAMPLE_2 = FASTQ_EXAMPLE_2.split('\n')
+        self.FASTQ_EXAMPLE_3 = FASTQ_EXAMPLE_3.split('\n')
+
+
+class FileData(object):
+    def setUp(self):
+        """ Initialize variables to be used by the tests as file names"""
+        # Write each fixture to a NamedTemporaryFile and expose the file
+        # *name*; the open handles are kept so the files survive the test.
+        tmp_files = []
+        for attr, val in [('FASTQ_EXAMPLE', FASTQ_EXAMPLE),
+                          ('FASTQ_EXAMPLE_2', FASTQ_EXAMPLE_2),
+                          ('FASTQ_EXAMPLE_3', FASTQ_EXAMPLE_3)]:
+            tmp_file = tempfile.NamedTemporaryFile('w')
+            tmp_file.write(val)
+            tmp_file.flush()
+            tmp_file.seek(0)
+            setattr(self, attr, tmp_file.name)
+            tmp_files.append(tmp_file)
+        self._tmp_files = tmp_files
+
+    def tearDown(self):
+        # Closing a NamedTemporaryFile also deletes it from disk.
+        for tmp_file in self._tmp_files:
+            tmp_file.close()
+
+
+class ParseFastqTests(object):
+    """Tests of parse_fastq; mixed with IterableData or FileData."""
+
+    def test_parse(self):
+        """parse_fastq decodes records and applies the phred offset."""
+        for label, seq, qual in parse_fastq(self.FASTQ_EXAMPLE,
+                                            phred_offset=64):
+            self.assertTrue(label in DATA)
+            self.assertEqual(seq, DATA[label]["seq"])
+            self.assertTrue((qual == DATA[label]["qual"]).all())
+
+        # Make sure that enforce_qual_range set to False allows qual scores
+        # to fall outside the typically acceptable range of 0-62
+        for label, seq, qual in parse_fastq(self.FASTQ_EXAMPLE_2,
+                                            phred_offset=33,
+                                            enforce_qual_range=False):
+            self.assertTrue(label in DATA_2)
+            self.assertEqual(seq, DATA_2[label]["seq"])
+            self.assertTrue((qual == DATA_2[label]["qual"]).all())
+
+        # This should raise a FastqParseError since the qual scores are
+        # intended to be interpreted with an offset of 64, and using 33 will
+        # make the qual score fall outside the acceptable range of 0-62.
+        with self.assertRaises(FastqParseError):
+            list(parse_fastq(self.FASTQ_EXAMPLE, phred_offset=33))
+
+    def test_parse_error(self):
+        """Malformed records raise FastqParseError."""
+        # strict mode rejects mismatched sequence/quality labels
+        with self.assertRaises(FastqParseError):
+            list(parse_fastq(self.FASTQ_EXAMPLE_2, strict=True))
+
+        # truncated record (header with no sequence/quality)
+        with self.assertRaises(FastqParseError):
+            list(parse_fastq(self.FASTQ_EXAMPLE_3, phred_offset=64))
+
+    def test_invalid_phred_offset(self):
+        """Only the standard offsets (33/64) are accepted."""
+        with self.assertRaises(ValueError):
+            list(parse_fastq(self.FASTQ_EXAMPLE, phred_offset=42))
+
+
+class ParseFastqTestsInputIsIterable(IterableData, ParseFastqTests, TestCase):
+    pass  # run ParseFastqTests against list-of-lines input
+
+
+class ParseFastqTestsInputIsFileNames(FileData, ParseFastqTests, TestCase):
+    pass  # run ParseFastqTests against file-name input
+
+
+# Expected decodings of FASTQ_EXAMPLE at phred_offset=64, keyed by record ID.
+DATA = {
+    "GAPC_0015:6:1:1259:10413#0/1":
+    dict(seq='AACACCAAACTTCTCCACCACGTGAGCTACAAAAG',
+         qual=array([32, 32, 32, 32, 25, 30, 20, 29, 32, 29, 35, 30, 35, 33,
+                     34, 35, 33, 35, 35, 32, 30, 12, 34, 30, 35, 35, 25, 20,
+                     28, 20, 28, 25, 28, 23, 6])),
+    "GAPC_0015:6:1:1283:11957#0/1":
+    dict(seq='TATGTATATATAACATATACATATATACATACATA',
+         qual=array([29, 11, 26, 27, 16, 25, 29, 31, 27, 25, 25, 30, 32, 32,
+                     32, 33, 35, 30, 28, 28, 32, 34, 20, 32, 32, 35, 32, 28,
+                     33, 20, 32, 32, 34, 34, 34])),
+    "GAPC_0015:6:1:1284:10484#0/1":
+    dict(seq='TCAGTTTTCCTCGCCATATTTCACGTCCTAAAGCG',
+         qual=array([21, 13, 31, 29, 29, 21, 31, 29, 26, 31, 25, 30, 28, 30,
+                     30, 32, 32, 25, 29, 32, 30, 19, 26, 29, 28, 25, 34, 34,
+                     32, 30, 31, 12, 34, 12, 31])),
+    "GAPC_0015:6:1:1287:17135#0/1":
+    dict(seq='TGTGCCTATGGAAGCAGTTCTAGGATCCCCTAGAA',
+         qual=array([30, 33, 33, 35, 35, 35, 12, 28, 35, 35, 35, 28, 35, 28,
+                     35, 20, 11, 20, 19, 29, 11, 26, 28, 29, 29,  9, 28, 27,
+                     23, 33, 30, 20, 32, 30, 11])),
+    "GAPC_0015:6:1:1293:3171#0/1":
+    dict(seq="AAAGAAAGGAAGAAAAGAAAAAGAAACCCGAGTTA",
+             qual=array([34, 32, 34, 34, 34, 21, 31, 27, 25, 25, 35, 33, 36,
+                         35, 36, 33, 31, 12, 34, 33, 33, 33, 34, 23, 34, 33,
+                         33, 35, 25, 35, 35, 32, 33, 30, 35])),
+    "GAPC_0015:6:1:1297:10729#0/1":
+    dict(seq="TAATGCCAAAGAAATATTTCCAAACTACATGCTTA",
+             qual=array([20, 28, 35, 35, 12, 34, 34, 32, 32, 34, 33, 35, 35,
+                         29, 31, 35, 33, 35, 35, 35, 35, 35, 12, 35, 35, 35,
+                         28, 35, 35, 20, 35, 35, 25, 12, 30])),
+    "GAPC_0015:6:1:1299:5940#0/1":
+    dict(seq="AATCAAGAAATGAAGATTTATGTATGTGAAGAATA",
+             qual=array([36, 35, 36, 36, 34, 35, 38, 38, 38, 36, 38, 38, 38,
+                         36, 32, 36, 36, 32, 30, 32, 35, 32, 15, 35, 32, 25,
+                         34, 34, 32, 30, 37, 37, 35, 36, 37])),
+    "GAPC_0015:6:1:1308:6996#0/1":
+    dict(seq="TGGGACACATGTCCATGCTGTGGTTTTAACCGGCA",
+             qual=array([33, 29, 32, 33, 12, 25, 32, 25, 30, 30, 35, 35, 25,
+                         33, 32, 30, 30, 20, 35, 35, 11, 31, 24, 29, 28, 35,
+                         28, 35, 32, 35, 33, 20, 20, 20, 35])),
+    "GAPC_0015:6:1:1314:13295#0/1":
+    dict(seq="AATATTGCTTTGTCTGAACGATAGTGCTCTTTGAT",
+         qual=array([35, 12, 35, 35, 28, 28, 36, 36, 36, 36, 36, 33, 33, 25,
+                     36, 32, 20, 32, 32, 32, 34, 12, 25, 20, 28, 32, 33, 32,
+                     32, 32, 34, 26, 35, 35, 35])),
+    "GAPC_0015:6:1:1317:3403#0/1":
+    dict(seq="TTGTTTCCACTTGGTTGATTTCACCCCTGAGTTTG",
+         # had to add space in qual line
+         qual=array([28, 28, 28, 26, 20, 25, 20, 19, 33, 12, 34, 34, 32, 32,
+                     28, 31, 21, 26, 31, 34, 34, 35, 35, 32, 35, 35, 30, 27,
+                     33, 35, 28, 33, 28, 20, 35]))
+
+}
+
+
+# Expected decodings of FASTQ_EXAMPLE_2 at phred_offset=33 with
+# enforce_qual_range=False (scores intentionally exceed the 0-62 range).
+DATA_2 = {
+    "GAPC_0017:6:1:1259:10413#0/1":
+    dict(seq='AACACCAAACTTCTCCACCACGTGAGCTACAAAAG',
+         qual=array([63, 63, 63, 63, 56, 61, 51, 60, 63, 60, 66, 61, 66, 64,
+                     65, 66, 64, 66, 66, 63, 61, 43, 65, 61, 66, 66, 56, 51,
+                     59, 51, 59, 56, 59, 54, 37])),
+    "GAPC_0015:6:1:1283:11957#0/1":
+    dict(seq='TATGTATATATAACATATACATATATACATACATA',
+         qual=array([60, 42, 57, 58, 47, 56, 60, 62, 58, 56, 56, 61, 63, 63,
+                     63, 64, 66, 61, 59, 59, 63, 65, 51, 63, 63, 66, 63, 59,
+                     64, 51, 63, 63, 65, 65, 65]))
+}
+
+
+FASTQ_EXAMPLE = r"""@GAPC_0015:6:1:1259:10413#0/1
+AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
++GAPC_0015:6:1:1259:10413#0/1
+````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
+ at GAPC_0015:6:1:1283:11957#0/1
+TATGTATATATAACATATACATATATACATACATA
++GAPC_0015:6:1:1283:11957#0/1
+]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
+ at GAPC_0015:6:1:1284:10484#0/1
+TCAGTTTTCCTCGCCATATTTCACGTCCTAAAGCG
++GAPC_0015:6:1:1284:10484#0/1
+UM_]]U_]Z_Y^\^^``Y]`^SZ]\Ybb`^_LbL_
+ at GAPC_0015:6:1:1287:17135#0/1
+TGTGCCTATGGAAGCAGTTCTAGGATCCCCTAGAA
++GAPC_0015:6:1:1287:17135#0/1
+^aacccL\ccc\c\cTKTS]KZ\]]I\[Wa^T`^K
+ at GAPC_0015:6:1:1293:3171#0/1
+AAAGAAAGGAAGAAAAGAAAAAGAAACCCGAGTTA
++GAPC_0015:6:1:1293:3171#0/1
+b`bbbU_[YYcadcda_LbaaabWbaacYcc`a^c
+ at GAPC_0015:6:1:1297:10729#0/1
+TAATGCCAAAGAAATATTTCCAAACTACATGCTTA
++GAPC_0015:6:1:1297:10729#0/1
+T\ccLbb``bacc]_cacccccLccc\ccTccYL^
+ at GAPC_0015:6:1:1299:5940#0/1
+AATCAAGAAATGAAGATTTATGTATGTGAAGAATA
++GAPC_0015:6:1:1299:5940#0/1
+dcddbcfffdfffd`dd`^`c`Oc`Ybb`^eecde
+ at GAPC_0015:6:1:1308:6996#0/1
+TGGGACACATGTCCATGCTGTGGTTTTAACCGGCA
++GAPC_0015:6:1:1308:6996#0/1
+a]`aLY`Y^^ccYa`^^TccK_X]\c\c`caTTTc
+ at GAPC_0015:6:1:1314:13295#0/1
+AATATTGCTTTGTCTGAACGATAGTGCTCTTTGAT
++GAPC_0015:6:1:1314:13295#0/1
+cLcc\\dddddaaYd`T```bLYT\`a```bZccc
+ at GAPC_0015:6:1:1317:3403#0/1
+TTGTTTCCACTTGGTTGATTTCACCCCTGAGTTTG
++GAPC_0015:6:1:1317:3403#0/1
+\\\ZTYTSaLbb``\_UZ_bbcc`cc^[ac\a\Tc"""
+
+
+FASTQ_EXAMPLE_2 = r"""@GAPC_0017:6:1:1259:10413#0/1
+AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
++GAPC_0015:6:1:1259:10413#0/1
+````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
+ at GAPC_0015:6:1:1283:11957#0/1
+TATGTATATATAACATATACATATATACATACATA
++GAPC_0015:6:1:1283:11957#0/1
+]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
+"""
+
+
+FASTQ_EXAMPLE_3 = r"""@GAPC_0017:6:1:1259:10413#0/1
+AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
++GAPC_0015:6:1:1259:10413#0/1
+````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
+ at GAPC_0015:6:1:1283:11957#0/1
+"""
+
+
+if __name__ == "__main__":  # allow running this test module directly
+    main()
diff --git a/skbio/parse/sequences/tests/test_iterator.py b/skbio/parse/sequences/tests/test_iterator.py
new file mode 100644
index 0000000..fc84d6c
--- /dev/null
+++ b/skbio/parse/sequences/tests/test_iterator.py
@@ -0,0 +1,336 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+from six import StringIO
+from numpy import arange, array
+
+from skbio import SequenceIterator, FastaIterator, FastqIterator
+
+
+class SeqIterTests(TestCase):
+    """Tests of SequenceIterator's per-record validation and transform."""
+
+    def setUp(self):
+        # Record states fed directly into the iterator's validators:
+        # sequence only, valid seq+qual, a seq with embedded whitespace,
+        # a qual whose ID mismatches, and a qual whose length mismatches.
+        self.seq_ok = {'SequenceID': 'foo',
+                       'Sequence': 'AATTGGCC',
+                       'QualID': None,
+                       'Qual': None}
+
+        self.seqqual_ok = {'SequenceID': 'foo',
+                           'Sequence': 'AATTGGCC',
+                           'QualID': 'foo',
+                           'Qual': arange(8)}
+
+        self.seq_bad = {'SequenceID': 'foo',
+                        'Sequence': 'AATT  GGCC',
+                        'QualID': None,
+                        'Qual': None}
+
+        self.seqqual_bad_id = {'SequenceID': 'foo',
+                               'Sequence': 'AATTGGCC',
+                               'QualID': 'bar',
+                               'Qual': arange(8)}
+
+        self.seqqual_bad_qual = {'SequenceID': 'foo',
+                                 'Sequence': 'AATTGGCC',
+                                 'QualID': 'foo',
+                                 'Qual': arange(5)}
+
+        def rev_f(st):
+            # Reverse the sequence, and the qual vector when one exists.
+            st['Sequence'] = st['Sequence'][::-1]
+            st['Qual'] = st['Qual'][::-1] if st['Qual'] is not None else None
+
+        self.rev_f = rev_f
+
+    def test_validate_ids_true(self):
+        """With valid_id=True, a seq/qual ID mismatch marks failure."""
+        wk = SequenceIterator(['aattgg'], valid_id=True)
+
+        wk.state = self.seq_ok.copy()
+        wk.validate_ids()
+        self.assertFalse(wk.failed)
+
+        wk.state = self.seqqual_ok.copy()
+        wk.validate_ids()
+        self.assertFalse(wk.failed)
+
+        wk.state = self.seqqual_bad_id.copy()
+        wk.validate_ids()
+        self.assertTrue(wk.failed)
+
+    def test_validate_ids_false(self):
+        """With valid_id=False, ID mismatches are ignored."""
+        wk = SequenceIterator(['aattgg'], valid_id=False)
+
+        wk.state = self.seq_ok.copy()
+        wk.validate_ids()
+        self.assertFalse(wk.failed)
+
+        wk.state = self.seqqual_ok.copy()
+        wk.validate_ids()
+        self.assertFalse(wk.failed)
+
+        wk.state = self.seqqual_bad_id.copy()
+        wk.validate_ids()
+        self.assertFalse(wk.failed)
+
+    def test_validate_lengths_true(self):
+        """With valid_length=True, seq/qual length mismatch marks failure."""
+        wk = SequenceIterator(['aattgg'], valid_length=True)
+
+        wk.state = self.seq_ok.copy()
+        wk.valid_lengths()
+        self.assertFalse(wk.failed)
+
+        wk.state = self.seqqual_ok.copy()
+        wk.valid_lengths()
+        self.assertFalse(wk.failed)
+
+        wk.state = self.seqqual_bad_qual.copy()
+        wk.valid_lengths()
+        self.assertTrue(wk.failed)
+
+    def test_validate_lengths_false(self):
+        """With valid_length=False, length mismatches are ignored."""
+        wk = SequenceIterator(['aattgg'], valid_length=False)
+
+        wk.state = self.seq_ok.copy()
+        wk.valid_lengths()
+        self.assertFalse(wk.failed)
+
+        wk.state = self.seqqual_ok.copy()
+        wk.valid_lengths()
+        self.assertFalse(wk.failed)
+
+        wk.state = self.seqqual_bad_qual.copy()
+        wk.valid_lengths()
+        self.assertFalse(wk.failed)
+
+    def test_transform(self):
+        """A user transform is applied in place to the current state."""
+        wk = SequenceIterator(['aattgg'], transform=self.rev_f)
+
+        wk.state = self.seqqual_ok.copy()
+        self.assertEqual(wk.state['Sequence'], self.seqqual_ok['Sequence'])
+        wk.transform()
+        self.assertEqual(wk.state['Sequence'],
+                         self.seqqual_ok['Sequence'][::-1])
+        self.assertTrue((wk.state['Qual'] ==
+                         self.seqqual_ok['Qual'][::-1]).all())
+
+    def test_passing_none_for_seqs(self):
+        """Constructing with seq=None is rejected."""
+        with self.assertRaises(ValueError):
+            SequenceIterator(None)
+
+
+class FastaTests(TestCase):
+    """Tests of FastaIterator with and without paired qual files."""
+
+    def setUp(self):
+        self.fastas = [StringIO(fasta1), StringIO(fasta2), StringIO(fasta3)]
+        self.quals = [StringIO(qual1), StringIO(qual2), StringIO(qual3)]
+
+        # Same qual sets but with record 3 corrupted: wrong score count in
+        # one, mismatched label in the other.
+        self.bad_qual_val = [StringIO(qual1), StringIO(qual_bad_val),
+                             StringIO(qual3)]
+        self.bad_qual_id = [StringIO(qual1), StringIO(qual_bad_id),
+                            StringIO(qual3)]
+
+    def test_fasta_gen(self):
+        """Iterating fasta-only input yields records with null Qual."""
+        wk = FastaIterator(seq=self.fastas)
+        gen = wk()
+
+        exp1 = {'SequenceID': '1', 'Sequence': 'aattggcc', 'Qual': None,
+                'QualID': None}
+        exp2 = {'SequenceID': '2', 'Sequence': 'aattaatt', 'Qual': None,
+                'QualID': None}
+        exp3 = {'SequenceID': '3', 'Sequence': 'atat', 'Qual': None,
+                'QualID': None}
+        exp4 = {'SequenceID': '4', 'Sequence': 'attatt', 'Qual': None,
+                'QualID': None}
+        exp5 = {'SequenceID': '5', 'Sequence': 'ggccc', 'Qual': None,
+                'QualID': None}
+
+        obs1 = next(gen)
+        self.assertEqual(obs1, exp1)
+        self.assertFalse(wk.failed)
+
+        obs2 = next(gen)
+        self.assertEqual(obs2, exp2)
+        self.assertFalse(wk.failed)
+
+        obs3 = next(gen)
+        self.assertEqual(obs3, exp3)
+        self.assertFalse(wk.failed)
+
+        obs4 = next(gen)
+        self.assertEqual(obs4, exp4)
+        self.assertFalse(wk.failed)
+
+        obs5 = next(gen)
+        self.assertEqual(obs5, exp5)
+        self.assertFalse(wk.failed)
+
+    def test_fasta_qual(self):
+        """Paired qual data is attached to each record as a numpy array."""
+        wk = FastaIterator(seq=self.fastas, qual=self.quals)
+        gen = wk()
+
+        exp1 = {'SequenceID': '1', 'Sequence': 'aattggcc',
+                'Qual': arange(1, 9), 'QualID': '1'}
+        exp2 = {'SequenceID': '2', 'Sequence': 'aattaatt', 'QualID': '2',
+                'Qual': arange(1, 9)[::-1]}
+        exp3 = {'SequenceID': '3', 'Sequence': 'atat', 'Qual': arange(1, 5),
+                'QualID': '3'}
+        exp4 = {'SequenceID': '4', 'Sequence': 'attatt', 'Qual': arange(1, 7),
+                'QualID': '4'}
+        exp5 = {'SequenceID': '5', 'Sequence': 'ggccc', 'Qual': arange(1, 6),
+                'QualID': '5'}
+
+        # Qual arrays are compared separately, then popped so the remaining
+        # dict fields can be compared with assertEqual.
+        obs1 = next(gen)
+        self.assertTrue((obs1['Qual'] == exp1['Qual']).all())
+        obs1.pop('Qual')
+        exp1.pop('Qual')
+        self.assertEqual(obs1, exp1)
+        self.assertFalse(wk.failed)
+
+        obs2 = next(gen)
+        self.assertTrue((obs2['Qual'] == exp2['Qual']).all())
+        obs2.pop('Qual')
+        exp2.pop('Qual')
+        self.assertEqual(obs2, exp2)
+        self.assertFalse(wk.failed)
+
+        obs3 = next(gen)
+        self.assertTrue((obs3['Qual'] == exp3['Qual']).all())
+        obs3.pop('Qual')
+        exp3.pop('Qual')
+        self.assertEqual(obs3, exp3)
+        self.assertFalse(wk.failed)
+
+        obs4 = next(gen)
+        self.assertTrue((obs4['Qual'] == exp4['Qual']).all())
+        obs4.pop('Qual')
+        exp4.pop('Qual')
+        self.assertEqual(obs4, exp4)
+        self.assertFalse(wk.failed)
+
+        obs5 = next(gen)
+        self.assertTrue((obs5['Qual'] == exp5['Qual']).all())
+        obs5.pop('Qual')
+        exp5.pop('Qual')
+        self.assertEqual(obs5, exp5)
+        self.assertFalse(wk.failed)
+
+    def test_fasta_badqual_val(self):
+        wk = FastaIterator(seq=self.fastas, qual=self.bad_qual_val)
+        gen = wk()
+
+        # default behavior is to silently ignore the bad record (ID '3')
+        exp_ids = ['1', '2', '4', '5']
+        obs_ids = [r['SequenceID'] for r in gen]
+
+        self.assertEqual(obs_ids, exp_ids)
+
+    def test_fasta_badqual_id(self):
+        wk = FastaIterator(seq=self.fastas, qual=self.bad_qual_id)
+        gen = wk()
+
+        # default behavior is to silently ignore the bad record (ID '3')
+        exp_ids = ['1', '2', '4', '5']
+        obs_ids = [r['SequenceID'] for r in gen]
+
+        self.assertEqual(obs_ids, exp_ids)
+
+
+class FastqTests(TestCase):
+    """Tests of FastqIterator over multiple fastq sources."""
+
+    def setUp(self):
+        self.fastqs = [StringIO(fastq1), StringIO(fastq2)]
+
+    def test_fastq_gen(self):
+        """Records from consecutive files are yielded in order."""
+        wk = FastqIterator(seq=self.fastqs)
+        gen = wk()
+
+        exp1 = {'SequenceID': '1', 'Sequence': 'atat', 'QualID': '1',
+                'Qual': array([32, 33, 34, 35])}
+        exp2 = {'SequenceID': '2', 'Sequence': 'atgc', 'QualID': '2',
+                'Qual': array([33, 34, 35, 36])}
+        exp3 = {'SequenceID': '3', 'Sequence': 'taa', 'QualID': '3',
+                'Qual': array([36, 37, 38])}
+
+        # Qual arrays are compared separately, then popped so the remaining
+        # dict fields can be compared with assertEqual.
+        obs1 = next(gen)
+        self.assertTrue((obs1['Qual'] == exp1['Qual']).all())
+        obs1.pop('Qual')
+        exp1.pop('Qual')
+        self.assertEqual(obs1, exp1)
+
+        obs2 = next(gen)
+        self.assertTrue((obs2['Qual'] == exp2['Qual']).all())
+        obs2.pop('Qual')
+        exp2.pop('Qual')
+        self.assertEqual(obs2, exp2)
+
+        obs3 = next(gen)
+        self.assertTrue((obs3['Qual'] == exp3['Qual']).all())
+        obs3.pop('Qual')
+        exp3.pop('Qual')
+        self.assertEqual(obs3, exp3)
+
+
+# --- fasta/qual fixtures for the iterator tests above ---
+
+fasta1 = """>1
+aattggcc
+>2
+aattaatt
+"""
+
+fasta2 = """>3
+atat
+"""
+
+fasta3 = """>4
+attatt
+>5
+ggccc
+"""
+
+# qual records matching fasta1-3 by ID and length
+qual1 = """>1
+1 2 3 4 5 6 7 8
+>2
+8 7 6 5 4 3 2 1
+"""
+
+qual2 = """>3
+1 2 3 4
+"""
+
+qual3 = """>4
+1 2 3 4 5 6
+>5
+1 2 3 4 5
+"""
+
+# too few scores for record 3 (sequence length 4)
+qual_bad_val = """>3
+1 2
+"""
+
+# label does not match any fasta record ID
+qual_bad_id = """>asdasd
+1 2 3 4
+"""
+
+fastq1 = """@1
+atat
++
+ABCD
+ at 2
+atgc
++
+BCDE
+"""
+
+fastq2 = """@3
+taa
++
+EFG
+"""
+
+
+if __name__ == '__main__':  # allow running this test module directly
+    main()
diff --git a/skbio/parse/tests/__init__.py b/skbio/parse/tests/__init__.py
new file mode 100644
index 0000000..c99682c
--- /dev/null
+++ b/skbio/parse/tests/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/parse/tests/test_record.py b/skbio/parse/tests/test_record.py
new file mode 100644
index 0000000..92a4188
--- /dev/null
+++ b/skbio/parse/tests/test_record.py
@@ -0,0 +1,550 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+from skbio.parse.record import (DelimitedSplitter,
+                                GenericRecord, MappedRecord, TypeSetter,
+                                list_adder, dict_adder,
+                                LineOrientedConstructor, int_setter,
+                                bool_setter, string_and_strip, FieldWrapper,
+                                StrictFieldWrapper, raise_unknown_field,
+                                FieldMorpher)
+from skbio.io import FieldError
+
+
+class recordsTests(TestCase):
+
+    """Tests of top-level functionality in records."""
+
+    def test_string_and_strip(self):
+        """string_and_strip should convert all items to strings and strip them
+        """
+        self.assertEqual(string_and_strip(), [])
+        self.assertEqual(string_and_strip('\t', ' ', '\n\t'), ['', '', ''])
+        self.assertEqual(string_and_strip('\ta\tb', 3, '   cde   e', None),
+                         ['a\tb', '3', 'cde   e', 'None'])
+
+    def test_raise_unknown_field(self):
+        """raise_unknown_field should always raise FieldError"""
+        self.assertRaises(FieldError, raise_unknown_field, 'xyz', 123)
+
+
+class DelimitedSplitterTests(TestCase):
+
+    """Tests of the DelimitedSplitter factory function."""
+
+    def test_parsers(self):
+        """DelimitedSplitter should return function with correct behavior"""
+        empty = DelimitedSplitter()
+        space = DelimitedSplitter(None)
+        semicolon = DelimitedSplitter(';')
+        twosplits = DelimitedSplitter(';', 2)
+        allsplits = DelimitedSplitter(';', None)
+        lastone = DelimitedSplitter(';', -1)
+        lasttwo = DelimitedSplitter(';', -2)
+
+        self.assertEqual(empty('a   b  c'), ['a', 'b  c'])
+        self.assertEqual(empty('abc'), ['abc'])
+        self.assertEqual(empty('   '), [])
+
+        self.assertEqual(empty('a  b  c'), space('a  b  c'))
+        self.assertEqual(semicolon('  a  ; b   ;  c  d'), ['a', 'b   ;  c  d'])
+        self.assertEqual(twosplits('  a  ; b   ;  c  d'), ['a', 'b', 'c  d'])
+        self.assertEqual(allsplits(' a ;  b  ; c;;d;e  ;'),
+                         ['a', 'b', 'c', '', 'd', 'e', ''])
+        self.assertEqual(lastone(' a ;  b  ; c;;d;e  ;'),
+                         ['a ;  b  ; c;;d;e', ''])
+        self.assertEqual(lasttwo(' a ;  b  ; c;;d;e  ;'),
+                         ['a ;  b  ; c;;d', 'e', ''])
+        self.assertEqual(lasttwo(''), [])
+        self.assertEqual(lasttwo('x'), ['x'])
+        self.assertEqual(lasttwo('x;'), ['x', ''])
+
+
+class GenericRecordTests(TestCase):
+
+    """Tests of the GenericRecord class"""
+    class gr(GenericRecord):
+        Required = {'a': 'x', 'b': [], 'c': {}}
+
+    def test_init(self):
+        """GenericRecord init should work OK empty or with data"""
+        self.assertEqual(GenericRecord(), {})
+        self.assertEqual(GenericRecord({'a': 1}), {'a': 1})
+        assert isinstance(GenericRecord(), GenericRecord)
+
+    def test_init_subclass(self):
+        """GenericRecord subclass init should include required data"""
+        self.assertEqual(self.gr(), {'a': 'x', 'b': [], 'c': {}})
+        self.assertEqual(self.gr({'a': []}), {'a': [], 'b': [], 'c': {}})
+        assert isinstance(self.gr(), self.gr)
+        assert isinstance(self.gr(), GenericRecord)
+
+    def test_delitem(self):
+        """GenericRecord delitem should fail if item required"""
+        g = self.gr()
+        g['d'] = 3
+        self.assertEqual(g, {'a': 'x', 'b': [], 'c': {}, 'd': 3})
+        del g['d']
+        self.assertEqual(g, {'a': 'x', 'b': [], 'c': {}})
+        self.assertRaises(AttributeError, g.__delitem__, 'a')
+        g['c'][3] = 4
+        self.assertEqual(g['c'], {3: 4})
+
+    def test_copy(self):
+        """GenericRecord copy should include attributes and set correct class
+        """
+        g = self.gr()
+        g['a'] = 'abc'
+        g.X = 'y'
+        h = g.copy()
+        self.assertEqual(g, h)
+        assert isinstance(h, self.gr)
+        self.assertEqual(h.X, 'y')
+        self.assertEqual(h, {'a': 'abc', 'b': [], 'c': {}})
+
+
+class MappedRecordTests(TestCase):
+
+    """Tests of the MappedRecord class"""
+
+    def setUp(self):
+        """Define a few standard MappedRecords"""
+        self.empty = MappedRecord()
+        self.single = MappedRecord({'a': 3})
+        self.several = MappedRecord(a=4, b=5, c='a', d=[1, 2, 3])
+
+    def test_init_empty(self):
+        """MappedRecord empty init should work OK"""
+        g = MappedRecord()
+        self.assertEqual(g, {})
+
+    def test_init_data(self):
+        """MappedRecord should work like normal dict init"""
+        exp = {'a': 3, 'b': 4}
+        self.assertEqual(MappedRecord({'a': 3, 'b': 4}), exp)
+        self.assertEqual(MappedRecord(a=3, b=4), exp)
+        self.assertEqual(MappedRecord([['a', 3], ['b', 4]]), exp)
+
+    def test_init_subclass(self):
+        """MappedRecord subclasses should behave as expected"""
+        class rec(MappedRecord):
+            Required = {'a': {}, 'b': 'xyz', 'c': 3}
+            Aliases = {'B': 'b'}
+
+        r = rec()
+        self.assertEqual(r, {'a': {}, 'b': 'xyz', 'c': 3})
+        # test that subclassing is correct
+        s = r.copy()
+        assert isinstance(s, rec)
+        # test Aliases
+        s.B = 0
+        self.assertEqual(s, {'a': {}, 'b': 0, 'c': 3})
+        # test Required
+        try:
+            del s.B
+        except AttributeError:
+            pass
+        else:
+            raise AssertionError("Subclass failed to catch requirement")
+
+    def test_getattr(self):
+        """MappedRecord getattr should look in dict after real attrs"""
+        s = self.several
+        self.assertEqual(s.Aliases, {})
+        self.assertEqual(s.a, 4)
+        self.assertEqual(s.d, [1, 2, 3])
+        for key in s:
+            self.assertEqual(getattr(s, key), s[key])
+        assert 'xyz' not in s
+        self.assertEqual(s.xyz, None)
+        self.assertEqual(s['xyz'], None)
+        s.Aliases = {'xyz': 'a'}
+        self.assertEqual(s['xyz'], 4)
+
+    def test_setattr(self):
+        """MappedRecord setattr should add to dict"""
+        s = self.single
+        # check that we haven't screwed up normal attribute setting
+        assert 'Aliases' not in s
+        s.Aliases = {'x': 'y'}
+        assert 'Aliases' not in s
+        self.assertEqual(s.Aliases, {'x': 'y'})
+        s.x = 5
+        assert 'x' in s
+        self.assertEqual(s['x'], 5)
+        self.assertEqual(s.x, 5)
+        s.Aliases = {'XYZ': 'b'}
+        s.XYZ = 3
+        self.assertEqual(s.b, 3)
+
+    def test_delattr(self):
+        """MappedRecord delattr should work for 'normal' and other attributes
+        """
+        s = self.single
+        s.__dict__['x'] = 'y'
+        assert 'x' not in s
+        self.assertEqual(s.x, 'y')
+        del s.x
+        self.assertEqual(s.x, None)
+        self.assertEqual(s, {'a': 3})
+        # try it for an internal attribute: check it doesn't delete anything
+        # else
+        s.b = 4
+        self.assertEqual(s, {'a': 3, 'b': 4})
+        del s.a
+        self.assertEqual(s, {'b': 4})
+        del s.abc
+        self.assertEqual(s, {'b': 4})
+        s.Required = {'b': True}
+        try:
+            del s.b
+        except AttributeError:
+            pass
+        else:
+            raise AssertionError("Allowed deletion of required attribute""")
+        s.a = 3
+        self.assertEqual(s.a, 3)
+        s.Aliases = {'xyz': 'a'}
+        del s.xyz
+        self.assertEqual(s.a, None)
+
+    def test_getitem(self):
+        """MappedRecord getitem should work only for keys, not attributes"""
+        s = self.single
+        self.assertEqual(s['Required'], None)
+        self.assertEqual(s['a'], 3)
+        self.assertEqual(s['xyz'], None)
+        self.assertEqual(s[list('abc')], None)
+        s.Aliases = {'xyz': 'a'}
+        self.assertEqual(s['xyz'], 3)
+
+    def test_setitem(self):
+        """MappedRecord setitem should work only for keys, not attributes"""
+        s = self.single
+        s['Required'] = None
+        self.assertEqual(s, {'a': 3, 'Required': None})
+        self.assertEqual(s.Required, {})
+        self.assertNotEqual(s.Required, None)
+        s['c'] = 5
+        self.assertEqual(s, {'a': 3, 'c': 5, 'Required': None})
+        # still not allowed unhashable objects as keys
+        self.assertRaises(TypeError, s.__setitem__, range(3))
+        s.Aliases = {'C': 'c'}
+        s['C'] = 3
+        self.assertEqual(s, {'a': 3, 'c': 3, 'Required': None})
+
+    def test_delitem(self):
+        """MappedRecord delitem should only work for keys, not attributes"""
+        s = self.single
+        del s['Required']
+        self.assertEqual(s.Required, {})
+        s.Required = {'a': True}
+        try:
+            del s['a']
+        except AttributeError:
+            pass
+        else:
+            raise AssertionError("Allowed deletion of required item")
+        s.Aliases = {'B': 'b'}
+        s.b = 5
+        self.assertEqual(s.b, 5)
+        del s.B
+        self.assertEqual(s.b, None)
+
+    def test_contains(self):
+        """MappedRecord contains should use aliases, but not apply to attrs"""
+        s = self.single
+        assert 'a' in s
+        assert 'b' not in s
+        s.b = 5
+        assert 'b' in s
+        assert 'Required' not in s
+        assert 'A' not in s
+        s.Aliases = {'A': 'a'}
+        assert 'A' in s
+
+    def test_get(self):
+        """MappedRecord get should be typesafe against unhashables"""
+        s = self.single
+        self.assertEqual(s.get(1, 6), 6)
+        self.assertEqual(s.get('a', 'xyz'), 3)
+        self.assertEqual(s.get('ABC', 'xyz'), 'xyz')
+        s.Aliases = {'ABC': 'a'}
+        self.assertEqual(s.get('ABC', 'xyz'), 3)
+        self.assertEqual(s.get([1, 2, 3], 'x'), 'x')
+
+    def test_setdefault(self):
+        """MappedRecord setdefault should not be typesafe against unhashables
+        """
+        s = self.single
+        x = s.setdefault('X', 'xyz')
+        self.assertEqual(x, 'xyz')
+        self.assertEqual(s, {'a': 3, 'X': 'xyz'})
+        self.assertRaises(TypeError, s.setdefault, ['a', 'b'], 'xyz')
+
+    def test_update(self):
+        """MappedRecord update should transparently convert keys"""
+        s = self.single
+        s.b = 999
+        s.Aliases = {'XYZ': 'x', 'ABC': 'a'}
+        d = {'ABC': 111, 'CVB': 222}
+        s.update(d)
+        self.assertEqual(s, {'a': 111, 'b': 999, 'CVB': 222})
+
+    def test_copy(self):
+        """MappedRecord copy should return correct class"""
+        s = self.single
+        t = s.copy()
+        assert isinstance(t, MappedRecord)
+        s.Aliases = {'XYZ': 'x'}
+        u = s.copy()
+        u.Aliases['ABC'] = 'a'
+        self.assertEqual(s.Aliases, {'XYZ': 'x'})
+        self.assertEqual(t.Aliases, {})
+        self.assertEqual(u.Aliases, {'XYZ': 'x', 'ABC': 'a'})
+
+    def test_subclass(self):
+        """MappedRecord subclassing should work correctly"""
+        class ret3(MappedRecord):
+            DefaultValue = 3
+            ClassData = 'xyz'
+
+        x = ret3({'ABC': 777, 'DEF': '999'})
+        self.assertEqual(x.ZZZ, 3)
+        self.assertEqual(x.ABC, 777)
+        self.assertEqual(x.DEF, '999')
+        self.assertEqual(x.ClassData, 'xyz')
+        x.ZZZ = 6
+        self.assertEqual(x.ZZZ, 6)
+        self.assertEqual(x.ZZ, 3)
+        x.ClassData = 'qwe'
+        self.assertEqual(x.ClassData, 'qwe')
+        self.assertEqual(ret3.ClassData, 'xyz')
+
+    def test_DefaultValue(self):
+        """MappedRecord DefaultValue should give new copy when requested"""
+        class m(MappedRecord):
+            DefaultValue = []
+
+        a = m()
+        b = m()
+        assert a['abc'] is not b['abc']
+        assert a['abc'] == b['abc']
+
+
+class dummy(object):
+
+    """Do-nothing class whose attributes can be freely abused."""
+    pass
+
+
+class TypeSetterTests(TestCase):
+
+    """Tests of the TypeSetter class"""
+
+    def test_setter_empty(self):
+        """TypeSetter should set attrs to vals on empty init"""
+        d = dummy()
+        ident = TypeSetter()
+        ident(d, 'x', 'abc')
+        self.assertEqual(d.x, 'abc')
+        ident(d, 'y', 3)
+        self.assertEqual(d.y, 3)
+        ident(d, 'x', 2)
+        self.assertEqual(d.x, 2)
+
+    def test_setter_typed(self):
+        """TypeSetter should set attrs to constructor(val) when specified"""
+        d = dummy()
+        i = TypeSetter(int)
+        i(d, 'zz', 3)
+        self.assertEqual(d.zz, 3)
+        i(d, 'xx', '456')
+        self.assertEqual(d.xx, 456)
+
+
+class TypeSetterLikeTests(TestCase):
+
+    """Tests of the functions that behave similarly to TypeSetter products"""
+
+    def test_list_adder(self):
+        """list_adder should add items to list, creating if necessary"""
+        d = dummy()
+        list_adder(d, 'x', 3)
+        self.assertEqual(d.x, [3])
+        list_adder(d, 'x', 'abc')
+        self.assertEqual(d.x, [3, 'abc'])
+        list_adder(d, 'y', [2, 3])
+        self.assertEqual(d.x, [3, 'abc'])
+        self.assertEqual(d.y, [[2, 3]])
+
+    def test_dict_adder(self):
+        """dict_adder should add items to dict, creating if necessary"""
+        d = dummy()
+        dict_adder(d, 'x', 3)
+        self.assertEqual(d.x, {3: None})
+        dict_adder(d, 'x', 'ab')
+        self.assertEqual(d.x, {3: None, 'a': 'b'})
+        dict_adder(d, 'x', ['a', 0])
+        self.assertEqual(d.x, {3: None, 'a': 0})
+        dict_adder(d, 'y', None)
+        self.assertEqual(d.x, {3: None, 'a': 0})
+        self.assertEqual(d.y, {None: None})
+
+
+class LineOrientedConstructorTests(TestCase):
+
+    """Tests of the LineOrientedConstructor class"""
+
+    def test_init_empty(self):
+        """LOC empty init should succeed with expected defaults"""
+        l = LineOrientedConstructor()
+        self.assertEqual(l.Lines, [])
+        self.assertEqual(l.LabelSplitter(' ab  cd  '), ['ab', 'cd'])
+        self.assertEqual(l.FieldMap, {})
+        self.assertEqual(l.Constructor, MappedRecord)
+        self.assertEqual(l.Strict, False)
+
+    def test_empty_LOC(self):
+        """LOC empty should fail if strict, fill fields if not strict"""
+        data = ["abc   def", "3  n", "\t  abc   \txyz\n\n", "fgh   "]
+        l = LineOrientedConstructor()
+        result = l()
+        self.assertEqual(result, {})
+        result = l([])
+        self.assertEqual(result, {})
+        result = l(['   ', '\n\t   '])
+        self.assertEqual(result, {})
+        result = l(data)
+        self.assertEqual(result, {'abc': 'xyz', '3': 'n', 'fgh': None})
+
+    def test_full_LOC(self):
+        """LOC should behave as expected when initialized with rich data"""
+        data = ["abc\t def", " 3 \t n", "  abc   \txyz\n\n", "x\t5", "fgh   ",
+                "x\t3    "]
+
+        class rec(MappedRecord):
+            Required = {'abc': []}
+        maps = {'abc': list_adder, 'x': int_setter, 'fgh': bool_setter}
+        label_splitter = DelimitedSplitter('\t')
+        constructor = rec
+        strict = True
+        loc_bad = LineOrientedConstructor(data, label_splitter, maps,
+                                          constructor, strict)
+        self.assertRaises(FieldError, loc_bad)
+        strict = False
+        loc_good = LineOrientedConstructor(data, label_splitter, maps,
+                                           constructor, strict)
+        result = loc_good()
+        assert isinstance(result, rec)
+        self.assertEqual(result,
+                         {'abc': ['def', 'xyz'], '3': 'n',
+                          'fgh': False, 'x': 3})
+
+
+class fake_dict(dict):
+
+    """Test that constructors return the correct subclass"""
+    pass
+
+
+class FieldWrapperTests(TestCase):
+
+    """Tests of the FieldWrapper factory function"""
+
+    def test_default(self):
+        """Default FieldWrapper should wrap fields and labels"""
+        fields = list('abcde')
+        f = FieldWrapper(fields)
+        self.assertEqual(f(''), {})
+        self.assertEqual(f('xy za '), {'a': 'xy', 'b': 'za'})
+        self.assertEqual(f('1   2\t\t 3  \n4 5 6'),
+                         {'a': '1', 'b': '2', 'c': '3', 'd': '4', 'e': '5'})
+
+    def test_splitter(self):
+        """FieldWrapper with splitter should use that splitter"""
+        fields = ['label', 'count']
+        splitter = DelimitedSplitter(':', -1)
+        f = FieldWrapper(fields, splitter)
+        self.assertEqual(f(''), {})
+        self.assertEqual(f('nknasd:'), {'label': 'nknasd', 'count': ''})
+        self.assertEqual(
+            f('n:k:n:a:sd  '),
+            {'label': 'n:k:n:a',
+             'count': 'sd'})
+
+    def test_constructor(self):
+        """FieldWrapper with constructor should use that constructor"""
+        fields = list('abc')
+        f = FieldWrapper(fields, constructor=fake_dict)
+        self.assertEqual(f('x y'), {'a': 'x', 'b': 'y'})
+        assert isinstance(f('x y'), fake_dict)
+
+
+class StrictFieldWrapperTests(TestCase):
+
+    """Tests of the StrictFieldWrapper factory function"""
+
+    def test_default(self):
+        """Default StrictFieldWrapper should wrap fields if count correct"""
+        fields = list('abcde')
+        f = StrictFieldWrapper(fields)
+        self.assertEqual(f('1   2\t\t 3  \n4 5 '),
+                         {'a': '1', 'b': '2', 'c': '3', 'd': '4', 'e': '5'})
+        self.assertRaises(FieldError, f, '')
+        self.assertRaises(FieldError, f, 'xy za ')
+
+    def test_splitter(self):
+        """StrictFieldWrapper with splitter should use that splitter"""
+        fields = ['label', 'count']
+        splitter = DelimitedSplitter(':', -1)
+        f = StrictFieldWrapper(fields, splitter)
+        self.assertEqual(
+            f('n:k:n:a:sd  '),
+            {'label': 'n:k:n:a',
+             'count': 'sd'})
+        self.assertEqual(f('nknasd:'), {'label': 'nknasd', 'count': ''})
+        self.assertRaises(FieldError, f, '')
+
+    def test_constructor(self):
+        """StrictFieldWrapper with constructor should use that constructor"""
+        fields = list('ab')
+        f = StrictFieldWrapper(fields, constructor=fake_dict)
+        self.assertEqual(f('x y'), {'a': 'x', 'b': 'y'})
+        assert isinstance(f('x y'), fake_dict)
+
+
+class FieldMorpherTests(TestCase):
+
+    """Tests of the FieldMorpher class."""
+
+    def test_default(self):
+        """FieldMorpher default should use correct constructors"""
+        fm = FieldMorpher({'a': int, 'b': str})
+        self.assertEqual(fm({'a': '3', 'b': 456}), {'a': 3, 'b': '456'})
+
+    def test_default_error(self):
+        """FieldMorpher default should raise FieldError on unknown fields"""
+        fm = FieldMorpher({'a': int, 'b': str})
+        self.assertRaises(FieldError, fm, {'a': '3', 'b': 456, 'c': '4'})
+
+    def test_altered_default(self):
+        """FieldMorpher with default set should apply it"""
+        def func(x, y):
+            return str(x), float(y) - 0.5
+
+        fm = FieldMorpher({'3': str, 4: int}, func)
+        # check that recognized values aren't tampered with
+        self.assertEqual(fm({3: 3, 4: '4'}), {'3': '3', 4: 4})
+        # check that unrecognized values get the appropriate conversion
+        self.assertEqual(fm({3: 3, 5: '5'}), {'3': '3', '5': 4.5})
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/parse/tests/test_record_finder.py b/skbio/parse/tests/test_record_finder.py
new file mode 100644
index 0000000..77c543c
--- /dev/null
+++ b/skbio/parse/tests/test_record_finder.py
@@ -0,0 +1,257 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+from skbio.io import RecordError
+from skbio.parse.record_finder import (DelimitedRecordFinder,
+                                       LabeledRecordFinder, LineGrouper,
+                                       TailedRecordFinder)
+
+
+class TailedRecordFinderTests(TestCase):
+    def setUp(self):
+        self.endswith_period = lambda x: x.endswith('.')
+        self.period_tail_finder = TailedRecordFinder(self.endswith_period)
+
+    def test_parsers(self):
+        lines = '>abc\ndef\nz.\n>efg\nz.'.split()
+        fl = self.period_tail_finder
+        self.assertEqual(list(fl(lines)),
+                         [['>abc', 'def', 'z.'], ['>efg', 'z.']])
+
+    def test_parsers_empty(self):
+        fl = self.period_tail_finder
+        self.assertEqual(list(fl(['  ', '\n'])), [])
+        self.assertEqual(list(fl([])), [])
+
+    def test_parsers_strip(self):
+        fl = self.period_tail_finder
+        lines = '>abc  \n \t def\n  z. \t\n>efg \nz.'.split('\n')
+        self.assertEqual(list(fl(lines)),
+                         [['>abc', ' \t def', '  z.'], ['>efg', 'z.']])
+
+    def test_parsers_leftover(self):
+        f = self.period_tail_finder
+        good = ['abc  \n',
+                'def\n',
+                '.\n',
+                'ghi \n',
+                'j.',
+                ]
+        blank = ['', '   ', '\t    \t\n\n']
+        bad = ['abc']
+
+        result = [['abc', 'def', '.'], ['ghi', 'j.']]
+
+        self.assertEqual(list(f(good)), result)
+        self.assertEqual(list(f(good + blank)), result)
+        self.assertRaises(RecordError, list, f(good + bad))
+
+        f2 = TailedRecordFinder(self.endswith_period, strict=False)
+        self.assertEqual(list(f2(good + bad)), result + [['abc']])
+
+    def test_parsers_ignore(self):
+        def never(line):
+            return False
+
+        def ignore_labels(line):
+            return (not line) or line.isspace() or line.startswith('#')
+
+        lines = ['abc', '\n', '1.', 'def', '#ignore', '2.']
+        self.assertEqual(list(TailedRecordFinder(self.endswith_period)(lines)),
+                         [['abc', '1.'], ['def', '#ignore', '2.']])
+        self.assertEqual(list(TailedRecordFinder(self.endswith_period,
+                                                 ignore=never)(lines)),
+                         [['abc', '', '1.'], ['def', '#ignore', '2.']])
+        self.assertEqual(list(TailedRecordFinder(self.endswith_period,
+                                                 ignore=ignore_labels)(lines)),
+                         [['abc', '1.'], ['def', '2.']])
+
+
+class DelimitedRecordFinderTests(TestCase):
+    def test_parsers(self):
+        lines = 'abc\ndef\n//\nefg\n//'.split()
+        self.assertEqual(list(DelimitedRecordFinder('//')(lines)),
+                         [['abc', 'def', '//'], ['efg', '//']])
+        self.assertEqual(list(DelimitedRecordFinder('//', keep_delimiter=False)
+                              (lines)),
+                         [['abc', 'def'], ['efg']])
+
+    def test_parsers_empty(self):
+        self.assertEqual(list(DelimitedRecordFinder('//')(['  ', '\n'])), [])
+        self.assertEqual(list(DelimitedRecordFinder('//')([])), [])
+
+    def test_parsers_strip(self):
+        lines = '  \t   abc  \n \t   def\n  // \t\n\t\t efg \n//'.split('\n')
+        self.assertEqual(list(DelimitedRecordFinder('//')(lines)),
+                         [['abc', 'def', '//'], ['efg', '//']])
+
+    def test_parsers_error(self):
+        good = ['  \t   abc  \n',
+                '\t   def\n',
+                '// \t\n',
+                '\t\n',
+                '\t efg \n',
+                '\t\t//\n',
+                ]
+        blank = ['', '   ', '\t    \t\n\n']
+        bad = ['abc']
+
+        result = [['abc', 'def', '//'], ['efg', '//']]
+        r = DelimitedRecordFinder('//')
+
+        self.assertEqual(list(r(good)), result)
+        self.assertEqual(list(r(good + blank)), result)
+        try:
+            list(r(good + bad))
+        except RecordError:
+            pass
+        else:
+            raise AssertionError("Parser failed to raise error on bad data")
+
+        r = DelimitedRecordFinder('//', strict=False)
+        self.assertEqual(list(r(good + bad)), result + [['abc']])
+
+    def test_parsers_ignore(self):
+        def never(line):
+            return False
+
+        def ignore_labels(line):
+            return (not line) or line.isspace() or line.startswith('#')
+
+        lines = ['>abc', '\n', '1', '$$', '>def', '#ignore', '2', '$$']
+        self.assertEqual(list(DelimitedRecordFinder('$$')(lines)),
+                         [['>abc', '1', '$$'], ['>def', '#ignore', '2', '$$']])
+        self.assertEqual(list(DelimitedRecordFinder('$$',
+                                                    ignore=never)(lines)),
+                         [['>abc', '', '1', '$$'],
+                          ['>def', '#ignore', '2', '$$']])
+        self.assertEqual(
+            list(DelimitedRecordFinder('$$', ignore=ignore_labels)(lines)),
+            [['>abc', '1', '$$'], ['>def', '2', '$$']])
+
+
+class LabeledRecordFinderTests(TestCase):
+    def setUp(self):
+        self.FastaLike = LabeledRecordFinder(lambda x: x.startswith('>'))
+
+    def test_parsers(self):
+        lines = '>abc\ndef\n//\n>efg\n//'.split()
+        fl = self.FastaLike
+        self.assertEqual(list(fl(lines)),
+                         [['>abc', 'def', '//'], ['>efg', '//']])
+
+    def test_parsers_empty(self):
+        fl = self.FastaLike
+        self.assertEqual(list(fl(['  ', '\n'])), [])
+        self.assertEqual(list(fl([])), [])
+
+    def test_parsers_strip(self):
+        fl = self.FastaLike
+        lines = '  \t   >abc  \n \t   def\n  // \t\n\t\t >efg \n//'.split('\n')
+        self.assertEqual(list(fl(lines)),
+                         [['>abc', 'def', '//'], ['>efg', '//']])
+
+    def test_parsers_leftover(self):
+        fl = self.FastaLike
+        good = ['  \t   >abc  \n',
+                '\t   def\n',
+                '\t\n',
+                '\t >efg \n',
+                'ghi',
+                ]
+        blank = ['', '   ', '\t    \t\n\n']
+        bad = ['>abc']
+
+        result = [['>abc', 'def'], ['>efg', 'ghi']]
+
+        self.assertEqual(list(fl(good)), result)
+        self.assertEqual(list(fl(good + blank)), result)
+        self.assertEqual(list(fl(good + bad)), result + [['>abc']])
+
+    def test_parsers_ignore(self):
+        def never(line):
+            return False
+
+        def ignore_labels(line):
+            return (not line) or line.isspace() or line.startswith('#')
+
+        def is_start(line):
+            return line.startswith('>')
+
+        lines = ['>abc', '\n', '1', '>def', '#ignore', '2']
+        self.assertEqual(list(LabeledRecordFinder(is_start)(lines)),
+                         [['>abc', '1'], ['>def', '#ignore', '2']])
+        self.assertEqual(list(LabeledRecordFinder(is_start,
+                                                  ignore=never)(lines)),
+                         [['>abc', '', '1'], ['>def', '#ignore', '2']])
+        self.assertEqual(list(LabeledRecordFinder(is_start,
+                                                  ignore=ignore_labels)(
+            lines)),
+            [['>abc', '1'], ['>def', '2']])
+
+    def test_constructor_is_none(self):
+        lrf = LabeledRecordFinder(lambda x: x.strip().startswith('>'),
+                                  constructor=None)
+        lines = '  \t   >abc  \n \t   def\n  // \t\n\t\t >efg \n//'.split('\n')
+
+        obs = list(lrf(lines))
+        exp = [['  \t   >abc  ', ' \t   def', '  // \t'], ['\t\t >efg ', '//']]
+        self.assertEqual(obs, exp)
+
+
+class LineGrouperTests(TestCase):
+    def test_parser(self):
+        good = ['  \t   >abc  \n',
+                '\t   def\n',
+                '\t\n',
+                '\t >efg \n',
+                'ghi',
+                ]
+        c = LineGrouper(2)
+        self.assertEqual(list(c(good)), [['>abc', 'def'], ['>efg', 'ghi']])
+        c = LineGrouper(1)
+        self.assertEqual(list(c(good)), [['>abc'], ['def'], ['>efg'], ['ghi']])
+        c = LineGrouper(4)
+        self.assertEqual(list(c(good)), [['>abc', 'def', '>efg', 'ghi']])
+        # shouldn't work if not evenly divisible
+        c = LineGrouper(3)
+        self.assertRaises(RecordError, list, c(good))
+
+    def test_parser_ignore(self):
+        def never(line):
+            return False
+
+        def ignore_labels(line):
+            return (not line) or line.isspace() or line.startswith('#')
+
+        lines = ['abc', '\n', '1', 'def', '#ignore', '2']
+        self.assertEqual(list(LineGrouper(1)(lines)),
+                         [['abc'], ['1'], ['def'], ['#ignore'], ['2']])
+        self.assertEqual(list(LineGrouper(1, ignore=never)(lines)),
+                         [[i.strip()] for i in lines])
+        self.assertEqual(list(LineGrouper(2, ignore=ignore_labels)(lines)),
+                         [['abc', '1'], ['def', '2']])
+
+    def test_constructor_is_none(self):
+        lines = ['abc', ' def   ', ' ghi', 'jkl  ']
+
+        # should strip
+        exp = [['abc', 'def'], ['ghi', 'jkl']]
+        obs = list(LineGrouper(2)(lines))
+        self.assertEqual(obs, exp)
+
+        # should not strip
+        exp = [['abc', ' def   '], [' ghi', 'jkl  ']]
+        obs = list(LineGrouper(2, constructor=None)(lines))
+        self.assertEqual(obs, exp)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/sequence/__init__.py b/skbio/sequence/__init__.py
new file mode 100644
index 0000000..4a9427d
--- /dev/null
+++ b/skbio/sequence/__init__.py
@@ -0,0 +1,163 @@
+r"""
+Biological sequences (:mod:`skbio.sequence`)
+============================================
+
+.. currentmodule:: skbio.sequence
+
+This module provides functionality for working with biological sequences,
+including generic sequences, nucleotide sequences, DNA sequences, and RNA
+sequences. Class methods and attributes are also available to obtain valid
+character sets, complement maps for different sequence types, and for
+obtaining degenerate character definitions. Additionally this module defines the
+``GeneticCode`` class, which represents an immutable object that translates RNA
+or DNA strings to amino acid sequences.
+
+Classes
+-------
+
+.. autosummary::
+   :toctree: generated/
+
+   BiologicalSequence
+   NucleotideSequence
+   DNASequence
+   RNASequence
+   ProteinSequence
+   GeneticCode
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   genetic_code
+
+Exceptions
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   BiologicalSequenceError
+   GeneticCodeError
+   GeneticCodeInitError
+   InvalidCodonError
+
+Examples
+--------
+>>> from skbio.sequence import DNASequence, RNASequence
+
+New sequences are created with optional id and description fields.
+
+>>> d1 = DNASequence('ACC--G-GGTA..')
+>>> d1 = DNASequence('ACC--G-GGTA..',id="seq1")
+>>> d1 = DNASequence('ACC--G-GGTA..',id="seq1",description="GFP")
+
+New sequences can also be created from existing sequences, for example as their
+reverse complement or degapped (i.e., unaligned) version.
+
+>>> d2 = d1.degap()
+>>> d1
+<DNASequence: ACC--G-GGT... (length: 13)>
+>>> d2
+<DNASequence: ACCGGGTA (length: 8)>
+>>> d3 = d2.reverse_complement()
+>>> d3
+<DNASequence: TACCCGGT (length: 8)>
+
+It's also straightforward to compute distances between sequences (optionally
+using user-defined distance metrics, default is Hamming distance) for use in
+sequence clustering, phylogenetic reconstruction, etc.
+
+>>> d4 = DNASequence('GACCCGCT')
+>>> d5 = DNASequence('GACCCCCT')
+>>> d3.distance(d4)
+0.25
+>>> d3.distance(d5)
+0.375
+
+Class-level methods contain information about the molecule types.
+
+>>> DNASequence.iupac_degeneracies()['B']
+set(['C', 'T', 'G'])
+
+>>> RNASequence.iupac_degeneracies()['B']
+set(['C', 'U', 'G'])
+
+>>> DNASequence.is_gap('-')
+True
+
+Creating and using a ``GeneticCode`` object
+
+>>> from skbio.sequence import genetic_code
+>>> from pprint import pprint
+>>> sgc = genetic_code(1)
+>>> sgc
+GeneticCode(FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG)
+>>> sgc['UUU'] == 'F'
+True
+>>> sgc['TTT'] == 'F'
+True
+>>> sgc['F'] == ['TTT', 'TTC']          #in arbitrary order
+True
+>>> sgc['*'] == ['TAA', 'TAG', 'TGA']   #in arbitrary order
+True
+
+Retrieving the anticodons of the object
+
+>>> pprint(sgc.anticodons)
+{'*': ['TTA', 'CTA', 'TCA'],
+ 'A': ['AGC', 'GGC', 'TGC', 'CGC'],
+ 'C': ['ACA', 'GCA'],
+ 'D': ['ATC', 'GTC'],
+ 'E': ['TTC', 'CTC'],
+ 'F': ['AAA', 'GAA'],
+ 'G': ['ACC', 'GCC', 'TCC', 'CCC'],
+ 'H': ['ATG', 'GTG'],
+ 'I': ['AAT', 'GAT', 'TAT'],
+ 'K': ['TTT', 'CTT'],
+ 'L': ['TAA', 'CAA', 'AAG', 'GAG', 'TAG', 'CAG'],
+ 'M': ['CAT'],
+ 'N': ['ATT', 'GTT'],
+ 'P': ['AGG', 'GGG', 'TGG', 'CGG'],
+ 'Q': ['TTG', 'CTG'],
+ 'R': ['ACG', 'GCG', 'TCG', 'CCG', 'TCT', 'CCT'],
+ 'S': ['AGA', 'GGA', 'TGA', 'CGA', 'ACT', 'GCT'],
+ 'T': ['AGT', 'GGT', 'TGT', 'CGT'],
+ 'V': ['AAC', 'GAC', 'TAC', 'CAC'],
+ 'W': ['CCA'],
+ 'Y': ['ATA', 'GTA']}
+
+NucleotideSequences can be translated using a ``GeneticCode`` object.
+
+>>> d6 = DNASequence('ATGTCTAAATGA')
+>>> from skbio.sequence import genetic_code
+>>> gc = genetic_code(11)
+>>> gc.translate(d6)
+<ProteinSequence: MSK* (length: 4)>
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from ._exception import (BiologicalSequenceError, GeneticCodeError,
+                         GeneticCodeInitError, InvalidCodonError)
+from ._sequence import (BiologicalSequence, NucleotideSequence, DNASequence,
+                        RNASequence, ProteinSequence, DNA, RNA, Protein)
+from ._genetic_code import GeneticCode, genetic_code
+
# Public API of the skbio.sequence package.
__all__ = ['BiologicalSequenceError', 'GeneticCodeError',
           'GeneticCodeInitError', 'InvalidCodonError', 'BiologicalSequence',
           'NucleotideSequence', 'DNASequence', 'RNASequence',
           'ProteinSequence', 'DNA', 'RNA', 'Protein', 'GeneticCode',
           'genetic_code']

# Entry point for running this package's tests via numpy's test runner.
test = Tester().test
diff --git a/skbio/sequence/_exception.py b/skbio/sequence/_exception.py
new file mode 100644
index 0000000..259b15c
--- /dev/null
+++ b/skbio/sequence/_exception.py
@@ -0,0 +1,29 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+
class BiologicalSequenceError(Exception):
    """Raised when a biological sequence fails validation (for example,
    when it contains characters outside its alphabet)."""
+
+
class GeneticCodeError(Exception):
    """Root of the exception hierarchy used by the ``GeneticCode`` class."""
+
+
class GeneticCodeInitError(ValueError, GeneticCodeError):
    """Raised when a ``GeneticCode`` is constructed from a bad code
    sequence (e.g. one whose length is not 64)."""
+
+
class InvalidCodonError(KeyError, GeneticCodeError):
    """Raised by ``GeneticCode.__getitem__`` when the lookup key is neither
    a single amino-acid character nor a three-character codon."""
diff --git a/skbio/sequence/_genetic_code.py b/skbio/sequence/_genetic_code.py
new file mode 100644
index 0000000..9794c1c
--- /dev/null
+++ b/skbio/sequence/_genetic_code.py
@@ -0,0 +1,620 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import re
+
+from collections import defaultdict
+
+from skbio._base import SkbioObject
+from skbio.sequence import Protein, InvalidCodonError, GeneticCodeInitError
+
+# py3k compatibility
+try:
+    from string import maketrans
+except ImportError:
+    maketrans = str.maketrans
+
+_dna_trans = maketrans('TCAG', 'AGTC')
+
+
+def _simple_rc(seq):
+    """simple reverse-complement: works only on unambiguous uppercase DNA"""
+    return seq.translate(_dna_trans)[::-1]
+
+
class GeneticCode(SkbioObject):

    """Class to hold codon to amino acid mapping, and vice versa.

    Attributes
    ----------
    code_sequence
    id
    name
    start_codon_sequence
    start_codons
    codons
    synonyms
    sense_codons
    anticodons
    blocks

    Parameters
    ----------
    code_sequence : str
        64-character string containing NCBI representation.
    id : str, optional
        identifier for the object.
    name : str, optional
        name for the object.
    start_codon_sequence : str, optional
        starting point for the codon sequence.

    Returns
    -------
    GeneticCode
        initialized ``GeneticCode`` object.

    Raises
    ------
    GeneticCodeInitError
        If the length of `code_sequence` is different to `64`.

    Methods
    -------
    changes
    get_stop_indices
    is_start
    is_stop
    translate_six_frames
    translate
    __repr__
    __getitem__
    __str__
    __eq__

    Examples
    --------
    >>> from skbio.sequence import GeneticCode
    >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSR'
    ...                   'RVVVVAAAADDEEGGGG')

    .. note:: `*` is used to denote termination as per the NCBI standard.
        Although the genetic code objects convert DNA to RNA and vice versa,
        lists of codons that they produce will be provided in DNA format.

    """
    # class data: need the bases, the list of codons in UUU -> GGG order, and
    # a mapping from positions in the list back to codons. These should be the
    # same for all GeneticCode instances, and are immutable (therefore
    # private).
    _codons = [a + b + c for a in "TCAG" for b in "TCAG" for c in "TCAG"]

    def __init__(self, code_sequence, id=None, name=None,
                 start_codon_sequence=None):
        if len(code_sequence) != 64:
            raise GeneticCodeInitError("code_sequence: %s has length %d, but "
                                       "expected 64" % (code_sequence,
                                                        len(code_sequence)))

        self.code_sequence = code_sequence
        self.id = id
        self.name = name
        self.start_codon_sequence = start_codon_sequence
        # '-' in the NCBI start-codon string marks "not a start codon".
        start_codons = {}
        if start_codon_sequence is not None:
            for codon, aa in zip(self._codons, start_codon_sequence):
                if aa != '-':
                    start_codons[codon] = aa
        self.start_codons = start_codons
        codon_lookup = {key: value for (key, value) in zip(self._codons,
                                                           code_sequence)}
        self.codons = codon_lookup

        # create synonyms for each aa
        aa_lookup = defaultdict(list)
        for codon in self._codons:
            aa = codon_lookup[codon]
            aa_lookup[aa].append(codon)
        self.synonyms = dict(aa_lookup)
        sense_codons = codon_lookup.copy()

        # create sense codons (all codons minus the stop codons)
        stop_codons = self['*']
        for c in stop_codons:
            del sense_codons[c]
        self.sense_codons = sense_codons

        # create anticodons
        ac = {}
        for aa, codons in self.synonyms.items():
            ac[aa] = [_simple_rc(element) for element in codons]
        self.anticodons = ac

    def _analyze_quartet(self, codons, aa):
        """Analyzes a quartet of codons and amino acids: returns list of lists.

        Each list contains one block, splitting at purine/pyrimidine boundary
        if necessary.

        codons should be a list of 4 codons.
        aa should be a list of 4 amino acid symbols.

        Possible states:
            - All amino acids are the same: returns list of one quartet.
            - Two groups of 2 aa: returns list of two doublets.
            - One group of 2 and 2 groups of 1: list of one doublet, 2 singles.
            - 4 groups of 1: four singles.

        Note: codon blocks like Ile in the standard code (AUU, AUC, AUA) will
        be split when they cross the R/Y boundary, so [[AUU, AUC], [AUA]]. This
        would also apply to a block like AUC AUA AUG -> [[AUC],[AUA,AUG]],
        although this latter pattern is not observed in the standard code.
        """
        # A "doublet" holds when the two codons of a half-quartet (pyrimidine
        # half: positions 0-1; purine half: positions 2-3) encode the same aa.
        first_doublet = aa[0] == aa[1]
        second_doublet = aa[2] == aa[3]
        if first_doublet and second_doublet and aa[1] == aa[2]:
            return [codons]
        else:
            blocks = []
            if first_doublet:
                blocks.append(codons[:2])
            else:
                blocks.extend([[codons[0]], [codons[1]]])
            if second_doublet:
                blocks.append(codons[2:])
            else:
                blocks.extend([[codons[2]], [codons[3]]])
            return blocks

    def _get_blocks(self):
        """Returns list of lists of codon blocks in the genetic code.

        A codon block can be:
            - a quartet, if all 4 XYn codons have the same amino acid.
            - a doublet, if XYt and XYc or XYa and XYg have the same aa.
            - a singlet, otherwise.

        Returns
        -------
        list
            Returns a list of the quartets, doublets, and singlets in the order
            UUU -> GGG.

        Notes
        -----
        A doublet cannot span the purine/pyrimidine boundary, and a quartet
        cannot span the boundary between two codon blocks whose first two bases
        differ.

        """
        # Lazily computed and cached on first access (see `blocks` property).
        if hasattr(self, '_blocks'):
            return self._blocks
        else:
            blocks = []
            curr_codons = []
            curr_aa = []
            for index, codon, aa in zip(range(64), self._codons,
                                        self.code_sequence):
                # we're in a new block if it's a new quartet or a different aa
                new_quartet = not index % 4
                if new_quartet and curr_codons:
                    blocks.extend(self._analyze_quartet(curr_codons, curr_aa))
                    curr_codons = []
                    curr_aa = []
                curr_codons.append(codon)
                curr_aa.append(aa)
            # don't forget to append last block
            if curr_codons:
                blocks.extend(self._analyze_quartet(curr_codons, curr_aa))
            self._blocks = blocks
            return self._blocks

    blocks = property(_get_blocks)

    def __str__(self):
        """Returns code_sequence that constructs the GeneticCode

        .. shownumpydoc
        """
        return self.code_sequence

    def __repr__(self):
        """Returns reconstructable representation of the GeneticCode

        .. shownumpydoc
        """
        return 'GeneticCode(%s)' % str(self)

    def __eq__(self, other):
        """ Allows two GeneticCode objects to be compared to each other.

        Two GeneticCode objects are equal if they have equal code_sequences.

        .. shownumpydoc
        """
        if not isinstance(other, GeneticCode):
            return False
        return self.code_sequence == other.code_sequence

    def __ne__(self, other):
        """Required in Py2."""
        return not self == other

    def __getitem__(self, item):
        """Returns amino acid corresponding to codon, or codons for an aa.

        Returns [] for empty list of codons, 'X' for unknown amino acid.

        .. shownumpydoc
        """
        item = str(item)
        if len(item) == 1:  # amino acid
            return self.synonyms.get(item, [])
        elif len(item) == 3:  # codon
            key = item.upper()
            # accept RNA codons by mapping them onto the DNA lookup table
            key = key.replace('U', 'T')
            return self.codons.get(key, 'X')
        else:
            raise InvalidCodonError("Codon or aa %s has wrong length" % item)

    def translate(self, nucleotide_sequence, start=0):
        """Translate nucleotide to protein sequence

        Parameters
        ----------
        nucleotide_sequence : NucleotideSequence
            sequence to be translated
        start : int, optional
            position to begin translation

        Returns
        -------
        ProteinSequence
            translation of nucleotide_sequence

        Raises
        ------
        ValueError
            If `start` points past the end of a non-empty sequence.

        Notes
        -----
        ``translate`` returns the translation of the entire sequence, (i.e., of
        ``nucleotide_sequence[start:]``). It is the user's responsibility to
        trim to an open reading frame, either from the input or using the
        output, if that is desired.

        See Also
        --------
        translate_six_frames

        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> sgc.translate('AUGCAUGACUUUUGA', 1)
        <ProteinSequence: CMTF (length: 4)>

        """
        if len(nucleotide_sequence) == 0:
            return Protein('')
        if start + 1 > len(nucleotide_sequence):
            # BUG FIX: the two adjacent literals previously lacked a space,
            # producing "...after end ofNucleotideSequence".
            raise ValueError("Translation starts after end of "
                             "NucleotideSequence")

        translation = []
        # step by 3, dropping any trailing partial codon
        for i in range(start, len(nucleotide_sequence) - 2, 3):
            translation.append(self[nucleotide_sequence[i:i + 3]])
        translation = Protein(''.join(translation))

        return translation

    def get_stop_indices(self, nucleotide_sequence, start=0):
        """returns indexes for stop codons in the specified frame

        Parameters
        ----------
        nucleotide_sequence : str, NucleotideSequence
            sequence to be scanned for stop codons
        start : int, optional
            position where the search begins.

        Returns
        -------
        list
            indices of the stop codons.

        Examples
        --------
        >>> from skbio.sequence import GeneticCode, DNA
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> seq = DNA('ATGCTAACATAAA')
        >>> sgc.get_stop_indices(seq, 0)
        [9]

        """
        stops = self['*']
        stop_pattern = '(%s)' % '|'.join(stops)
        stop_pattern = re.compile(stop_pattern)
        seq = str(nucleotide_sequence)
        found = [hit.start() for hit in stop_pattern.finditer(seq)]
        # keep only hits that fall in the requested reading frame
        found = [index for index in found if index % 3 == start]
        return found

    def translate_six_frames(self, nucleotide_sequence):
        """Translate nucleotide to protein sequences for all six reading frames

        Parameters
        ----------
        nucleotide_sequence : NucleotideSequence
            sequence to be translated

        Returns
        -------
        list
            the six translated ProteinSequence objects

        See Also
        --------
        translate

        Examples
        --------
        >>> from skbio.sequence import GeneticCode, RNA
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> results = sgc.translate_six_frames(RNA('AUGCUAACAUAAA'))
        >>> for e in results: e
        <ProteinSequence: MLT* (length: 4)>
        <ProteinSequence: C*HK (length: 4)>
        <ProteinSequence: ANI (length: 3)>
        <ProteinSequence: FMLA (length: 4)>
        <ProteinSequence: LC*H (length: 4)>
        <ProteinSequence: YVS (length: 3)>

        """
        rc_nucleotide_sequence = nucleotide_sequence.rc()
        results = []
        # frames 1-3: forward strand; frames 4-6: reverse complement
        for start in range(3):
            translation = self.translate(nucleotide_sequence, start)
            results.append(translation)

        for start in range(3):
            translation = self.translate(rc_nucleotide_sequence, start)
            results.append(translation)

        return results

    def is_start(self, codon):
        """Checks if codon is a start codon

        Parameters
        ----------
        codon : str
            codon string

        Returns
        -------
        bool
            ``True`` if codon is a start codon, ``False`` otherwise

        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> sgc.is_start('ATG')
        False
        >>> sgc.is_start('AAA')
        False

        """
        fixed_codon = codon.upper().replace('U', 'T')
        return fixed_codon in self.start_codons

    def is_stop(self, codon):
        """Checks if codon is a stop codon

        Parameters
        ----------
        codon : str
            codon string

        Returns
        -------
        bool
            ``True`` if codon is a stop codon, ``False`` otherwise

        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> sgc.is_stop('UAA')
        True
        >>> sgc.is_stop('AAA')
        False

        """
        return self[codon] == '*'

    def changes(self, other):
        """Returns dictionary of codons that differ

        Parameters
        ----------
        other : GeneticCode
           genetic code object

        Returns
        -------
        dict
            Returns a dictionary of the form ``{codon:'XY'}`` for codons that
            differ. X is the string representation of the amino acid in the
            object calling this method, Y is the string representation of the
            amino acid in `other`. Always returns a 2-character string.

        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> from pprint import pprint
        >>> sgc = GeneticCode('FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS*'
        ...                   '*VVVVAAAADDEEGGGG')
        >>> pprint(sgc.changes('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTT'
        ...                    'TNNKKSSRRVVVVAAAADDEEGGGG'))
        {'AGA': '*R', 'AGG': '*R', 'ATA': 'MI', 'TGA': 'W*'}

        """
        changes = {}
        try:
            other_code = other.code_sequence
        except AttributeError:  # try using other directly as sequence
            other_code = other
        for codon, old, new in zip(self._codons, self.code_sequence,
                                   other_code):
            if old != new:
                changes[codon] = old + new
        return changes
+
+
# NCBI translation tables. Each entry is a positional argument list for
# ``GeneticCode``: [code_sequence, ncbi_id, name, start_codon_sequence].
# The final entry (table 23) supplies no start-codon string, so its
# ``start_codon_sequence`` defaults to None.
_ncbi_genetic_code_data = [
    [
        'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        1,
        'Standard Nuclear',
        '---M---------------M---------------M----------------------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG',
        2,
        'Vertebrate Mitochondrial',
        '--------------------------------MMMM---------------M------------',
    ],
    [
        'FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        3,
        'Yeast Mitochondrial',
        '----------------------------------MM----------------------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        4,
        'Mold, Protozoan, and Coelenterate Mitochondrial, and Mycoplasma/'
        'Spiroplasma Nuclear',
        '--MM---------------M------------MMMM---------------M------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG',
        5,
        'Invertebrate Mitochondrial',
        '---M----------------------------MMMM---------------M------------',
    ],
    [
        'FFLLSSSSYYQQCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        6,
        'Ciliate, Dasycladacean and Hexamita Nuclear',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
        9,
        'Echinoderm and Flatworm Mitochondrial',
        '-----------------------------------M---------------M------------',
    ],
    [
        'FFLLSSSSYY**CCCWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        10,
        'Euplotid Nuclear',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        11,
        'Bacterial Nuclear and Plant Plastid',
        '---M---------------M------------MMMM---------------M------------',
    ],
    [
        'FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        12,
        'Alternative Yeast Nuclear',
        '-------------------M---------------M----------------------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG',
        13,
        'Ascidian Mitochondrial',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
        14,
        'Alternative Flatworm Mitochondrial',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYY*QCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        15,
        'Blepharisma Nuclear',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        16,
        'Chlorophycean Mitochondrial',
        '-----------------------------------M----------------------------',
    ],
    [
        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
        20,
        'Trematode Mitochondrial',
        '-----------------------------------M---------------M------------',
    ],
    [
        'FFLLSS*SYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        22,
        'Scenedesmus obliquus Mitochondrial',
        '-----------------------------------M----------------------------',
    ],
    [
        'FF*LSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
        23,
        'Thraustochytrium Mitochondrial',
    ],
]
+
+
def genetic_code(*id):
    """``skbio.sequence.GeneticCode`` factory given an optional id.

    Parameters
    ----------
    id : int or str, optional
        Indicates the ``skbio.sequence.GeneticCode`` to return. Must be the
        NCBI id of one of the known genetic codes, in the range of [1, 23]
        inclusive. If `id` is not provided, the Standard Nuclear genetic code
        will be returned.

    Returns
    -------
    skbio.sequence.GeneticCode

    Raises
    ------
    TypeError
        If more than one positional argument is provided.
    ValueError
        If no genetic code exists for the requested id.

    """
    if len(id) > 1:
        raise TypeError('genetic_code takes 0 or 1 arguments (%d given)'
                        % len(id))
    # default to the Standard Nuclear code (NCBI table 1)
    key = int(id[0]) if id else 1
    for entry in _ncbi_genetic_code_data:
        if entry[1] == key:
            return GeneticCode(*entry)

    # BUG FIX: format the resolved integer key, not the raw varargs tuple
    # (`'%d.' % id` only worked because `id` happened to be a 1-tuple here).
    raise ValueError('Genetic code could not be found for %d.' % key)
diff --git a/skbio/sequence/_sequence.py b/skbio/sequence/_sequence.py
new file mode 100644
index 0000000..52baffe
--- /dev/null
+++ b/skbio/sequence/_sequence.py
@@ -0,0 +1,1969 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import range
+from future.utils import viewitems
+from six import string_types
+
+import re
+import warnings
+from collections import Sequence, Counter, defaultdict
+from itertools import product
+
+import numpy as np
+from scipy.spatial.distance import hamming
+
+from skbio._base import SkbioObject
+from skbio.sequence import BiologicalSequenceError
+
+
+class BiologicalSequence(Sequence, SkbioObject):
+    """Base class for biological sequences.
+
+    Parameters
+    ----------
+    sequence : python Sequence (e.g., str, list or tuple)
+        The biological sequence.
+    id : str, optional
+        The sequence id (e.g., an accession number).
+    description : str, optional
+        A description or comment about the sequence (e.g., "green
+        fluorescent protein").
+    quality : 1-D array_like, int, optional
+        Phred quality scores stored as nonnegative integers, one per sequence
+        character. If provided, must be the same length as the biological
+        sequence. Can be a 1-D ``numpy.ndarray`` of integers, or a structure
+        that can be converted to this representation using ``numpy.asarray``. A
+        copy will *not* be made if `quality` is already a 1-D ``numpy.ndarray``
+        with an ``int`` ``dtype``. The array will be made read-only (i.e., its
+        ``WRITEABLE`` flag will be set to ``False``).
+    validate : bool, optional
+        If True, runs the `is_valid` method after construction and raises
+        BiologicalSequenceError if ``is_valid == False``.
+
+    Attributes
+    ----------
+    sequence
+    id
+    description
+    quality
+
+    Raises
+    ------
+    skbio.sequence.BiologicalSequenceError
+        If ``validate == True`` and ``is_valid == False``, or if `quality` is
+        not the correct shape.
+
+    See Also
+    --------
+    NucleotideSequence
+    DNASequence
+    RNASequence
+
+    Notes
+    -----
+    `BiologicalSequence` objects are immutable. Where applicable, methods
+    return a new object of the same class.
+    Subclasses are typically defined by methods relevant to only a specific
+    type of biological sequence, and by containing characters only contained in
+    the IUPAC standard character set [1]_ for that molecule type.
+
+    Examples
+    --------
+    >>> from skbio.sequence import BiologicalSequence
+    >>> s = BiologicalSequence('GGUCGUGAAGGA')
+    >>> t = BiologicalSequence('GGUCCUGAAGGU')
+
+    References
+    ----------
+    .. [1] Nomenclature for incompletely specified bases in nucleic acid
+       sequences: recommendations 1984.
+       Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
+       A Cornish-Bowden
+
+    """
+    default_write_format = 'fasta'
+
+    @classmethod
+    def alphabet(cls):
+        """Return the set of characters allowed in a `BiologicalSequence`.
+
+        Returns
+        -------
+        set
+            Characters that are allowed in a valid `BiologicalSequence`.
+
+        See Also
+        --------
+        is_valid
+        gap_alphabet
+        unsupported_characters
+        has_unsupported_characters
+
+        """
+        return cls.iupac_characters()
+
+    @classmethod
+    def gap_alphabet(cls):
+        """Return the set of characters defined as gaps.
+
+        Returns
+        -------
+        set
+            Characters defined as gaps in a `BiologicalSequence`
+
+        See Also
+        --------
+        alphabet
+        unsupported_characters
+        has_unsupported_characters
+        degap
+        gap_maps
+        gap_vector
+
+        """
+        return set('-.')
+
+    @classmethod
+    def iupac_degenerate_characters(cls):
+        """Return the degenerate IUPAC characters.
+
+        Returns
+        -------
+        set
+            Degenerate IUPAC characters.
+
+        """
+        return set(cls.iupac_degeneracies())
+
+    @classmethod
+    def iupac_characters(cls):
+        """Return the non-degenerate and degenerate characters.
+
+        Returns
+        -------
+        set
+            Non-degenerate and degenerate characters.
+
+        """
+        return (cls.iupac_standard_characters() |
+                cls.iupac_degenerate_characters())
+
+    @classmethod
+    def iupac_standard_characters(cls):
+        """Return the non-degenerate IUPAC characters.
+
+        Returns
+        -------
+        set
+            Non-degenerate IUPAC characters.
+
+        """
+        return set()
+
+    @classmethod
+    def iupac_degeneracies(cls):
+        """Return the mapping of degenerate to non-degenerate characters.
+
+        Returns
+        -------
+        dict of sets
+            Mapping of IUPAC degenerate character to the set of
+            non-degenerate IUPAC characters it represents.
+
+        """
+        return {}
+
    def __init__(self, sequence, id="", description="", quality=None,
                 validate=False):
        """Initialize a `BiologicalSequence`.

        Parameters
        ----------
        sequence : str or iterable of str
            Characters of the biological sequence. A non-string iterable
            is joined into a single string.
        id : str, optional
            Identifier of the sequence.
        description : str, optional
            Free-text description of the sequence.
        quality : 1-D array_like of nonnegative int, optional
            Phred quality scores, one per sequence character.
        validate : bool, optional
            If ``True``, raise ``BiologicalSequenceError`` when the
            sequence contains characters outside ``alphabet`` and
            ``gap_alphabet``.

        Raises
        ------
        BiologicalSequenceError
            If `validate` is ``True`` and unsupported characters are
            present.

        """
        if not isinstance(sequence, string_types):
            sequence = ''.join(sequence)
        self._sequence = sequence

        self._id = id
        self._description = description
        # _set_quality is defined elsewhere in this class; presumably it
        # normalizes the scores into the read-only ndarray exposed by the
        # `quality` property -- see that property's docstring.
        self._set_quality(quality)

        if validate and not self.is_valid():
            unsupported_chars = self.unsupported_characters()
            raise BiologicalSequenceError(
                "Sequence contains unsupported characters: %s"
                % (" ".join(unsupported_chars)))
+
+    def __contains__(self, other):
+        """The in operator.
+
+        Parameters
+        ----------
+        other : str
+            The putative subsequence.
+
+        Returns
+        -------
+        bool
+            Indicates whether `other` is contained in `self`.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUCGUGAAGGA')
+        >>> 'GGU' in s
+        True
+        >>> 'CCC' in s
+        False
+
+        .. shownumpydoc
+
+        """
+        return other in self._sequence
+
+    def __eq__(self, other):
+        """The equality operator.
+
+        Biological sequences are equal if their sequence is the same and they
+        are the same type. Identifier, description, and quality scores
+        **are ignored**.
+
+        Parameters
+        ----------
+        other : `BiologicalSequence`
+            The sequence to test for equality against.
+
+        Returns
+        -------
+        bool
+            Indicates whether `self` and `other` are equal.
+
+        See Also
+        --------
+        __ne__
+        equals
+
+        Notes
+        -----
+        See ``BiologicalSequence.equals`` for more fine-grained control of
+        equality testing.
+
+        This method is equivalent to
+        ``self.equals(other, ignore=['id', 'description', 'quality'])``.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUCGUGAAGGA')
+        >>> t = BiologicalSequence('GGUCGUGAAGGA')
+        >>> s == t
+        True
+        >>> u = BiologicalSequence('GGUCGUGACCGA')
+        >>> u == t
+        False
+
+        Note that even though the quality scores do not match between ``u`` and
+        ``v``, they are considered equal:
+
+        >>> v = BiologicalSequence('GGUCGUGACCGA',
+        ...                        quality=[1, 5, 3, 3, 2, 42, 100, 9, 10, 55,
+        ...                                 42, 42])
+        >>> u == v
+        True
+
+        .. shownumpydoc
+
+        """
+        return self.equals(other, ignore=['id', 'description', 'quality'])
+
    def __getitem__(self, i):
        """The indexing operator.

        Parameters
        ----------
        i : int, slice, or sequence of ints
            The position(s) to return from the `BiologicalSequence`. If `i` is
            a sequence of ints, these are assumed to be indices in the sequence
            to keep.

        Returns
        -------
        BiologicalSequence
            New biological sequence containing the character(s) at position(s)
            `i` in the current `BiologicalSequence`. If quality scores are
            present, the quality score at position(s) `i` will be included in
            the returned sequence. ID and description are also included.

        Raises
        ------
        IndexError
            If any position in `i` is out of range for the sequence.

        Examples
        --------
        >>> from skbio.sequence import BiologicalSequence
        >>> s = BiologicalSequence('GGUCGUGAAGGA')

        Obtain a single character from the biological sequence:

        >>> s[1]
        <BiologicalSequence: G (length: 1)>

        Obtain a slice:

        >>> s[7:]
        <BiologicalSequence: AAGGA (length: 5)>

        Obtain characters at the following indices:

        >>> s[[3, 4, 7, 0, 3]]
        <BiologicalSequence: CGAGC (length: 5)>

        .. shownumpydoc

        """
        # TODO update this method when #60 is resolved. we have to deal with
        # discrepancies in indexing rules between str and ndarray... hence the
        # ugly code
        try:
            # Fast path: `i` is an int or a slice, which both str and
            # ndarray accept directly.
            try:
                seq = self.sequence[i]
                qual = self.quality[i] if self.has_quality() else None
            except TypeError:
                # `i` is a sequence of indices; gather positions one at a
                # time because str does not support fancy indexing.
                seq = [self.sequence[idx] for idx in i]

                if self.has_quality():
                    qual = [self.quality[idx] for idx in i]
                else:
                    qual = None
        except IndexError:
            # Re-raise with a message identifying this sequence.
            raise IndexError(
                "Position %r is out of range for %r." % (i, self))

        return self.copy(sequence=seq, quality=qual)
+
    def __hash__(self):
        """The hash operator.

        Returns
        -------
        int
            The hash of the `BiologicalSequence`.

        Notes
        -----
        The hash is computed from the underlying sequence string only,
        which is consistent with ``__eq__`` (equal sequences hash equal).
        The exact value is platform- and process-dependent (string
        hashing may be randomized), so it should not be relied upon
        across runs.

        .. shownumpydoc

        """
        return hash(self._sequence)
+
+    def __iter__(self):
+        """The iter operator.
+
+        Returns
+        -------
+        iterator
+            Position iterator for the `BiologicalSequence`.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUC')
+        >>> for c in s: print(c)
+        G
+        G
+        U
+        C
+
+        .. shownumpydoc
+
+        """
+        return iter(self._sequence)
+
+    def __len__(self):
+        """The len operator.
+
+        Returns
+        -------
+        int
+            The length of the `BiologicalSequence`.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUC')
+        >>> len(s)
+        4
+
+        .. shownumpydoc
+
+        """
+        return len(self._sequence)
+
+    def __ne__(self, other):
+        """The inequality operator.
+
+        Biological sequences are not equal if their sequence is different or
+        they are not the same type. Identifier, description, and quality scores
+        **are ignored**.
+
+        Parameters
+        ----------
+        other : `BiologicalSequence`
+            The sequence to test for inequality against.
+
+        Returns
+        -------
+        bool
+            Indicates whether `self` and `other` are not equal.
+
+        See Also
+        --------
+        __eq__
+        equals
+
+        Notes
+        -----
+        See ``BiologicalSequence.equals`` for more fine-grained control of
+        equality testing.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUCGUGAAGGA')
+        >>> t = BiologicalSequence('GGUCGUGAAGGA')
+        >>> s != t
+        False
+        >>> u = BiologicalSequence('GGUCGUGACCGA')
+        >>> u != t
+        True
+
+        .. shownumpydoc
+
+        """
+        return not (self == other)
+
+    def __repr__(self):
+        """The repr method.
+
+        Returns
+        -------
+        str
+            Returns a string representation of the object.
+
+        Notes
+        -----
+        String representation contains the class name, the first ten characters
+        of the sequence followed by ellipses (or the full sequence
+        and no ellipses, if the sequence is less than 11 characters long),
+        followed by the sequence length.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUCGUGAAGGA')
+        >>> repr(s)
+        '<BiologicalSequence: GGUCGUGAAG... (length: 12)>'
+        >>> t = BiologicalSequence('ACGT')
+        >>> repr(t)
+        '<BiologicalSequence: ACGT (length: 4)>'
+        >>> t
+        <BiologicalSequence: ACGT (length: 4)>
+
+        .. shownumpydoc
+
+        """
+        first_ten = self.sequence[:10]
+        cn = self.__class__.__name__
+        length = len(self)
+        if length > 10:
+            ellipses = "..."
+        else:
+            ellipses = ""
+        return '<%s: %s%s (length: %d)>' % (cn, first_ten, ellipses, length)
+
+    def __reversed__(self):
+        """The reversed operator.
+
+        Returns
+        -------
+        iterator
+            Reverse position iterator for the `BiologicalSequence`.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUC')
+        >>> for c in reversed(s): print(c)
+        C
+        U
+        G
+        G
+
+        .. shownumpydoc
+
+        """
+        return reversed(self._sequence)
+
+    def __str__(self):
+        """The str operator
+
+        Returns
+        -------
+        str
+            String representation of the `BiologicalSequence`. This will be the
+            full sequence, but will not contain information about the type,
+            identifier, description, or quality scores.
+
+        See Also
+        --------
+        to_fasta
+        id
+        description
+        __repr__
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUC')
+        >>> str(s)
+        'GGUC'
+        >>> print(s)
+        GGUC
+
+        .. shownumpydoc
+
+        """
+        return self.sequence
+
+    @property
+    def sequence(self):
+        """String containing underlying biological sequence characters.
+
+        A string representing the characters of the biological sequence.
+
+        Notes
+        -----
+        This property is not writeable.
+
+        """
+        return self._sequence
+
+    @property
+    def id(self):
+        """ID of the biological sequence.
+
+        A string representing the identifier (ID) of the biological sequence.
+
+        Notes
+        -----
+        This property is not writeable.
+
+        """
+        return self._id
+
+    @property
+    def description(self):
+        """Description of the biological sequence.
+
+        A string representing the description of the biological sequence.
+
+        Notes
+        -----
+        This property is not writeable.
+
+        """
+        return self._description
+
+    @property
+    def quality(self):
+        """Quality scores of the characters in the biological sequence.
+
+        A 1-D ``numpy.ndarray`` of nonnegative integers representing Phred
+        quality scores for each character in the biological sequence, or
+        ``None`` if quality scores are not present.
+
+        Notes
+        -----
+        This property is not writeable. A copy of the array is *not* returned.
+        The array is read-only (i.e., its ``WRITEABLE`` flag is set to
+        ``False``).
+
+        """
+        return self._quality
+
+    def has_quality(self):
+        """Return bool indicating presence of quality scores in the sequence.
+
+        Returns
+        -------
+        bool
+            ``True`` if the biological sequence has quality scores, ``False``
+            otherwise.
+
+        See Also
+        --------
+        quality
+
+        """
+        return self.quality is not None
+
+    def copy(self, **kwargs):
+        """Return a copy of the current biological sequence.
+
+        Returns a copy of the current biological sequence, optionally with
+        updated attributes specified as keyword arguments.
+
+        Parameters
+        ----------
+        kwargs : dict, optional
+            Keyword arguments passed to the ``BiologicalSequence`` (or
+            subclass) constructor. The returned copy will have its attributes
+            updated based on the values in `kwargs`. If an attribute is
+            missing, the copy will keep the same attribute as the current
+            biological sequence. Valid attribute names are `'sequence'`,
+            `'id'`, `'description'`, and `'quality'`. Default behavior is to
+            return a copy of the current biological sequence without changing
+            any attributes.
+
+        Returns
+        -------
+        BiologicalSequence
+            Copy of the current biological sequence, optionally with updated
+            attributes based on `kwargs`. Will be the same type as the current
+            biological sequence (`self`).
+
+        Notes
+        -----
+        This is a shallow copy, but since biological sequences are immutable,
+        it is conceptually the same as a deep copy.
+
+        This method is the preferred way of creating new instances from an
+        existing biological sequence, instead of calling
+        ``self.__class__(...)``, as the latter can be error-prone (e.g.,
+        forgetting to propagate attributes to the new instance).
+
+        Examples
+        --------
+        Create a biological sequence:
+
+        >>> from skbio import BiologicalSequence
+        >>> seq = BiologicalSequence('AACCGGTT', id='id1',
+        ...                          description='biological sequence',
+        ...                          quality=[4, 2, 22, 23, 1, 1, 1, 9])
+
+        Create a copy of ``seq``, keeping the same underlying sequence of
+        characters and quality scores, while updating ID and description:
+
+        >>> new_seq = seq.copy(id='new-id', description='new description')
+
+        Note that the copied biological sequence's underlying sequence and
+        quality scores are the same as ``seq``:
+
+        >>> new_seq.sequence
+        'AACCGGTT'
+        >>> new_seq.quality
+        array([ 4,  2, 22, 23,  1,  1,  1,  9])
+
+        The ID and description have been updated:
+
+        >>> new_seq.id
+        'new-id'
+        >>> new_seq.description
+        'new description'
+
+        The original biological sequence's ID and description have not been
+        changed:
+
+        >>> seq.id
+        'id1'
+        >>> seq.description
+        'biological sequence'
+
+        """
+        defaults = {
+            'sequence': self.sequence,
+            'id': self.id,
+            'description': self.description,
+            'quality': self.quality
+        }
+        defaults.update(kwargs)
+        return self.__class__(**defaults)
+
+    def equals(self, other, ignore=None):
+        """Compare two biological sequences for equality.
+
+        By default, biological sequences are equal if their sequence,
+        identifier, description, and quality scores are the same and they are
+        the same type.
+
+        Parameters
+        ----------
+        other : BiologicalSequence
+            The sequence to test for equality against.
+        ignore : iterable of str, optional
+            List of features to ignore in the equality test. By default, all
+            features must be the same for two biological sequences to be
+            considered equal. Features that can be ignored are ``'type'``,
+            ``'id'``, ``'description'``, ``'quality'``, and ``'sequence'``.
+
+        Returns
+        -------
+        bool
+            Indicates whether `self` and `other` are equal.
+
+        See Also
+        --------
+        __eq__
+        __ne__
+
+        Examples
+        --------
+        Define two biological sequences that have the same underlying sequence
+        of characters:
+
+        >>> from skbio import BiologicalSequence
+        >>> s = BiologicalSequence('GGUCGUGAAGGA')
+        >>> t = BiologicalSequence('GGUCGUGAAGGA')
+
+        The two sequences are considered equal because they are the same type,
+        their underlying sequence of characters are the same, and their
+        optional attributes (id, description, and quality scores) were not
+        provided:
+
+        >>> s.equals(t)
+        True
+        >>> t.equals(s)
+        True
+
+        Define another biological sequence with a different sequence of
+        characters than the previous two biological sequences:
+
+        >>> u = BiologicalSequence('GGUCGUGACCGA')
+        >>> u.equals(t)
+        False
+
+        Define a biological sequence with the same sequence of characters as
+        ``u``, but with different identifier and quality scores:
+        >>> v = BiologicalSequence('GGUCGUGACCGA', id='abc',
+        ...                        quality=[1, 5, 3, 3, 2, 42, 100, 9, 10, 55,
+        ...                                 42, 42])
+
+        By default, the two sequences are *not* considered equal because their
+        identifiers and quality scores do not match:
+
+        >>> u.equals(v)
+        False
+
+        By specifying that the quality scores and identifier should be ignored,
+        they now compare equal:
+
+        >>> u.equals(v, ignore=['quality', 'id'])
+        True
+
+        """
+        if ignore is None:
+            ignore = {}
+
+        # Checks are ordered from least to most expensive.
+        if 'type' not in ignore and self.__class__ != other.__class__:
+            return False
+
+        if 'id' not in ignore and self.id != other.id:
+            return False
+
+        if 'description' not in ignore and \
+                self.description != other.description:
+            return False
+
+        # Use array_equal instead of (a == b).all() because of this issue:
+        #     http://stackoverflow.com/a/10582030
+        if 'quality' not in ignore and not np.array_equal(self.quality,
+                                                          other.quality):
+            return False
+
+        if 'sequence' not in ignore and self.sequence != other.sequence:
+            return False
+
+        return True
+
+    def count(self, subsequence):
+        """Returns the number of occurences of subsequence.
+
+        Parameters
+        ----------
+        subsequence : str
+            The subsequence to count occurences of.
+
+        Returns
+        -------
+        int
+            The number of occurrences of substring in the `BiologicalSequence`.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUC')
+        >>> s.count('G')
+        2
+
+        """
+        return self._sequence.count(subsequence)
+
+    def degap(self):
+        """Returns a new `BiologicalSequence` with gap characters removed.
+
+        Returns
+        -------
+        BiologicalSequence
+            A new `BiologicalSequence` with all characters from
+            `self.gap_alphabet` filtered from the sequence.
+
+        Notes
+        -----
+        The type, id, and description of the result will be the
+        same as `self`. If quality scores are present, they will be filtered in
+        the same manner as the sequence and included in the resulting
+        degapped biological sequence.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUC-C--ACGTT-C.', quality=range(16))
+        >>> t = s.degap()
+        >>> t
+        <BiologicalSequence: GGUCCACGTT... (length: 11)>
+        >>> print(t)
+        GGUCCACGTTC
+        >>> t.quality
+        array([ 0,  1,  2,  3,  5,  8,  9, 10, 11, 12, 14])
+
+        """
+        gaps = self.gap_alphabet()
+        indices = [i for i, e in enumerate(self) if e not in gaps]
+        return self[indices]
+
+    def distance(self, other, distance_fn=None):
+        """Returns the distance to other
+
+        Parameters
+        ----------
+        other : `BiologicalSequence`
+            The `BiologicalSequence` to compute the distance to.
+        distance_fn : function, optional
+            Function used to compute the distance between `self` and `other`.
+            If ``None`` (the default), `scipy.spatial.distance.hamming` will be
+            used.
+
+        Returns
+        -------
+        float
+            The distance between `self` and `other`.
+
+        Raises
+        ------
+        skbio.sequence.BiologicalSequenceError
+            If ``len(self) != len(other)`` and ``distance_fn`` ==
+            ``scipy.spatial.distance.hamming``.
+
+        See Also
+        --------
+        fraction_diff
+        fraction_same
+        skbio.DistanceMatrix
+        scipy.spatial.distance.hamming
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUC')
+        >>> t = BiologicalSequence('AGUC')
+        >>> s.distance(t)
+        0.25
+        >>> def dumb_dist(s1, s2): return 0.42
+        >>> s.distance(t, dumb_dist)
+        0.42
+
+        """
+        if distance_fn is None:
+            distance_fn = hamming
+            if len(self) != len(other):
+                raise BiologicalSequenceError(
+                    "Hamming distance can only be computed between "
+                    "BiologicalSequences of equal length.")
+        return distance_fn(self, other)
+
    def fraction_diff(self, other):
        """Return fraction of positions that differ relative to `other`

        Parameters
        ----------
        other : `BiologicalSequence`
            The `BiologicalSequence` to compare against.

        Returns
        -------
        float
            The fraction of positions that differ between `self` and `other`.

        Raises
        ------
        skbio.sequence.BiologicalSequenceError
            If ``len(self) != len(other)``.
            (NOTE(review): because ``hamming`` is passed explicitly to
            ``distance``, the equal-length check there is bypassed, so
            unequal lengths may currently surface as scipy's error
            instead of ``BiologicalSequenceError`` -- verify.)

        See Also
        --------
        distance
        fraction_same
        scipy.spatial.distance.hamming

        Notes
        -----
        Computed as the Hamming distance between `self` and `other`. This is
        available in addition to `distance` in case the `distance` method is
        updated to use something other than ``scipy.spatial.distance.hamming``
        as the default distance metric. So, if you specifically want the
        fraction of positions that differ, you should use this function
        instead of `distance` to ensure backward compatibility.

        Examples
        --------
        >>> from skbio.sequence import BiologicalSequence
        >>> s = BiologicalSequence('GGUC')
        >>> t = BiologicalSequence('AGUC')
        >>> s.fraction_diff(t)
        0.25

        """
        # Pin the metric to hamming regardless of distance()'s default.
        return self.distance(other, distance_fn=hamming)
+
+    def fraction_same(self, other):
+        """Return fraction of positions that are the same relative to `other`
+
+        Parameters
+        ----------
+        other : `BiologicalSequence`
+            The `BiologicalSequence` to compare against.
+
+        Returns
+        -------
+        float
+            The fraction of positions that are the same between `self` and
+            `other`.
+
+        Raises
+        ------
+        skbio.sequence.BiologicalSequenceError
+            If ``len(self) != len(other)``.
+
+        See Also
+        --------
+        distance
+        fraction_diff
+        scipy.spatial.distance.hamming
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('GGUC')
+        >>> t = BiologicalSequence('AGUC')
+        >>> s.fraction_same(t)
+        0.75
+
+        """
+        return 1. - self.fraction_diff(other)
+
+    def gap_maps(self):
+        """Return tuples mapping b/w gapped and ungapped positions
+
+        Returns
+        -------
+        tuple containing two lists
+            The first list is the length of the ungapped sequence, and each
+            entry is the position of that base in the gapped sequence. The
+            second list is the length of the gapped sequence, and each entry is
+            either None (if that position represents a gap) or the position of
+            that base in the ungapped sequence.
+
+        See Also
+        --------
+        gap_vector
+
+        Notes
+        -----
+        Visual aid is useful here. Imagine we have
+        ``BiologicalSequence('-ACCGA-TA-')``. The position numbers in the
+        ungapped sequence and gapped sequence will be as follows::
+
+              0123456
+              ACCGATA
+              |||||\\
+             -ACCGA-TA-
+             0123456789
+
+        So, in the first list, position 0 maps to position 1, position 1
+        maps to position 2, position 5 maps to position 7, ... And, in the
+        second list, position 0 doesn't map to anything (so it's None),
+        position 1 maps to position 0, ...
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('-ACCGA-TA-')
+        >>> m = s.gap_maps()
+        >>> m[0]
+        [1, 2, 3, 4, 5, 7, 8]
+        >>> m[1]
+        [None, 0, 1, 2, 3, 4, None, 5, 6, None]
+
+        """
+        degapped_to_gapped = []
+        gapped_to_degapped = []
+        non_gap_count = 0
+        for i, e in enumerate(self):
+            if self.is_gap(e):
+                gapped_to_degapped.append(None)
+            else:
+                gapped_to_degapped.append(non_gap_count)
+                degapped_to_gapped.append(i)
+                non_gap_count += 1
+        return degapped_to_gapped, gapped_to_degapped
+
+    def gap_vector(self):
+        """Return list indicating positions containing gaps
+
+        Returns
+        -------
+        list of booleans
+            The list will be of length ``len(self)``, and a position will
+            contain ``True`` if the character at that position in the
+            `BiologicalSequence` is in `self.gap_alphabet`, and ``False``
+            otherwise.
+
+        See Also
+        --------
+        gap_maps
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('..ACG--TT-')
+        >>> s.gap_vector()
+        [True, True, False, False, False, True, True, False, False, True]
+
+        """
+        return [self.is_gap(c) for c in self._sequence]
+
+    def unsupported_characters(self):
+        """Return the set of unsupported characters in the `BiologicalSequence`
+
+        Returns
+        -------
+        set
+            Invalid characters in the `BiologicalSequence` (i.e., the
+            characters that are present in the `BiologicalSequence` but which
+            are not in `BiologicalSequence.alphabet` or
+            `BiologicalSequence.gap_alphabet`.
+
+        See Also
+        --------
+        is_valid
+        alphabet
+        gap_alphabet
+        has_unsupported_characters
+
+        """
+        return set(self) - self.alphabet() - self.gap_alphabet()
+
+    def has_unsupported_characters(self):
+        """Return bool indicating presence/absence of unsupported characters
+
+        Returns
+        -------
+        bool
+            ``True`` if invalid characters are present in the
+            `BiologicalSequence` (i.e., characters which are not in
+            `BiologicalSequence.alphabet` or
+            `BiologicalSequence.gap_alphabet`) and ``False`` otherwise.
+
+        See Also
+        --------
+        is_valid
+        alphabet
+        gap_alphabet
+        has_unsupported_characters
+
+        """
+        all_supported = self.alphabet() | self.gap_alphabet()
+        for e in self:
+            if e not in all_supported:
+                return True
+        return False
+
+    def index(self, subsequence):
+        """Return the position where subsequence first occurs
+
+        Returns
+        -------
+        int
+            The position where `subsequence` first occurs in the
+            `BiologicalSequence`.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('ACACGACGTT-')
+        >>> s.index('ACG')
+        2
+
+        """
+        try:
+            return self._sequence.index(subsequence)
+        except ValueError:
+            raise ValueError(
+                "%s is not present in %r." % (subsequence, self))
+
+    @classmethod
+    def is_gap(cls, char):
+        """Return True if `char` is in the `gap_alphabet` set
+
+        Parameters
+        ----------
+        char : str
+            The string to check for presence in the `BiologicalSequence`
+            `gap_alphabet`.
+
+        Returns
+        -------
+        bool
+            Indicates whether `char` is in the `BiologicalSequence` attribute
+            `gap_alphabet`.
+
+        Notes
+        -----
+        This is a class method.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> BiologicalSequence.is_gap('.')
+        True
+        >>> BiologicalSequence.is_gap('P')
+        False
+        >>> s = BiologicalSequence('ACACGACGTT')
+        >>> s.is_gap('-')
+        True
+
+        """
+        return char in cls.gap_alphabet()
+
+    def is_gapped(self):
+        """Return True if char(s) in `gap_alphabet` are present
+
+        Returns
+        -------
+        bool
+            Indicates whether there are one or more occurences of any character
+            in `self.gap_alphabet` in the `BiologicalSequence`.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('ACACGACGTT')
+        >>> s.is_gapped()
+        False
+        >>> t = BiologicalSequence('A.CAC--GACGTT')
+        >>> t.is_gapped()
+        True
+
+        """
+        for e in self:
+            if self.is_gap(e):
+                return True
+        return False
+
+    def is_valid(self):
+        """Return True if the sequence is valid
+
+        Returns
+        -------
+        bool
+            ``True`` if `self` is valid, and ``False`` otherwise.
+
+        Notes
+        -----
+        Validity is defined as not containing any characters outside of
+        `self.alphabet` and `self.gap_alphabet`.
+
+        """
+        return not self.has_unsupported_characters()
+
+    def k_words(self, k, overlapping=True):
+        """Get the list of words of length k
+
+        Parameters
+        ----------
+        k : int
+            The word length.
+        overlapping : bool, optional
+            Defines whether the k-words should be overlapping or not
+            overlapping.
+
+        Returns
+        -------
+        iterator of BiologicalSequences
+            Iterator of words of length `k` contained in the
+            BiologicalSequence.
+
+        Raises
+        ------
+        ValueError
+            If k < 1.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('ACACGACGTT')
+        >>> [str(kw) for kw in s.k_words(4, overlapping=False)]
+        ['ACAC', 'GACG']
+        >>> [str(kw) for kw in s.k_words(3, overlapping=True)]
+        ['ACA', 'CAC', 'ACG', 'CGA', 'GAC', 'ACG', 'CGT', 'GTT']
+
+        """
+        if k < 1:
+            raise ValueError("k must be greater than 0.")
+
+        sequence_length = len(self)
+
+        if overlapping:
+            step = 1
+        else:
+            step = k
+
+        for i in range(0, sequence_length - k + 1, step):
+            yield self[i:i+k]
+
+    def k_word_counts(self, k, overlapping=True):
+        """Get the counts of words of length k
+
+        Parameters
+        ----------
+        k : int
+            The word length.
+        overlapping : bool, optional
+            Defines whether the k-words should be overlapping or not
+            overlapping.
+
+        Returns
+        -------
+        collections.Counter
+            The counts of words of length `k` contained in the
+            BiologicalSequence.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('ACACAT')
+        >>> s.k_word_counts(3, overlapping=True)
+        Counter({'ACA': 2, 'CAC': 1, 'CAT': 1})
+
+        """
+        k_words = self.k_words(k, overlapping)
+        return Counter((str(seq) for seq in k_words))
+
+    def k_word_frequencies(self, k, overlapping=True):
+        """Get the frequencies of words of length `k`
+
+        Parameters
+        ----------
+        k : int
+            The word length.
+        overlapping : bool, optional
+            Defines whether the k-words should be overlapping or not
+            overlapping. This is only relevant when `k` > 1.
+
+        Returns
+        -------
+        collections.defaultdict
+            The frequencies of words of length `k` contained in the
+            ``BiologicalSequence``.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('ACACAT')
+        >>> s.k_word_frequencies(3, overlapping=True)
+        defaultdict(<type 'float'>, {'CAC': 0.25, 'ACA': 0.5, 'CAT': 0.25})
+
+        """
+        if overlapping:
+            num_words = len(self) - k + 1
+        else:
+            num_words = len(self) // k
+
+        result = defaultdict(float)
+        k_word_counts = self.k_word_counts(k, overlapping=overlapping)
+        for word, count in viewitems(k_word_counts):
+            result[str(word)] = count / num_words
+        return result
+
+    def lower(self):
+        """Convert the BiologicalSequence to lowercase
+
+        Returns
+        -------
+        BiologicalSequence
+            The `BiologicalSequence` with all characters converted to
+            lowercase.
+
+        """
+        return self.copy(sequence=self.sequence.lower())
+
+    def nondegenerates(self):
+        """Yield all nondegenerate versions of the sequence.
+
+        Returns
+        -------
+        generator
+            Generator yielding all possible nondegenerate versions of the
+            sequence. Each sequence will have the same type, id, description,
+            and quality scores as `self`.
+
+        Raises
+        ------
+        BiologicalSequenceError
+            If the sequence contains an invalid character (a character that
+            isn't an IUPAC character or a gap character).
+
+        See Also
+        --------
+        iupac_degeneracies
+
+        Notes
+        -----
+        There is no guaranteed ordering to the generated sequences.
+
+        Examples
+        --------
+        >>> from skbio.sequence import NucleotideSequence
+        >>> seq = NucleotideSequence('TRG')
+        >>> seq_generator = seq.nondegenerates()
+        >>> for s in sorted(seq_generator, key=str): print(s)
+        TAG
+        TGG
+
+        """
+        degen_chars = self.iupac_degeneracies()
+        nonexpansion_chars = self.iupac_standard_characters().union(
+            self.gap_alphabet())
+
+        expansions = []
+        for char in self:
+            if char in nonexpansion_chars:
+                expansions.append(char)
+            else:
+                # Use a try/except instead of explicitly checking for set
+                # membership on the assumption that an exception is rarely
+                # thrown.
+                try:
+                    expansions.append(degen_chars[char])
+                except KeyError:
+                    raise BiologicalSequenceError(
+                        "Sequence contains an invalid character: %s" % char)
+
+        result = product(*expansions)
+        return (self.copy(sequence=nondegen_seq) for nondegen_seq in result)
+
+    def to_fasta(self, field_delimiter=" ", terminal_character="\n"):
+        """Return the sequence as a fasta-formatted string
+
+        .. note:: Deprecated in scikit-bio 0.2.0-dev
+           ``to_fasta`` will be removed in scikit-bio 0.3.0. It is replaced by
+           ``write``, which is a more general method for serializing
+           FASTA-formatted files. ``write`` supports multiple file formats by
+           taking advantage of scikit-bio's I/O registry system. See
+           :mod:`skbio.io` for more details.
+
+        Parameters
+        ----------
+        field_delimiter : str, optional
+            The character(s) to use on the header line between the
+            `self.id` and `self.description`.
+
+        terminal_character : str, optional
+            The last character to be included in the result (if you don't want
+            a trailing newline or other character in the result, you can pass
+            ``terminal_character=""``).
+
+        Returns
+        -------
+        str
+            The `BiologicalSequence` as a fasta-formatted string.
+
+        Examples
+        --------
+        >>> from skbio.sequence import BiologicalSequence
+        >>> s = BiologicalSequence('ACACGACGTT')
+        >>> print(s.to_fasta(terminal_character=""))
+        >
+        ACACGACGTT
+        >>> t = BiologicalSequence('ACA',id='my-seq',description='h')
+        >>> print(t.to_fasta(terminal_character=""))
+        >my-seq h
+        ACA
+
+        """
+        warnings.warn(
+            "BiologicalSequence.to_fasta is deprecated and will be removed in "
+            "scikit-bio 0.3.0. Please update your code to use "
+            "BiologicalSequence.write.", DeprecationWarning)
+
+        if self._description:
+            header_line = '%s%s%s' % (self._id, field_delimiter,
+                                      self._description)
+        else:
+            header_line = self._id
+
+        return '>%s\n%s%s' % (
+            header_line, self.sequence, terminal_character)
+
+    def upper(self):
+        """Convert the BiologicalSequence to uppercase
+
+        Returns
+        -------
+        BiologicalSequence
+            The `BiologicalSequence` with all characters converted to
+            uppercase.
+
+        """
+        return self.copy(sequence=self.sequence.upper())
+
+    def _set_quality(self, quality):
+        # Validate and store Phred quality scores as an immutable 1-D int
+        # ndarray, or ``None`` when no quality information is provided.
+        if quality is not None:
+            quality = np.asarray(quality)
+
+            if quality.ndim == 0:
+                # We have something scalar-like, so create a single-element
+                # vector to store it.
+                quality = np.reshape(quality, 1)
+
+            if quality.shape == (0,):
+                # cannot safe cast an empty vector from float to int
+                cast_type = 'unsafe'
+            else:
+                cast_type = 'safe'
+
+            # 'safe' casting rejects lossy conversions (e.g. non-integral
+            # floats). NOTE(review): for a non-integral 2-D input numpy's
+            # TypeError fires here, before the 1-D check below raises
+            # BiologicalSequenceError -- confirm the exception type is
+            # acceptable to callers.
+            quality = quality.astype(int, casting=cast_type, copy=False)
+            # Freeze the array so callers cannot mutate the stored scores.
+            quality.flags.writeable = False
+
+            if quality.ndim != 1:
+                raise BiologicalSequenceError(
+                    "Phred quality scores must be 1-D.")
+            if len(quality) != len(self):
+                raise BiologicalSequenceError(
+                    "Number of Phred quality scores (%d) must match the "
+                    "number of characters in the biological sequence (%d)." %
+                    (len(quality), len(self._sequence)))
+            if (quality < 0).any():
+                raise BiologicalSequenceError(
+                    "Phred quality scores must be greater than or equal to "
+                    "zero.")
+
+        self._quality = quality
+
+    def regex_iter(self, regex, retrieve_group_0=False):
+        """Find patterns specified by regular expression
+
+        Parameters
+        ----------
+        regex : SRE_Pattern
+            A compiled regular expression (e.g., from re.compile) with
+            finditer method
+        retrieve_group_0 : bool, optional
+            Defaults to ``False``. If ``True``, group(0) will be included in
+            each list of tuples, which represents the shortest possible
+            substring of the full sequence that contains all the other groups
+
+        Returns
+        -------
+        generator
+            yields lists of 3-tuples. Each 3-tuple represents a group from the
+            matched regular expression, and contains the start of the hit, the
+            end of the hit, and the substring that was hit
+        """
+        start = 0 if retrieve_group_0 else 1
+
+        for match in regex.finditer(self._sequence):
+            for g in range(start, len(match.groups())+1):
+                yield (match.start(g), match.end(g), match.group(g))
+
+
+class NucleotideSequence(BiologicalSequence):
+    """Base class for nucleotide sequences.
+
+    A `NucleotideSequence` is a `BiologicalSequence` with additional methods
+    that are only applicable for nucleotide sequences, and containing only
+    characters used in the IUPAC DNA or RNA lexicon.
+
+    See Also
+    --------
+    BiologicalSequence
+
+    Notes
+    -----
+    All uppercase and lowercase IUPAC DNA/RNA characters are supported.
+
+    """
+
+    @classmethod
+    def complement_map(cls):
+        """Return the mapping of characters to their complements.
+
+        Returns
+        -------
+        dict
+            Mapping of characters to their complements.
+
+        Notes
+        -----
+        Complements cannot be defined for a generic `NucleotideSequence`
+        because the complement of 'A' is ambiguous.
+        `NucleotideSequence.complement_map` will therefore be the empty dict.
+        Thanks, nature...
+
+        """
+        return {}
+
+    @classmethod
+    def iupac_standard_characters(cls):
+        """Return the non-degenerate IUPAC nucleotide characters.
+
+        Returns
+        -------
+        set
+            Non-degenerate IUPAC nucleotide characters.
+
+        """
+        return set("ACGTUacgtu")
+
+    @classmethod
+    def iupac_degeneracies(cls):
+        """Return the mapping of degenerate to non-degenerate characters.
+
+        Returns
+        -------
+        dict of sets
+            Mapping of IUPAC degenerate nucleotide character to the set of
+            non-degenerate IUPAC nucleotide characters it represents.
+
+        """
+        degen_map = {
+            "R": set("AG"), "Y": set("CTU"), "M": set("AC"), "K": set("TUG"),
+            "W": set("ATU"), "S": set("GC"), "B": set("CGTU"),
+            "D": set("AGTU"), "H": set("ACTU"), "V": set("ACG"),
+            "N": set("ACGTU")
+        }
+
+        for degen_char in list(degen_map.keys()):
+            nondegen_chars = degen_map[degen_char]
+            degen_map[degen_char.lower()] = set(
+                ''.join(nondegen_chars).lower())
+
+        return degen_map
+
+    def _complement(self, reverse=False):
+        """Returns `NucleotideSequence` that is (reverse) complement of `self`.
+
+        Parameters
+        ----------
+        reverse : bool, optional
+            If ``True``, reverse `self` before complementing.
+
+        Returns
+        -------
+        NucelotideSequence
+            The (reverse) complement of `self`. Specific type will be the same
+            as ``type(self)``.
+
+        Raises
+        ------
+        skbio.sequence.BiologicalSequenceError
+            If a character is present in the `NucleotideSequence` that is not
+            in the complement map.
+
+        Notes
+        -----
+        This private method centralizes the logic for `complement` and
+        `reverse_complement`.
+
+        """
+        result = []
+        complement_map = self.complement_map()
+        seq_iterator = reversed(self) if reverse else self
+        for base in seq_iterator:
+            try:
+                result.append(complement_map[base])
+            except KeyError:
+                raise BiologicalSequenceError(
+                    "Don't know how to complement base %s. Is it in "
+                    "%s.complement_map?" % (base, self.__class__.__name__))
+
+        quality = self.quality
+        if self.has_quality() and reverse:
+            quality = self.quality[::-1]
+
+        return self.copy(sequence=result, quality=quality)
+
+    def complement(self):
+        """Return the complement of the `NucleotideSequence`
+
+        Returns
+        -------
+        NucelotideSequence
+            The complement of `self`. Specific type will be the same as
+            ``type(self)``.
+
+        Raises
+        ------
+        skbio.sequence.BiologicalSequenceError
+            If a character is present in the `NucleotideSequence` that is not
+            in `self.complement_map`.
+
+        See Also
+        --------
+        reverse_complement
+        complement_map
+
+        Notes
+        -----
+        The type, id, description, and quality scores of the result will be the
+        same as `self`.
+
+        """
+        return self._complement()
+
+    def is_reverse_complement(self, other):
+        """Return True if `other` is the reverse complement of `self`
+
+        Returns
+        -------
+        bool
+            `True` if `other` is the reverse complement of `self` and `False`
+            otherwise.
+
+        Raises
+        ------
+        skbio.sequence.BiologicalSequenceError
+            If a character is present in `other` that is not in the
+            `self.complement_map`.
+
+        See Also
+        --------
+        reverse_complement
+
+        """
+        return self == other.reverse_complement()
+
+    def reverse_complement(self):
+        """Return the reverse complement of the `NucleotideSequence`
+
+        Returns
+        -------
+        NucelotideSequence
+            The reverse complement of `self`. Specific type will be the same as
+            ``type(self)``.
+
+        Raises
+        ------
+        skbio.sequence.BiologicalSequenceError
+            If a character is present in the `NucleotideSequence` that is not
+            in `self.complement_map`.
+
+        See Also
+        --------
+        complement
+        complement_map
+        is_reverse_complement
+
+        Notes
+        -----
+        The type, id, and description of the result will be the same as `self`.
+        If quality scores are present, they will be reversed and included in
+        the resulting biological sequence.
+
+        """
+        return self._complement(reverse=True)
+    rc = reverse_complement
+
+    def find_features(self, feature_type, min_length=1, allow_gaps=False):
+        """Search the sequence for features
+
+        Parameters
+        ----------
+        feature_type : {'purine_run', 'pyrimidine_run'}
+            The type of feature to find
+        min_length : int, optional
+            Defaults to 1. Only features at least as long as this will be
+            returned
+        allow_gaps : bool, optional
+            Defaults to ``False``. If ``True``, then gaps will not be
+            considered to disrupt a feature
+
+        Returns
+        -------
+        generator
+            Yields tuples of the start of the feature, the end of the feature,
+            and the subsequence that composes the feature
+
+        Examples
+        --------
+        >>> from skbio.sequence import NucleotideSequence
+        >>> s = NucleotideSequence('G-AT.T')
+        >>> list(s.find_features('purine_run'))
+        [(0, 1, 'G'), (2, 3, 'A')]
+        >>> list(s.find_features('purine_run', 2))
+        []
+        >>> list(s.find_features('purine_run', 2, allow_gaps=True))
+        [(0, 3, 'G-A')]
+        >>> list(s.find_features('pyrimidine_run', 2, allow_gaps=True))
+        [(3, 6, 'T.T')]
+
+        """
+        gaps = re.escape(''.join(self.gap_alphabet()))
+        acceptable = gaps if allow_gaps else ''
+
+        if feature_type == 'purine_run':
+            pat_str = '([AGag%s]{%d,})' % (acceptable, min_length)
+        elif feature_type == 'pyrimidine_run':
+            pat_str = '([CTUctu%s]{%d,})' % (acceptable, min_length)
+        else:
+            raise ValueError("Unknown feature type: %s" % feature_type)
+
+        pat = re.compile(pat_str)
+
+        for hits in self.regex_iter(pat):
+            if allow_gaps:
+                degapped = hits[2]
+                for gap_char in self.gap_alphabet():
+                    degapped = degapped.replace(gap_char, '')
+                if len(degapped) >= min_length:
+                    yield hits
+            else:
+                yield hits
+
+
+class DNASequence(NucleotideSequence):
+    """Base class for DNA sequences.
+
+    A `DNASequence` is a `NucelotideSequence` that is restricted to only
+    containing characters used in IUPAC DNA lexicon.
+
+    See Also
+    --------
+    NucleotideSequence
+    BiologicalSequence
+
+    Notes
+    -----
+    All uppercase and lowercase IUPAC DNA characters are supported.
+
+    """
+
+    @classmethod
+    def complement_map(cls):
+        """Return the mapping of characters to their complements.
+
+        The complement of a gap character is itself.
+
+        Returns
+        -------
+        dict
+            Mapping of characters to their complements.
+
+        """
+        comp_map = {
+            'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
+            'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
+            'H': 'D', 'V': 'B', 'N': 'N', 'a': 't', 't': 'a', 'g': 'c',
+            'c': 'g', 'y': 'r', 'r': 'y', 's': 's', 'w': 'w', 'k': 'm',
+            'm': 'k', 'b': 'v', 'd': 'h', 'h': 'd', 'v': 'b', 'n': 'n'
+        }
+
+        comp_map.update({c: c for c in cls.gap_alphabet()})
+        return comp_map
+
+    @classmethod
+    def iupac_standard_characters(cls):
+        """Return the non-degenerate IUPAC DNA characters.
+
+        Returns
+        -------
+        set
+            Non-degenerate IUPAC DNA characters.
+
+        """
+        return set("ACGTacgt")
+
+    @classmethod
+    def iupac_degeneracies(cls):
+        """Return the mapping of degenerate to non-degenerate characters.
+
+        Returns
+        -------
+        dict of sets
+            Mapping of IUPAC degenerate DNA character to the set of
+            non-degenerate IUPAC DNA characters it represents.
+
+        """
+        degen_map = {
+            "R": set("AG"), "Y": set("CT"), "M": set("AC"), "K": set("TG"),
+            "W": set("AT"), "S": set("GC"), "B": set("CGT"), "D": set("AGT"),
+            "H": set("ACT"), "V": set("ACG"), "N": set("ACGT")
+        }
+
+        for degen_char in list(degen_map.keys()):
+            nondegen_chars = degen_map[degen_char]
+            degen_map[degen_char.lower()] = set(
+                ''.join(nondegen_chars).lower())
+
+        return degen_map
+
+
+# Convenience alias: ``DNA`` refers to the same class as ``DNASequence``.
+DNA = DNASequence
+
+
+class RNASequence(NucleotideSequence):
+    """Base class for RNA sequences.
+
+    An `RNASequence` is a `NucelotideSequence` that is restricted to only
+    containing characters used in the IUPAC RNA lexicon.
+
+    Notes
+    -----
+    All uppercase and lowercase IUPAC RNA characters are supported.
+
+    """
+
+    @classmethod
+    def complement_map(cls):
+        """Return the mapping of characters to their complements.
+
+        The complement of a gap character is itself.
+
+        Returns
+        -------
+        dict
+            Mapping of characters to their complements.
+
+        """
+        comp_map = {
+            'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
+            'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
+            'H': 'D', 'V': 'B', 'N': 'N', 'a': 'u', 'u': 'a', 'g': 'c',
+            'c': 'g', 'y': 'r', 'r': 'y', 's': 's', 'w': 'w', 'k': 'm',
+            'm': 'k', 'b': 'v', 'd': 'h', 'h': 'd', 'v': 'b', 'n': 'n'
+        }
+
+        comp_map.update({c: c for c in cls.gap_alphabet()})
+        return comp_map
+
+    @classmethod
+    def iupac_standard_characters(cls):
+        """Return the non-degenerate IUPAC RNA characters.
+
+        Returns
+        -------
+        set
+            Non-degenerate IUPAC RNA characters.
+
+        """
+        return set("ACGUacgu")
+
+    @classmethod
+    def iupac_degeneracies(cls):
+        """Return the mapping of degenerate to non-degenerate characters.
+
+        Returns
+        -------
+        dict of sets
+            Mapping of IUPAC degenerate RNA character to the set of
+            non-degenerate IUPAC RNA characters it represents.
+
+        """
+        degen_map = {
+            "R": set("AG"), "Y": set("CU"), "M": set("AC"), "K": set("UG"),
+            "W": set("AU"), "S": set("GC"), "B": set("CGU"), "D": set("AGU"),
+            "H": set("ACU"), "V": set("ACG"), "N": set("ACGU")
+        }
+
+        for degen_char in list(degen_map.keys()):
+            nondegen_chars = degen_map[degen_char]
+            degen_map[degen_char.lower()] = set(
+                ''.join(nondegen_chars).lower())
+
+        return degen_map
+
+# Convenience alias: ``RNA`` refers to the same class as ``RNASequence``.
+RNA = RNASequence
+
+
+class ProteinSequence(BiologicalSequence):
+    """Base class for protein sequences.
+
+    A `ProteinSequence` is a `BiologicalSequence` containing only characters
+    used in the IUPAC protein lexicon.
+
+    See Also
+    --------
+    BiologicalSequence
+
+    Notes
+    -----
+    All uppercase and lowercase IUPAC protein characters are supported.
+
+    """
+
+    @classmethod
+    def iupac_standard_characters(cls):
+        """Return the non-degenerate IUPAC protein characters.
+
+        Returns
+        -------
+        set
+            Non-degenerate IUPAC protein characters.
+
+        """
+        return set("ACDEFGHIKLMNPQRSTVWYacdefghiklmnpqrstvwy")
+
+    @classmethod
+    def iupac_degeneracies(cls):
+        """Return the mapping of degenerate to non-degenerate characters.
+
+        Returns
+        -------
+        dict of sets
+            Mapping of IUPAC degenerate protein character to the set of
+            non-degenerate IUPAC protein characters it represents.
+
+        """
+        degen_map = {
+            "B": set("DN"), "Z": set("EQ"),
+            "X": set("ACDEFGHIKLMNPQRSTVWY")
+        }
+
+        degen_map_lower = {}
+        for degen_char in degen_map:
+            nondegen_chars = degen_map[degen_char]
+            degen_map_lower[degen_char.lower()] = set(
+                ''.join(nondegen_chars).lower())
+
+        degen_map.update(degen_map_lower)
+
+        return degen_map
+
+# Convenience alias: ``Protein`` refers to the same class as
+# ``ProteinSequence``.
+Protein = ProteinSequence
diff --git a/skbio/sequence/tests/__init__.py b/skbio/sequence/tests/__init__.py
new file mode 100644
index 0000000..c99682c
--- /dev/null
+++ b/skbio/sequence/tests/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/sequence/tests/test_genetic_code.py b/skbio/sequence/tests/test_genetic_code.py
new file mode 100644
index 0000000..829caed
--- /dev/null
+++ b/skbio/sequence/tests/test_genetic_code.py
@@ -0,0 +1,377 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+from skbio import DNA, RNA, Protein
+from skbio.sequence import (GeneticCode, genetic_code,
+                            GeneticCodeInitError, InvalidCodonError)
+
+
+class GeneticCodeTests(TestCase):
+
+    """Tests of the GeneticCode class."""
+
+    def setUp(self):
+        """Set up some standard genetic code representations."""
+        # 64-character code strings: standard nuclear code, vertebrate
+        # mitochondrial code, and an artificial all-glycine code.
+        self.sgc = ("FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAAD"
+                    "DEEGGGG")
+        self.mt = ("FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADD"
+                   "EEGGGG")
+        self.allg = ("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"
+                     "GGGGGGGG")
+
+        # Code strings whose length is not 64, used to exercise
+        # GeneticCodeInitError.
+        # NOTE(review): adjacent string literals concatenate implicitly, so
+        # this list holds TWO strings (lengths 31 and 66), not four. If the
+        # empty string was meant as its own too-short case, commas are
+        # missing after the first and third literals -- confirm intent.
+        self.wrong_length = [
+            "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"
+            "",
+            "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"
+            "G",
+        ]
+        # Positional arguments for GeneticCode in NCBI order:
+        # code sequence, id, name, start-codon sequence.
+        self.ncbi_standard = [
+            'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
+            1,
+            'Standard Nuclear',
+            '---M---------------M---------------M----------------------------',
+        ]
+
+    def test_init(self):
+        """GeneticCode init should work with correct-length sequences"""
+        sgc = GeneticCode(self.sgc)
+        self.assertEqual(sgc['UUU'], 'F')
+        mt = GeneticCode(self.mt)
+        self.assertEqual(mt['UUU'], 'F')
+        allg = GeneticCode(self.allg)
+        self.assertEqual(allg['UUU'], 'G')
+        for i in self.wrong_length:
+            self.assertRaises(GeneticCodeInitError, GeneticCode, i)
+
+    def test_eq(self):
+        gc_1 = GeneticCode(self.sgc)
+        gc_2 = GeneticCode(self.sgc)
+        self.assertEqual(gc_1, gc_2)
+
+    def test_eq_type_mismatch(self):
+        self.assertFalse(GeneticCode(self.sgc) == 'i cracked the code!')
+
+    def test_ne(self):
+        gc_1 = GeneticCode(self.sgc)
+        gc_2 = GeneticCode(self.sgc)
+        # Explicitly using !=
+        self.assertFalse(gc_1 != gc_2)
+
+    def test_standard_code(self):
+        """Standard genetic code from NCBI should have correct properties"""
+        sgc = GeneticCode(*self.ncbi_standard)
+        self.assertEqual(sgc.code_sequence, 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRI'
+                         'IIMTTTTNNKKSSRRVVVVAAAADDEEGGGG')
+        self.assertEqual(sgc.start_codon_sequence, '---M---------------M------'
+                         '---------M----------------------------')
+        self.assertEqual(sgc.start_codons, {'TTG': 'M', 'CTG': 'M',
+                                            'ATG': 'M'})
+        self.assertEqual(sgc.id, 1)
+        self.assertEqual(sgc.name, 'Standard Nuclear')
+        self.assertEqual(sgc['UUU'], 'F')
+        self.assertEqual(sgc.is_start('ATG'), True)
+        self.assertEqual(sgc.is_start('AAA'), False)
+        self.assertEqual(sgc.is_stop('UAA'), True)
+        self.assertEqual(sgc.is_stop('AAA'), False)
+        self.assertEqual(len(sgc.sense_codons), 61)
+        self.assertTrue('AAA' in sgc.sense_codons)
+        self.assertFalse('TGA' in sgc.sense_codons)
+
+    def test_standard_code_lookup(self):
+        """genetic_code should hold codes keyed by id as string and number"""
+        sgc_new = GeneticCode(*self.ncbi_standard)
+        sgc_number = genetic_code(1)
+        sgc_string = genetic_code('1')
+        sgc_empty = genetic_code()
+        for sgc in sgc_new, sgc_number, sgc_string, sgc_empty:
+            self.assertEqual(sgc.code_sequence, 'FFLLSSSSYY**CC*WLLLLPPPPHHQQR'
+                             'RRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG')
+            self.assertEqual(sgc.start_codon_sequence, '---M---------------M--'
+                             '-------------M----------------------------')
+            self.assertEqual(
+                sgc.start_codons, {'TTG': 'M', 'CTG': 'M', 'ATG': 'M'})
+            self.assertEqual(sgc.id, 1)
+            self.assertEqual(sgc.name, 'Standard Nuclear')
+            self.assertEqual(sgc['TTT'], 'F')
+            self.assertEqual(sgc.is_start('ATG'), True)
+            self.assertEqual(sgc.is_start('AAA'), False)
+            self.assertEqual(sgc.is_stop('TAA'), True)
+            self.assertEqual(sgc.is_stop('AAA'), False)
+
+        mtgc = genetic_code(2)
+        self.assertEqual(mtgc.name, 'Vertebrate Mitochondrial')
+        self.assertEqual(mtgc.is_start('AUU'), True)
+        self.assertEqual(mtgc.is_stop('UGA'), False)
+
+        self.assertEqual(sgc_new.changes(mtgc), {'AGA': 'R*', 'AGG': 'R*',
+                                                 'ATA': 'IM', 'TGA': '*W'})
+        self.assertEqual(mtgc.changes(sgc_new), {'AGA': '*R', 'AGG': '*R',
+                                                 'ATA': 'MI', 'TGA': 'W*'})
+        self.assertEqual(mtgc.changes(mtgc), {})
+        self.assertEqual(mtgc.changes('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTT'
+                         'TNNKKSSRRVVVVAAAADDEEGGGG'), {'AGA': '*R',
+                         'AGG': '*R', 'ATA': 'MI', 'TGA': 'W*'})
+
+    def test_str(self):
+        """GeneticCode str() should return its code string"""
+        code_strings = self.sgc, self.mt, self.allg
+        codes = map(GeneticCode, code_strings)
+        for code, string in zip(codes, code_strings):
+            self.assertEqual(str(code), string)
+        # check an example directly in case strings are bad
+        self.assertEqual(str(self.sgc), "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMT"
+                         "TTTNNKKSSRRVVVVAAAADDEEGGGG")
+
+    def test_cmp(self):
+        """GeneticCode cmp() should act on code strings"""
+        sgc_1 = GeneticCode(self.sgc)
+        sgc_2 = GeneticCode(self.sgc)
+        self.assertEqual(sgc_1 is sgc_2, False)  # ensure different objects
+        # self.assertNotEqual(sgc_1, sgc_2) # GREG
+        self.assertEqual(sgc_1, sgc_2)
+        mtgc = GeneticCode(self.mt)
+        self.assertNotEqual(sgc_1, mtgc)
+
+    def test_getitem_codon(self):
+        """GeneticCode getitem should return amino acid for codon"""
+        # specific checks of a particular codon in the standard code
+        variant_codons = ['AUU', 'AUU', 'AUU', 'ATT', 'ATU', 'ATU']
+        sgc = GeneticCode(self.sgc)
+        for i in variant_codons:
+            self.assertEqual(sgc[i], 'I')
+        # full check for the standard code
+        codons = [a + b + c for a in 'UCAG' for b in 'TCAG' for c in 'UCAG']
+        for codon, aa in zip(codons, self.sgc):
+            self.assertEqual(sgc[codon], aa)
+        # full check for another code
+        allg = GeneticCode(self.allg)
+        for codon, aa in zip(codons, self.allg):
+            self.assertEqual(allg[codon], aa)
+        # check that degenerate codon returns X
+        self.assertEqual(sgc['NNN'], 'X')
+
+    def test_getitem_aa(self):
+        """GeneticCode getitem should return codon set for aa"""
+        # for all G, should return all the codons (in some order)
+        allg = GeneticCode(self.allg)
+        codons = [a + b + c for a in 'TCAG' for b in 'TCAG' for c in 'TCAG']
+        g_codons = allg['G']
+        codons_copy = codons[:]
+        self.assertEqual(g_codons, codons_copy)
+
+        # check some known cases in the standard genetic code
+        sgc = GeneticCode(self.sgc)
+        exp_ile = ['ATT', 'ATC', 'ATA']
+        obs_ile = sgc['I']
+        self.assertEqual(obs_ile, exp_ile)
+
+        exp_arg = ['AGA', 'AGG', 'CGT', 'CGC', 'CGA', 'CGG']
+        obs_arg = sgc['R']
+        if hasattr(self, 'assertItemsEqual'):
+            self.assertItemsEqual(obs_arg, exp_arg)
+        else:
+            self.assertCountEqual(obs_arg, exp_arg)
+
+        exp_leu = ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG']
+        obs_leu = sgc['L']
+        self.assertEqual(obs_leu, exp_leu)
+
+        exp_met = ['ATG']
+        obs_met = sgc['M']
+        self.assertEqual(obs_met, exp_met)
+
+        # unknown aa should return []
+        self.assertEqual(sgc['U'], [])
+
+    def test_getitem_invalid_length(self):
+        """GeneticCode getitem raises InvalidCodonError on wrong length"""
+        sgc = GeneticCode(self.sgc)
+        self.assertRaises(InvalidCodonError, sgc.__getitem__, 'AAAA')
+        self.assertRaises(InvalidCodonError, sgc.__getitem__, 'AA')
+
+    def test_blocks(self):
+        """GeneticCode blocks should return correct list"""
+        sgc = GeneticCode(self.sgc)
+        # Expected synonymous-codon blocks for the standard nuclear code,
+        # in TCAG enumeration order; each inner list is one run of codons
+        # sharing a fate (amino acid or stop).
+        exp_blocks = [
+            ['TTT', 'TTC', ],
+            ['TTA', 'TTG', ],
+            ['TCT', 'TCC', 'TCA', 'TCG'],
+            ['TAT', 'TAC'],
+            ['TAA', 'TAG'],
+            ['TGT', 'TGC'],
+            ['TGA'],
+            ['TGG'],
+            ['CTT', 'CTC', 'CTA', 'CTG'],
+            ['CCT', 'CCC', 'CCA', 'CCG'],
+            ['CAT', 'CAC'],
+            ['CAA', 'CAG'],
+            ['CGT', 'CGC', 'CGA', 'CGG'],
+            ['ATT', 'ATC'],
+            ['ATA', ],
+            ['ATG', ],
+            ['ACT', 'ACC', 'ACA', 'ACG'],
+            ['AAT', 'AAC'],
+            ['AAA', 'AAG'],
+            ['AGT', 'AGC'],
+            ['AGA', 'AGG'],
+            ['GTT', 'GTC', 'GTA', 'GTG'],
+            ['GCT', 'GCC', 'GCA', 'GCG'],
+            ['GAT', 'GAC'],
+            ['GAA', 'GAG'],
+            ['GGT', 'GGC', 'GGA', 'GGG'],
+        ]
+        self.assertEqual(sgc.blocks, exp_blocks)
+
+    def test_anticodons(self):
+        """GeneticCode anticodons should return correct list"""
+        sgc = GeneticCode(self.sgc)
+        exp_anticodons = {
+            'F': ['AAA', 'GAA', ],
+            'L': ['TAA', 'CAA', 'AAG', 'GAG', 'TAG', 'CAG'],
+            'Y': ['ATA', 'GTA'],
+            '*': ['TTA', 'CTA', 'TCA'],
+            'C': ['ACA', 'GCA'],
+            'W': ['CCA'],
+            'S': ['AGA', 'GGA', 'TGA', 'CGA', 'ACT', 'GCT'],
+            'P': ['AGG', 'GGG', 'TGG', 'CGG'],
+            'H': ['ATG', 'GTG'],
+            'Q': ['TTG', 'CTG'],
+            'R': ['ACG', 'GCG', 'TCG', 'CCG', 'TCT', 'CCT'],
+            'I': ['AAT', 'GAT', 'TAT'],
+            'M': ['CAT', ],
+            'T': ['AGT', 'GGT', 'TGT', 'CGT'],
+            'N': ['ATT', 'GTT'],
+            'K': ['TTT', 'CTT'],
+            'V': ['AAC', 'GAC', 'TAC', 'CAC'],
+            'A': ['AGC', 'GGC', 'TGC', 'CGC'],
+            'D': ['ATC', 'GTC'],
+            'E': ['TTC', 'CTC'],
+            'G': ['ACC', 'GCC', 'TCC', 'CCC'],
+        }
+        self.assertEqual(sgc.anticodons, exp_anticodons)
+
+    def test_translate(self):
+        """GeneticCode translate should return correct amino acid string"""
+        allg = GeneticCode(self.allg)
+        sgc = GeneticCode(self.sgc)
+        mt = GeneticCode(self.mt)
+
+        seq = 'AUGCAUGACUUUUGA'
+        #      .  .  .  .  .        markers for codon start
+        # second argument is the 0-based start position; a trailing partial
+        # codon is silently dropped, and a start past the last full codon
+        # (but within the sequence) yields an empty Protein
+        self.assertEqual(allg.translate(seq), Protein('GGGGG'))
+        self.assertEqual(allg.translate(seq, 1), Protein('GGGG'))
+        self.assertEqual(allg.translate(seq, 2), Protein('GGGG'))
+        self.assertEqual(allg.translate(seq, 3), Protein('GGGG'))
+        self.assertEqual(allg.translate(seq, 4), Protein('GGG'))
+        self.assertEqual(allg.translate(seq, 12), Protein('G'))
+        self.assertEqual(allg.translate(seq, 14), Protein(''))
+        # a start position beyond the sequence end is an error
+        self.assertRaises(ValueError, allg.translate, seq, 15)
+        self.assertRaises(ValueError, allg.translate, seq, 20)
+
+        self.assertEqual(sgc.translate(seq), Protein('MHDF*'))
+        self.assertEqual(sgc.translate(seq, 3), Protein('HDF*'))
+        self.assertEqual(sgc.translate(seq, 6), Protein('DF*'))
+        self.assertEqual(sgc.translate(seq, 9), Protein('F*'))
+        self.assertEqual(sgc.translate(seq, 12), Protein('*'))
+        self.assertEqual(sgc.translate(seq, 14), Protein(''))
+        # check shortest translatable sequences
+        self.assertEqual(sgc.translate('AAA'), Protein('K'))
+        self.assertEqual(sgc.translate(''), Protein(''))
+
+        # check that different code gives different results
+        self.assertEqual(mt.translate(seq), Protein('MHDFW'))
+
+        # check translation with invalid codon(s)
+        self.assertEqual(sgc.translate('AAANNNCNC123UUU'), Protein('KXXXF'))
+
+    def test_translate_six_frames(self):
+        """GeneticCode translate_six_frames provides six-frame translation"""
+
+        class fake_rna(str):
+
+            """Fake RNA class with reverse-complement"""
+            def __new__(cls, seq, rev):
+                return str.__new__(cls, seq)
+
+            def __init__(self, seq, rev):
+                self.seq = seq
+                self.rev = rev
+
+            def rc(self):
+                # hand back the caller-supplied "reverse complement" verbatim
+                return self.rev
+
+        test_rna = fake_rna('AUGCUAACAUAAA', 'UUUAUGUUAGCAU')
+        #                    .  .  .  .  .    .  .  .  .  .
+        sgc = GeneticCode(self.sgc)
+        # three forward frames followed by three reverse-complement frames
+        self.assertEqual(sgc.translate_six_frames(test_rna), [
+            Protein('MLT*'), Protein('C*HK'), Protein('ANI'), Protein('FMLA'),
+            Protein('LC*H'), Protein('YVS')])
+
+        # should also actually work with an RNA or DNA sequence!!!
+        test_rna = RNA('AUGCUAACAUAAA')
+        self.assertEqual(sgc.translate_six_frames(test_rna), [
+            Protein('MLT*'), Protein('C*HK'), Protein('ANI'), Protein('FMLA'),
+            Protein('LC*H'), Protein('YVS')])
+
+    def test_stop_indexes(self):
+        """should return stop codon indexes for a specified frame"""
+        sgc = GeneticCode(self.sgc)
+        seq = DNA('ATGCTAACATAAA')
+        # expected stop-codon start indexes for frames 0, 1 and 2
+        expected = [[9], [4], []]
+        for frame, expect in enumerate(expected):
+            got = sgc.get_stop_indices(seq, start=frame)
+            self.assertEqual(got, expect)
+
+    def test_synonyms(self):
+        """GeneticCode synonyms should return aa -> codon set mapping."""
+        expected_synonyms = {
+            'A': ['GCT', 'GCC', 'GCA', 'GCG'],
+            'C': ['TGT', 'TGC'],
+            'D': ['GAT', 'GAC'],
+            'E': ['GAA', 'GAG'],
+            'F': ['TTT', 'TTC'],
+            'G': ['GGT', 'GGC', 'GGA', 'GGG'],
+            'H': ['CAT', 'CAC'],
+            'I': ['ATT', 'ATC', 'ATA'],
+            'K': ['AAA', 'AAG'],
+            'L': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
+            'M': ['ATG'],
+            'N': ['AAT', 'AAC'],
+            'P': ['CCT', 'CCC', 'CCA', 'CCG'],
+            'Q': ['CAA', 'CAG'],
+            'R': ['AGA', 'AGG', 'CGT', 'CGC', 'CGA', 'CGG'],
+            'S': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
+            'T': ['ACT', 'ACC', 'ACA', 'ACG'],
+            'V': ['GTT', 'GTC', 'GTA', 'GTG'],
+            'W': ['TGG'],
+            'Y': ['TAT', 'TAC'],
+            '*': ['TAA', 'TAG', 'TGA'],
+        }
+        obs_synonyms = GeneticCode(self.sgc).synonyms
+        # note that the lists will be arbitrary-order
+        for i in expected_synonyms:
+            # assertItemsEqual (Py2) was renamed assertCountEqual (Py3)
+            if hasattr(self, 'assertItemsEqual'):
+                self.assertItemsEqual(obs_synonyms[i], expected_synonyms[i])
+            else:
+                self.assertCountEqual(obs_synonyms[i], expected_synonyms[i])
+
+    def test_genetic_code_with_too_many_args(self):
+        """genetic_code factory rejects more than one positional argument"""
+        with self.assertRaises(TypeError):
+            genetic_code(1, 2)
+
+    def test_genetic_code_with_invalid_id(self):
+        """genetic_code factory rejects an unknown code id"""
+        with self.assertRaises(ValueError):
+            genetic_code(30)
+
+
+# Run the unittest runner when this module is executed directly.
+if __name__ == '__main__':
+    main()
diff --git a/skbio/sequence/tests/test_sequence.py b/skbio/sequence/tests/test_sequence.py
new file mode 100644
index 0000000..8d3799d
--- /dev/null
+++ b/skbio/sequence/tests/test_sequence.py
@@ -0,0 +1,1418 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.standard_library import hooks
+
+from re import compile as re_compile
+from collections import Counter, defaultdict
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+
+from skbio import (
+    BiologicalSequence, NucleotideSequence, DNASequence, RNASequence,
+    ProteinSequence)
+from skbio.sequence import BiologicalSequenceError
+
+with hooks():
+    from itertools import zip_longest
+
+
+class BiologicalSequenceTests(TestCase):
+
+    def setUp(self):
+        """Create BiologicalSequence fixtures shared by the tests below."""
+        # b1 carries quality scores; b2-b5 exercise id/description
+        # combinations; b7/b8 contain gap characters ('.' and '-')
+        self.b1 = BiologicalSequence('GATTACA', quality=range(7))
+        self.b2 = BiologicalSequence(
+            'ACCGGTACC', id="test-seq-2",
+            description="A test sequence")
+        self.b3 = BiologicalSequence(
+            'GREG', id="test-seq-3", description="A protein sequence")
+        self.b4 = BiologicalSequence(
+            'PRTEIN', id="test-seq-4")
+        self.b5 = BiologicalSequence(
+            'LLPRTEIN', description="some description")
+        self.b6 = BiologicalSequence('ACGTACGTACGT')
+        self.b7 = BiologicalSequence('..--..', quality=range(6))
+        self.b8 = BiologicalSequence('HE..--..LLO', id='hello',
+                                     description='gapped hello',
+                                     quality=range(11))
+
+    def test_init_varied_input(self):
+        """Constructor accepts str, list and tuple sequence inputs."""
+        # init as string
+        b = BiologicalSequence('ACCGGXZY')
+        self.assertEqual(str(b), 'ACCGGXZY')
+        self.assertEqual(b.id, "")
+        self.assertEqual(b.description, "")
+
+        # init as string with optional values
+        b = BiologicalSequence(
+            'ACCGGXZY', 'test-seq-1', 'The first test sequence')
+        self.assertEqual(str(b), 'ACCGGXZY')
+        self.assertEqual(b.id, "test-seq-1")
+        self.assertEqual(b.description, "The first test sequence")
+
+        # test init as a different string
+        b = BiologicalSequence('WRRTY')
+        self.assertEqual(str(b), 'WRRTY')
+
+        # init as list
+        b = BiologicalSequence(list('ACCGGXZY'))
+        self.assertEqual(str(b), 'ACCGGXZY')
+        self.assertEqual(b.id, "")
+        self.assertEqual(b.description, "")
+
+        # init as tuple
+        b = BiologicalSequence(tuple('ACCGGXZY'))
+        self.assertEqual(str(b), 'ACCGGXZY')
+        self.assertEqual(b.id, "")
+        self.assertEqual(b.description, "")
+
+    def test_init_with_validation(self):
+        self.assertRaises(BiologicalSequenceError, BiologicalSequence, "ACC",
+                          validate=True)
+        try:
+            # no error raised when only allow characters are passed
+            BiologicalSequence("..--..", validate=True)
+        except BiologicalSequenceError:
+            self.assertTrue(False)
+
+    def test_init_with_invalid_quality(self):
+        # invalid dtype
+        with self.assertRaises(TypeError):
+            BiologicalSequence('ACGT', quality=[2, 3, 4.1, 5])
+
+        # wrong number of dimensions (2-D)
+        with self.assertRaisesRegexp(BiologicalSequenceError, '1-D'):
+            BiologicalSequence('ACGT', quality=[[2, 3], [4, 5]])
+
+        # wrong number of elements
+        with self.assertRaisesRegexp(BiologicalSequenceError, '\(3\).*\(4\)'):
+            BiologicalSequence('ACGT', quality=[2, 3, 4])
+
+        # negatives
+        with self.assertRaisesRegexp(BiologicalSequenceError,
+                                     'quality scores.*greater than.*zero'):
+            BiologicalSequence('ACGT', quality=[2, 3, -1, 4])
+
+    def test_contains(self):
+        """Membership test is case-sensitive."""
+        self.assertTrue('G' in self.b1)
+        self.assertFalse('g' in self.b1)
+
+    def test_eq_and_ne(self):
+        """== / != compare sequence characters and type, not metadata."""
+        self.assertTrue(self.b1 == self.b1)
+        self.assertTrue(self.b2 == self.b2)
+        self.assertTrue(self.b3 == self.b3)
+
+        self.assertTrue(self.b1 != self.b3)
+        self.assertTrue(self.b1 != self.b2)
+        self.assertTrue(self.b2 != self.b3)
+
+        # identical sequences of the same type are equal, even if they have
+        # different ids, descriptions, and/or quality
+        self.assertTrue(
+            BiologicalSequence('ACGT') == BiologicalSequence('ACGT'))
+        self.assertTrue(
+            BiologicalSequence('ACGT', id='a') ==
+            BiologicalSequence('ACGT', id='b'))
+        self.assertTrue(
+            BiologicalSequence('ACGT', description='c') ==
+            BiologicalSequence('ACGT', description='d'))
+        self.assertTrue(
+            BiologicalSequence('ACGT', id='a', description='c') ==
+            BiologicalSequence('ACGT', id='b', description='d'))
+        self.assertTrue(
+            BiologicalSequence('ACGT', id='a', description='c',
+                               quality=[1, 2, 3, 4]) ==
+            BiologicalSequence('ACGT', id='b', description='d',
+                               quality=[5, 6, 7, 8]))
+
+        # different type causes sequences to not be equal
+        self.assertFalse(
+            BiologicalSequence('ACGT') == NucleotideSequence('ACGT'))
+
+    def test_getitem(self):
+        """Integer and slice indexing propagate metadata and quality."""
+        # use equals method to ensure that id, description, and sliced
+        # quality are correctly propagated to the resulting sequence
+        self.assertTrue(self.b1[0].equals(
+            BiologicalSequence('G', quality=(0,))))
+
+        self.assertTrue(self.b1[:].equals(
+            BiologicalSequence('GATTACA', quality=range(7))))
+
+        self.assertTrue(self.b1[::-1].equals(
+            BiologicalSequence('ACATTAG', quality=range(7)[::-1])))
+
+        # test a sequence without quality scores
+        b = BiologicalSequence('ACGT', id='foo', description='bar')
+        self.assertTrue(b[2:].equals(
+            BiologicalSequence('GT', id='foo', description='bar')))
+        self.assertTrue(b[2].equals(
+            BiologicalSequence('G', id='foo', description='bar')))
+
+    def test_getitem_indices(self):
+        """Fancy indexing with lists/tuples of positions."""
+        # no ordering, repeated items
+        self.assertTrue(self.b1[[3, 5, 4, 0, 5, 0]].equals(
+            BiologicalSequence('TCAGCG', quality=(3, 5, 4, 0, 5, 0))))
+
+        # empty list
+        self.assertTrue(self.b1[[]].equals(BiologicalSequence('', quality=())))
+
+        # empty tuple
+        self.assertTrue(self.b1[()].equals(BiologicalSequence('', quality=())))
+
+        # single item
+        self.assertTrue(
+            self.b1[[2]].equals(BiologicalSequence('T', quality=(2,))))
+
+        # negatives
+        self.assertTrue(self.b1[[2, -2, 4]].equals(
+            BiologicalSequence('TCA', quality=(2, 5, 4))))
+
+        # tuple
+        self.assertTrue(self.b1[1, 2, 3].equals(
+            BiologicalSequence('ATT', quality=(1, 2, 3))))
+        self.assertTrue(self.b1[(1, 2, 3)].equals(
+            BiologicalSequence('ATT', quality=(1, 2, 3))))
+
+        # test a sequence without quality scores
+        self.assertTrue(self.b2[5, 4, 1].equals(
+            BiologicalSequence('TGC', id='test-seq-2',
+                               description='A test sequence')))
+
+    def test_getitem_wrong_type(self):
+        """String indices are rejected with TypeError."""
+        with self.assertRaises(TypeError):
+            self.b1['1']
+
+    def test_getitem_out_of_range(self):
+        """Out-of-range indices raise IndexError, with or without quality."""
+        # seq with quality
+        with self.assertRaises(IndexError):
+            self.b1[42]
+        with self.assertRaises(IndexError):
+            self.b1[[1, 0, 23, 3]]
+
+        # seq without quality
+        with self.assertRaises(IndexError):
+            self.b2[43]
+        with self.assertRaises(IndexError):
+            self.b2[[2, 3, 22, 1]]
+
+    def test_hash(self):
+        """Sequences are hashable."""
+        self.assertTrue(isinstance(hash(self.b1), int))
+
+    def test_iter(self):
+        """Iteration yields characters and exhausts normally."""
+        b1_iter = iter(self.b1)
+        for actual, expected in zip(b1_iter, "GATTACA"):
+            self.assertEqual(actual, expected)
+
+        self.assertRaises(StopIteration, lambda: next(b1_iter))
+
+    def _compare_k_words_results(self, observed, expected):
+        """Assert observed/expected k-words match pairwise and in count."""
+        # zip_longest pads the shorter side with None so a length mismatch
+        # fails instead of being silently truncated
+        for obs, exp in zip_longest(observed, expected, fillvalue=None):
+            # use equals to compare quality, id, description, sequence, and
+            # type
+            self.assertTrue(obs.equals(exp))
+
+    def test_k_words_overlapping_true(self):
+        """Overlapping k-words slide one position at a time."""
+        expected = [
+            BiologicalSequence('G', quality=[0]),
+            BiologicalSequence('A', quality=[1]),
+            BiologicalSequence('T', quality=[2]),
+            BiologicalSequence('T', quality=[3]),
+            BiologicalSequence('A', quality=[4]),
+            BiologicalSequence('C', quality=[5]),
+            BiologicalSequence('A', quality=[6])
+        ]
+        self._compare_k_words_results(
+            self.b1.k_words(1, overlapping=True), expected)
+
+        expected = [
+            BiologicalSequence('GA', quality=[0, 1]),
+            BiologicalSequence('AT', quality=[1, 2]),
+            BiologicalSequence('TT', quality=[2, 3]),
+            BiologicalSequence('TA', quality=[3, 4]),
+            BiologicalSequence('AC', quality=[4, 5]),
+            BiologicalSequence('CA', quality=[5, 6])
+        ]
+        self._compare_k_words_results(
+            self.b1.k_words(2, overlapping=True), expected)
+
+        expected = [
+            BiologicalSequence('GAT', quality=[0, 1, 2]),
+            BiologicalSequence('ATT', quality=[1, 2, 3]),
+            BiologicalSequence('TTA', quality=[2, 3, 4]),
+            BiologicalSequence('TAC', quality=[3, 4, 5]),
+            BiologicalSequence('ACA', quality=[4, 5, 6])
+        ]
+        self._compare_k_words_results(
+            self.b1.k_words(3, overlapping=True), expected)
+
+        expected = [
+            BiologicalSequence('GATTACA', quality=[0, 1, 2, 3, 4, 5, 6])
+        ]
+        self._compare_k_words_results(
+            self.b1.k_words(7, overlapping=True), expected)
+
+        # k larger than the sequence yields nothing
+        self.assertEqual(list(self.b1.k_words(8, overlapping=True)), [])
+
+    def test_k_words_overlapping_false(self):
+        """Non-overlapping k-words step k positions; the tail is dropped."""
+        expected = [
+            BiologicalSequence('G', quality=[0]),
+            BiologicalSequence('A', quality=[1]),
+            BiologicalSequence('T', quality=[2]),
+            BiologicalSequence('T', quality=[3]),
+            BiologicalSequence('A', quality=[4]),
+            BiologicalSequence('C', quality=[5]),
+            BiologicalSequence('A', quality=[6])
+        ]
+        self._compare_k_words_results(
+            self.b1.k_words(1, overlapping=False), expected)
+
+        expected = [
+            BiologicalSequence('GA', quality=[0, 1]),
+            BiologicalSequence('TT', quality=[2, 3]),
+            BiologicalSequence('AC', quality=[4, 5])
+        ]
+        self._compare_k_words_results(
+            self.b1.k_words(2, overlapping=False), expected)
+
+        expected = [
+            BiologicalSequence('GAT', quality=[0, 1, 2]),
+            BiologicalSequence('TAC', quality=[3, 4, 5])
+        ]
+        self._compare_k_words_results(
+            self.b1.k_words(3, overlapping=False), expected)
+
+        expected = [
+            BiologicalSequence('GATTACA', quality=[0, 1, 2, 3, 4, 5, 6])
+        ]
+        self._compare_k_words_results(
+            self.b1.k_words(7, overlapping=False), expected)
+
+        self.assertEqual(list(self.b1.k_words(8, overlapping=False)), [])
+
+    def test_k_words_invalid_k(self):
+        """k must be a positive integer."""
+        with self.assertRaises(ValueError):
+            list(self.b1.k_words(0))
+
+        with self.assertRaises(ValueError):
+            list(self.b1.k_words(-42))
+
+    def test_k_words_different_sequences(self):
+        """k-words carry over id/description; empty sequence yields none."""
+        expected = [
+            BiologicalSequence('HE.', quality=[0, 1, 2], id='hello',
+                               description='gapped hello'),
+            BiologicalSequence('.--', quality=[3, 4, 5], id='hello',
+                               description='gapped hello'),
+            BiologicalSequence('..L', quality=[6, 7, 8], id='hello',
+                               description='gapped hello')
+        ]
+        self._compare_k_words_results(
+            self.b8.k_words(3, overlapping=False), expected)
+
+        b = BiologicalSequence('')
+        self.assertEqual(list(b.k_words(3)), [])
+
+    def test_k_word_counts(self):
+        """k_word_counts returns a Counter of k-words."""
+        # overlapping = True
+        expected = Counter('GATTACA')
+        self.assertEqual(self.b1.k_word_counts(1, overlapping=True),
+                         expected)
+        expected = Counter(['GAT', 'ATT', 'TTA', 'TAC', 'ACA'])
+        self.assertEqual(self.b1.k_word_counts(3, overlapping=True),
+                         expected)
+
+        # overlapping = False
+        expected = Counter(['GAT', 'TAC'])
+        self.assertEqual(self.b1.k_word_counts(3, overlapping=False),
+                         expected)
+        expected = Counter(['GATTACA'])
+        self.assertEqual(self.b1.k_word_counts(7, overlapping=False),
+                         expected)
+
+    def test_k_word_frequencies(self):
+        """k_word_frequencies returns relative frequencies as defaultdict."""
+        # overlapping = True
+        expected = defaultdict(float)
+        expected['A'] = 3/7.
+        expected['C'] = 1/7.
+        expected['G'] = 1/7.
+        expected['T'] = 2/7.
+        self.assertEqual(self.b1.k_word_frequencies(1, overlapping=True),
+                         expected)
+        expected = defaultdict(float)
+        expected['GAT'] = 1/5.
+        expected['ATT'] = 1/5.
+        expected['TTA'] = 1/5.
+        expected['TAC'] = 1/5.
+        expected['ACA'] = 1/5.
+        self.assertEqual(self.b1.k_word_frequencies(3, overlapping=True),
+                         expected)
+
+        # overlapping = False
+        expected = defaultdict(float)
+        expected['GAT'] = 1/2.
+        expected['TAC'] = 1/2.
+        self.assertEqual(self.b1.k_word_frequencies(3, overlapping=False),
+                         expected)
+        expected = defaultdict(float)
+        expected['GATTACA'] = 1.0
+        self.assertEqual(self.b1.k_word_frequencies(7, overlapping=False),
+                         expected)
+        # empty sequence has no k-words at all
+        expected = defaultdict(float)
+        empty = BiologicalSequence('')
+        self.assertEqual(empty.k_word_frequencies(1, overlapping=False),
+                         expected)
+
+    def test_k_word_frequencies_floating_point_precision(self):
+        """A constant sequence must yield a frequency of exactly 1.0."""
+        # Test that a sequence having no variation in k-words yields a
+        # frequency of exactly 1.0. Note that it is important to use
+        # self.assertEqual here instead of self.assertAlmostEqual because we
+        # want to test for exactly 1.0. A previous implementation of
+        # BiologicalSequence.k_word_frequencies added (1 / num_words) for each
+        # occurrence of a k-word to compute the frequencies (see
+        # https://github.com/biocore/scikit-bio/issues/801). In certain cases,
+        # this yielded a frequency slightly less than 1.0 due to roundoff
+        # error. The test case here uses a sequence with 10 characters that are
+        # all identical and computes k-word frequencies with k=1. This test
+        # case exposes the roundoff error present in the previous
+        # implementation because there are 10 k-words (which are all
+        # identical), so 1/10 added 10 times yields a number slightly less than
+        # 1.0. This occurs because 1/10 cannot be represented exactly as a
+        # floating point number.
+        seq = BiologicalSequence('AAAAAAAAAA')
+        self.assertEqual(seq.k_word_frequencies(1),
+                         defaultdict(float, {'A': 1.0}))
+
+    def test_len(self):
+        """len() reflects the number of characters."""
+        self.assertEqual(len(self.b1), 7)
+        self.assertEqual(len(self.b2), 9)
+        self.assertEqual(len(self.b3), 4)
+
+    def test_repr(self):
+        """repr() truncates sequences longer than ten characters."""
+        self.assertEqual(repr(self.b1),
+                         "<BiologicalSequence: GATTACA (length: 7)>")
+        self.assertEqual(repr(self.b6),
+                         "<BiologicalSequence: ACGTACGTAC... (length: 12)>")
+
+    def test_reversed(self):
+        """reversed() yields characters back to front."""
+        b1_reversed = reversed(self.b1)
+        for actual, expected in zip(b1_reversed, "ACATTAG"):
+            self.assertEqual(actual, expected)
+
+        self.assertRaises(StopIteration, lambda: next(b1_reversed))
+
+    def test_str(self):
+        """str() returns the raw character string."""
+        self.assertEqual(str(self.b1), "GATTACA")
+        self.assertEqual(str(self.b2), "ACCGGTACC")
+        self.assertEqual(str(self.b3), "GREG")
+
+    def test_alphabet(self):
+        """Base class has an empty alphabet."""
+        self.assertEqual(self.b1.alphabet(), set())
+
+    def test_gap_alphabet(self):
+        """Gap characters are '.' and '-'."""
+        self.assertEqual(self.b1.gap_alphabet(), set('-.'))
+
+    def test_sequence(self):
+        """sequence property exposes the character string."""
+        self.assertEqual(self.b1.sequence, "GATTACA")
+        self.assertEqual(self.b2.sequence, "ACCGGTACC")
+        self.assertEqual(self.b3.sequence, "GREG")
+
+    def test_id(self):
+        """id property defaults to the empty string."""
+        self.assertEqual(self.b1.id, "")
+        self.assertEqual(self.b2.id, "test-seq-2")
+        self.assertEqual(self.b3.id, "test-seq-3")
+
+    def test_description(self):
+        """description property defaults to the empty string."""
+        self.assertEqual(self.b1.description, "")
+        self.assertEqual(self.b2.description, "A test sequence")
+        self.assertEqual(self.b3.description, "A protein sequence")
+
+    def test_quality(self):
+        a = BiologicalSequence('ACA', quality=(22, 22, 1))
+
+        # should get back a read-only numpy array of int dtype
+        self.assertIsInstance(a.quality, np.ndarray)
+        self.assertEqual(a.quality.dtype, np.int)
+        npt.assert_equal(a.quality, np.array((22, 22, 1)))
+
+        # test that we can't mutate the quality scores
+        with self.assertRaises(ValueError):
+            a.quality[1] = 42
+
+        # test that we can't set the property
+        with self.assertRaises(AttributeError):
+            a.quality = (22, 22, 42)
+
+    def test_quality_not_provided(self):
+        b = BiologicalSequence('ACA')
+        self.assertIs(b.quality, None)
+
+    def test_quality_scalar(self):
+        b = BiologicalSequence('G', quality=2)
+
+        self.assertIsInstance(b.quality, np.ndarray)
+        self.assertEqual(b.quality.dtype, np.int)
+        self.assertEqual(b.quality.shape, (1,))
+        npt.assert_equal(b.quality, np.array([2]))
+
+    def test_quality_empty(self):
+        b = BiologicalSequence('', quality=[])
+
+        self.assertIsInstance(b.quality, np.ndarray)
+        self.assertEqual(b.quality.dtype, np.int)
+        self.assertEqual(b.quality.shape, (0,))
+        npt.assert_equal(b.quality, np.array([]))
+
+    def test_quality_no_copy(self):
+        """An ndarray quality input is stored by reference (no copy)."""
+        qual = np.array([22, 22, 1])
+        a = BiologicalSequence('ACA', quality=qual)
+        self.assertIs(a.quality, qual)
+
+        # the stored array is made read-only...
+        with self.assertRaises(ValueError):
+            a.quality[1] = 42
+
+        # ...which also locks the caller's original array (same object)
+        with self.assertRaises(ValueError):
+            qual[1] = 42
+
+    def test_has_quality(self):
+        """has_quality reflects whether quality was provided."""
+        a = BiologicalSequence('ACA', quality=(5, 4, 67))
+        self.assertTrue(a.has_quality())
+
+        b = BiologicalSequence('ACA')
+        self.assertFalse(b.has_quality())
+
+    def test_copy_default_behavior(self):
+        """copy() with no args produces an equal but distinct object."""
+        # minimal sequence, sequence with all optional attributes present, and
+        # a subclass of BiologicalSequence
+        for seq in self.b6, self.b8, RNASequence('ACGU', id='rna seq'):
+            copy = seq.copy()
+            self.assertTrue(seq.equals(copy))
+            self.assertFalse(seq is copy)
+
+    def test_copy_update_single_attribute(self):
+        """copy(attr=...) overrides one attribute, leaving the rest."""
+        copy = self.b8.copy(id='new id')
+        self.assertFalse(self.b8 is copy)
+
+        # they don't compare equal when we compare all attributes...
+        self.assertFalse(self.b8.equals(copy))
+
+        # ...but they *do* compare equal when we ignore id, as that was the
+        # only attribute that changed
+        self.assertTrue(self.b8.equals(copy, ignore=['id']))
+
+        # id should be what we specified in the copy call...
+        self.assertEqual(copy.id, 'new id')
+
+        # ..and shouldn't have changed on the original sequence
+        self.assertEqual(self.b8.id, 'hello')
+
+    def test_copy_update_multiple_attributes(self):
+        """copy() can override several attributes at once."""
+        copy = self.b8.copy(id='new id', quality=range(20, 25),
+                            sequence='ACGTA', description='new desc')
+        self.assertFalse(self.b8 is copy)
+        self.assertFalse(self.b8.equals(copy))
+
+        # attributes should be what we specified in the copy call...
+        self.assertEqual(copy.id, 'new id')
+        npt.assert_equal(copy.quality, np.array([20, 21, 22, 23, 24]))
+        self.assertEqual(copy.sequence, 'ACGTA')
+        self.assertEqual(copy.description, 'new desc')
+
+        # ..and shouldn't have changed on the original sequence
+        self.assertEqual(self.b8.id, 'hello')
+        npt.assert_equal(self.b8.quality, range(11))
+        self.assertEqual(self.b8.sequence, 'HE..--..LLO')
+        self.assertEqual(self.b8.description, 'gapped hello')
+
+    def test_copy_invalid_kwargs(self):
+        """Unknown keyword arguments to copy() raise TypeError."""
+        with self.assertRaises(TypeError):
+            self.b2.copy(id='bar', unrecognized_kwarg='baz')
+
+    def test_copy_extra_non_attribute_kwargs(self):
+        """Constructor-only kwargs (e.g. validate) pass through copy()."""
+        # test that we can pass through additional kwargs to the constructor
+        # that aren't related to biological sequence attributes (i.e., they
+        # aren't state that has to be copied)
+
+        # create an invalid DNA sequence
+        a = DNASequence('FOO', description='foo')
+
+        # should be able to copy it b/c validate defaults to False
+        b = a.copy()
+        self.assertTrue(a.equals(b))
+        self.assertFalse(a is b)
+
+        # specifying validate should raise an error when the copy is
+        # instantiated
+        with self.assertRaises(BiologicalSequenceError):
+            a.copy(validate=True)
+
+    def test_equals_true(self):
+        """equals() compares all attributes, minus any listed in ignore."""
+        # sequences match, all other attributes are not provided
+        self.assertTrue(
+            BiologicalSequence('ACGT').equals(BiologicalSequence('ACGT')))
+
+        # all attributes are provided and match
+        a = BiologicalSequence('ACGT', id='foo', description='abc',
+                               quality=[1, 2, 3, 4])
+        b = BiologicalSequence('ACGT', id='foo', description='abc',
+                               quality=[1, 2, 3, 4])
+        self.assertTrue(a.equals(b))
+
+        # ignore type
+        a = BiologicalSequence('ACGT')
+        b = DNASequence('ACGT')
+        self.assertTrue(a.equals(b, ignore=['type']))
+
+        # ignore id
+        a = BiologicalSequence('ACGT', id='foo')
+        b = BiologicalSequence('ACGT', id='bar')
+        self.assertTrue(a.equals(b, ignore=['id']))
+
+        # ignore description
+        a = BiologicalSequence('ACGT', description='foo')
+        b = BiologicalSequence('ACGT', description='bar')
+        self.assertTrue(a.equals(b, ignore=['description']))
+
+        # ignore quality
+        a = BiologicalSequence('ACGT', quality=[1, 2, 3, 4])
+        b = BiologicalSequence('ACGT', quality=[5, 6, 7, 8])
+        self.assertTrue(a.equals(b, ignore=['quality']))
+
+        # ignore sequence
+        a = BiologicalSequence('ACGA')
+        b = BiologicalSequence('ACGT')
+        self.assertTrue(a.equals(b, ignore=['sequence']))
+
+        # ignore everything
+        a = BiologicalSequence('ACGA', id='foo', description='abc',
+                               quality=[1, 2, 3, 4])
+        b = DNASequence('ACGT', id='bar', description='def',
+                        quality=[5, 6, 7, 8])
+        self.assertTrue(a.equals(b, ignore=['quality', 'description', 'id',
+                                            'sequence', 'type']))
+
+    def test_equals_false(self):
+        """equals() fails on any single differing, non-ignored attribute."""
+        # type mismatch
+        a = BiologicalSequence('ACGT', id='foo', description='abc',
+                               quality=[1, 2, 3, 4])
+        b = NucleotideSequence('ACGT', id='bar', description='def',
+                               quality=[5, 6, 7, 8])
+        self.assertFalse(a.equals(b, ignore=['quality', 'description', 'id']))
+
+        # id mismatch
+        a = BiologicalSequence('ACGT', id='foo')
+        b = BiologicalSequence('ACGT', id='bar')
+        self.assertFalse(a.equals(b))
+
+        # description mismatch
+        a = BiologicalSequence('ACGT', description='foo')
+        b = BiologicalSequence('ACGT', description='bar')
+        self.assertFalse(a.equals(b))
+
+        # quality mismatch (both provided)
+        a = BiologicalSequence('ACGT', quality=[1, 2, 3, 4])
+        b = BiologicalSequence('ACGT', quality=[1, 2, 3, 5])
+        self.assertFalse(a.equals(b))
+
+        # quality mismatch (one provided)
+        a = BiologicalSequence('ACGT', quality=[1, 2, 3, 4])
+        b = BiologicalSequence('ACGT')
+        self.assertFalse(a.equals(b))
+
+        # sequence mismatch
+        a = BiologicalSequence('ACGT')
+        b = BiologicalSequence('TGCA')
+        self.assertFalse(a.equals(b))
+
+    def test_count(self):
+        self.assertEqual(self.b1.count('A'), 3)
+        self.assertEqual(self.b1.count('T'), 2)
+        self.assertEqual(self.b1.count('TT'), 1)
+
+    def test_degap(self):
+        # use equals method to ensure that id, description, and filtered
+        # quality are correctly propagated to the resulting sequence
+
+        # no filtering, has quality
+        self.assertTrue(self.b1.degap().equals(self.b1))
+
+        # no filtering, doesn't have quality
+        self.assertTrue(self.b2.degap().equals(self.b2))
+
+        # everything is filtered, has quality
+        self.assertTrue(self.b7.degap().equals(
+            BiologicalSequence('', quality=[])))
+
+        # some filtering, has quality
+        self.assertTrue(self.b8.degap().equals(
+            BiologicalSequence('HELLO', id='hello', description='gapped hello',
+                               quality=[0, 1, 8, 9, 10])))
+
+    def test_distance(self):
+        # note that test_hamming_distance covers default behavior more
+        # extensively
+        self.assertEqual(self.b1.distance(self.b1), 0.0)
+        self.assertEqual(self.b1.distance(BiologicalSequence('GATTACC')), 1./7)
+
+        def dumb_distance(x, y):
+            return 42
+
+        self.assertEqual(
+            self.b1.distance(self.b1, distance_fn=dumb_distance), 42)
+
+    def test_distance_unequal_length(self):
+        # Hamming distance (default) requires that sequences are of equal
+        # length
+        with self.assertRaises(BiologicalSequenceError):
+            self.b1.distance(self.b2)
+
+        # alternate distance functions don't have that requirement (unless
+        # it's implemented within the provided distance function)
+        def dumb_distance(x, y):
+            return 42
+        self.assertEqual(
+            self.b1.distance(self.b2, distance_fn=dumb_distance), 42)
+
+    def test_fraction_diff(self):
+        self.assertEqual(self.b1.fraction_diff(self.b1), 0., 5)
+        self.assertEqual(
+            self.b1.fraction_diff(BiologicalSequence('GATTACC')), 1. / 7., 5)
+
+    def test_fraction_same(self):
+        self.assertAlmostEqual(self.b1.fraction_same(self.b1), 1., 5)
+        self.assertAlmostEqual(
+            self.b1.fraction_same(BiologicalSequence('GATTACC')), 6. / 7., 5)
+
+    def test_gap_maps(self):
+        # in a sequence with no gaps, the two gap maps are identical
+        self.assertEqual(self.b1.gap_maps(),
+                         ([0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]))
+        # in a sequence of all gaps, the map of degapped to gapped positions
+        # is the empty list (because the degapped sequence has length 0), and
+        # the map of gapped to degapped positions is all None
+        self.assertEqual(self.b7.gap_maps(),
+                         ([], [None, None, None, None, None, None]))
+
+        # mixed case: gaps at positions 2-7 of b8 ('HE.--..LLO')
+        self.assertEqual(self.b8.gap_maps(),
+                         ([0, 1, 8, 9, 10],
+                          [0, 1, None, None, None, None, None, None, 2, 3, 4]))
+
+        # example from the gap_maps doc string
+        self.assertEqual(BiologicalSequence('-ACCGA-TA-').gap_maps(),
+                         ([1, 2, 3, 4, 5, 7, 8],
+                          [None, 0, 1, 2, 3, 4, None, 5, 6, None]))
+
+    def test_gap_vector(self):
+        self.assertEqual(self.b1.gap_vector(),
+                         [False] * len(self.b1))
+        self.assertEqual(self.b7.gap_vector(),
+                         [True] * len(self.b7))
+        self.assertEqual(self.b8.gap_vector(),
+                         [False, False, True, True, True, True,
+                          True, True, False, False, False])
+
+    def test_unsupported_characters(self):
+        self.assertEqual(self.b1.unsupported_characters(), set('GATC'))
+        self.assertEqual(self.b7.unsupported_characters(), set())
+
+    def test_has_unsupported_characters(self):
+        self.assertTrue(self.b1.has_unsupported_characters())
+        self.assertFalse(self.b7.has_unsupported_characters())
+
+    def test_index(self):
+        self.assertEqual(self.b1.index('G'), 0)
+        self.assertEqual(self.b1.index('A'), 1)
+        self.assertEqual(self.b1.index('AC'), 4)
+        self.assertRaises(ValueError, self.b1.index, 'x')
+
+    def test_is_gap(self):
+        self.assertTrue(self.b1.is_gap('.'))
+        self.assertTrue(self.b1.is_gap('-'))
+        self.assertFalse(self.b1.is_gap('A'))
+        self.assertFalse(self.b1.is_gap('x'))
+        self.assertFalse(self.b1.is_gap(' '))
+        self.assertFalse(self.b1.is_gap(''))
+
+    def test_is_gapped(self):
+        self.assertFalse(self.b1.is_gapped())
+        self.assertFalse(self.b2.is_gapped())
+        self.assertTrue(self.b7.is_gapped())
+        self.assertTrue(self.b8.is_gapped())
+
+    def test_is_valid(self):
+        self.assertFalse(self.b1.is_valid())
+        self.assertTrue(self.b7.is_valid())
+
+    def test_to_fasta(self):
+        self.assertEqual(self.b1.to_fasta(), ">\nGATTACA\n")
+        self.assertEqual(self.b1.to_fasta(terminal_character=""), ">\nGATTACA")
+        self.assertEqual(self.b2.to_fasta(),
+                         ">test-seq-2 A test sequence\nACCGGTACC\n")
+        self.assertEqual(self.b3.to_fasta(),
+                         ">test-seq-3 A protein sequence\nGREG\n")
+        self.assertEqual(self.b4.to_fasta(),
+                         ">test-seq-4\nPRTEIN\n")
+        self.assertEqual(self.b5.to_fasta(),
+                         "> some description\nLLPRTEIN\n")
+
+        # alt parameters
+        self.assertEqual(self.b2.to_fasta(field_delimiter=":"),
+                         ">test-seq-2:A test sequence\nACCGGTACC\n")
+        self.assertEqual(self.b2.to_fasta(terminal_character="!"),
+                         ">test-seq-2 A test sequence\nACCGGTACC!")
+        self.assertEqual(
+            self.b2.to_fasta(field_delimiter=":", terminal_character="!"),
+            ">test-seq-2:A test sequence\nACCGGTACC!")
+
+    def test_upper(self):
+        b = NucleotideSequence('GAt.ACa-', id='x', description='42',
+                               quality=range(8))
+        expected = NucleotideSequence('GAT.ACA-', id='x',
+                                      description='42', quality=range(8))
+        # use equals method to ensure that id, description, and quality are
+        # correctly propagated to the resulting sequence
+        self.assertTrue(b.upper().equals(expected))
+
+    def test_lower(self):
+        b = NucleotideSequence('GAt.ACa-', id='x', description='42',
+                               quality=range(8))
+        expected = NucleotideSequence('gat.aca-', id='x',
+                                      description='42', quality=range(8))
+        # use equals method to ensure that id, description, and quality are
+        # correctly propagated to the resulting sequence
+        self.assertTrue(b.lower().equals(expected))
+
+    def test_regex_iter(self):
+        pat = re_compile('(T+A)(CA)')
+
+        obs = list(self.b1.regex_iter(pat))
+        exp = [(2, 5, 'TTA'), (5, 7, 'CA')]
+        self.assertEqual(obs, exp)
+
+        obs = list(self.b1.regex_iter(pat, retrieve_group_0=True))
+        exp = [(2, 7, 'TTACA'), (2, 5, 'TTA'), (5, 7, 'CA')]
+        self.assertEqual(obs, exp)
+
+
+class NucelotideSequenceTests(TestCase):
+
+    def setUp(self):
+        self.empty = NucleotideSequence('')
+        self.b1 = NucleotideSequence('GATTACA')
+        self.b2 = NucleotideSequence(
+            'ACCGGUACC', id="test-seq-2",
+            description="A test sequence")
+        self.b3 = NucleotideSequence('G-AT-TG.AT.T')
+
+    def test_alphabet(self):
+        exp = {
+            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'T',
+            'W', 'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's',
+            'r', 'u', 't', 'w', 'v', 'y'
+        }
+
+        # Test calling from an instance and purely static context.
+        self.assertEqual(self.b1.alphabet(), exp)
+        self.assertEqual(NucleotideSequence.alphabet(), exp)
+
+    def test_gap_alphabet(self):
+        self.assertEqual(self.b1.gap_alphabet(), set('-.'))
+
+    def test_complement_map(self):
+        exp = {}
+        self.assertEqual(self.b1.complement_map(), exp)
+        self.assertEqual(NucleotideSequence.complement_map(), exp)
+
+    def test_iupac_standard_characters(self):
+        exp = set("ACGTUacgtu")
+        self.assertEqual(self.b1.iupac_standard_characters(), exp)
+        self.assertEqual(NucleotideSequence.iupac_standard_characters(), exp)
+
+    def test_iupac_degeneracies(self):
+        exp = {
+            # upper
+            'B': set(['C', 'U', 'T', 'G']), 'D': set(['A', 'U', 'T', 'G']),
+            'H': set(['A', 'C', 'U', 'T']), 'K': set(['U', 'T', 'G']),
+            'M': set(['A', 'C']), 'N': set(['A', 'C', 'U', 'T', 'G']),
+            'S': set(['C', 'G']), 'R': set(['A', 'G']),
+            'W': set(['A', 'U', 'T']), 'V': set(['A', 'C', 'G']),
+            'Y': set(['C', 'U', 'T']),
+            # lower
+            'b': set(['c', 'u', 't', 'g']), 'd': set(['a', 'u', 't', 'g']),
+            'h': set(['a', 'c', 'u', 't']), 'k': set(['u', 't', 'g']),
+            'm': set(['a', 'c']), 'n': set(['a', 'c', 'u', 't', 'g']),
+            's': set(['c', 'g']), 'r': set(['a', 'g']),
+            'w': set(['a', 'u', 't']), 'v': set(['a', 'c', 'g']),
+            'y': set(['c', 'u', 't'])
+        }
+        self.assertEqual(self.b1.iupac_degeneracies(), exp)
+        self.assertEqual(NucleotideSequence.iupac_degeneracies(), exp)
+
+        # Test that we can modify a copy of the mapping without altering the
+        # canonical representation.
+        degen = NucleotideSequence.iupac_degeneracies()
+        degen.update({'V': set("BRO"), 'Z': set("ZORRO")})
+        self.assertNotEqual(degen, exp)
+        self.assertEqual(NucleotideSequence.iupac_degeneracies(), exp)
+
+    def test_iupac_degenerate_characters(self):
+        exp = set(['B', 'D', 'H', 'K', 'M', 'N', 'S', 'R', 'W', 'V', 'Y',
+                   'b', 'd', 'h', 'k', 'm', 'n', 's', 'r', 'w', 'v', 'y'])
+        self.assertEqual(self.b1.iupac_degenerate_characters(), exp)
+        self.assertEqual(NucleotideSequence.iupac_degenerate_characters(), exp)
+
+    def test_iupac_characters(self):
+        exp = {
+            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'T',
+            'W', 'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's',
+            'r', 'u', 't', 'w', 'v', 'y'
+        }
+
+        self.assertEqual(self.b1.iupac_characters(), exp)
+        self.assertEqual(NucleotideSequence.iupac_characters(), exp)
+
+    def test_complement(self):
+        self.assertRaises(BiologicalSequenceError,
+                          self.b1.complement)
+
+    def test_reverse_complement(self):
+        self.assertRaises(BiologicalSequenceError,
+                          self.b1.reverse_complement)
+
+    def test_is_reverse_complement(self):
+        self.assertRaises(BiologicalSequenceError,
+                          self.b1.is_reverse_complement, self.b1)
+
+    def test_nondegenerates_invalid(self):
+        with self.assertRaises(BiologicalSequenceError):
+            list(NucleotideSequence('AZA').nondegenerates())
+
+    def test_nondegenerates_empty(self):
+        self.assertEqual(list(self.empty.nondegenerates()), [self.empty])
+
+    def test_nondegenerates_no_degens(self):
+        self.assertEqual(list(self.b1.nondegenerates()), [self.b1])
+
+    def test_nondegenerates_all_degens(self):
+        # Same chars.
+        exp = [NucleotideSequence('CC'), NucleotideSequence('CG'),
+               NucleotideSequence('GC'), NucleotideSequence('GG')]
+        # Sort based on sequence string, as order is not guaranteed.
+        obs = sorted(NucleotideSequence('SS').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+        # Different chars.
+        exp = [NucleotideSequence('AC'), NucleotideSequence('AG'),
+               NucleotideSequence('GC'), NucleotideSequence('GG')]
+        obs = sorted(NucleotideSequence('RS').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+        # Odd number of chars.
+        obs = list(NucleotideSequence('NNN').nondegenerates())
+        self.assertEqual(len(obs), 5**3)
+
+    def test_nondegenerates_mixed_degens(self):
+        exp = [NucleotideSequence('AGC'), NucleotideSequence('AGT'),
+               NucleotideSequence('AGU'), NucleotideSequence('GGC'),
+               NucleotideSequence('GGT'), NucleotideSequence('GGU')]
+        obs = sorted(NucleotideSequence('RGY').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+    def test_nondegenerates_gap_mixed_case(self):
+        exp = [NucleotideSequence('-A.a'), NucleotideSequence('-A.c'),
+               NucleotideSequence('-C.a'), NucleotideSequence('-C.c')]
+        obs = sorted(NucleotideSequence('-M.m').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+    def test_find_features(self):
+        exp = [(0, 2, 'GA'), (4, 5, 'A'), (6, 7, 'A')]
+        obs = list(self.b1.find_features('purine_run'))
+        self.assertEqual(obs, exp)
+
+        exp = [(2, 4, 'TT'), (5, 6, 'C')]
+        obs = list(self.b1.find_features('pyrimidine_run'))
+        self.assertEqual(obs, exp)
+
+        exp = [(0, 1, 'A'), (3, 5, 'GG'), (6, 7, 'A')]
+        obs = list(self.b2.find_features('purine_run'))
+        self.assertEqual(obs, exp)
+
+        exp = [(1, 3, 'CC'), (5, 6, 'U'), (7, 9, 'CC')]
+        obs = list(self.b2.find_features('pyrimidine_run'))
+        self.assertEqual(obs, exp)
+
+    def test_find_features_min_length(self):
+        exp = [(0, 2, 'GA')]
+        obs = list(self.b1.find_features('purine_run', 2))
+        self.assertEqual(obs, exp)
+
+        exp = [(2, 4, 'TT')]
+        obs = list(self.b1.find_features('pyrimidine_run', 2))
+        self.assertEqual(obs, exp)
+
+        exp = [(3, 5, 'GG')]
+        obs = list(self.b2.find_features('purine_run', 2))
+        self.assertEqual(obs, exp)
+
+        exp = [(1, 3, 'CC'), (7, 9, 'CC')]
+        obs = list(self.b2.find_features('pyrimidine_run', 2))
+        self.assertEqual(obs, exp)
+
+    def test_find_features_no_feature_type(self):
+        with self.assertRaises(ValueError):
+            list(self.b1.find_features('nonexistent_feature_type'))
+
+    def test_find_features_allow_gaps(self):
+        exp = [(0, 3, 'G-A'), (6, 9, 'G.A')]
+        obs = list(self.b3.find_features('purine_run', 2, True))
+        self.assertEqual(obs, exp)
+
+        exp = [(3, 6, 'T-T'), (9, 12, 'T.T')]
+        obs = list(self.b3.find_features('pyrimidine_run', 2, True))
+        self.assertEqual(obs, exp)
+
+    def test_nondegenerates_propagate_optional_properties(self):
+        seq = NucleotideSequence('RS', id='foo', description='bar',
+                                 quality=[42, 999])
+
+        exp = [
+            NucleotideSequence('AC', id='foo', description='bar',
+                               quality=[42, 999]),
+            NucleotideSequence('AG', id='foo', description='bar',
+                               quality=[42, 999]),
+            NucleotideSequence('GC', id='foo', description='bar',
+                               quality=[42, 999]),
+            NucleotideSequence('GG', id='foo', description='bar',
+                               quality=[42, 999])
+        ]
+
+        obs = sorted(seq.nondegenerates(), key=str)
+
+        for o, e in zip(obs, exp):
+            # use equals method to ensure that id, description, and quality are
+            # correctly propagated to the resulting sequence
+            self.assertTrue(o.equals(e))
+
+
+class DNASequenceTests(TestCase):
+    """Unit tests for DNASequence: alphabets, complements, and degeneracies."""
+
+    def setUp(self):
+        self.empty = DNASequence('')
+        self.b1 = DNASequence('GATTACA')
+        self.b2 = DNASequence('ACCGGTACC', id="test-seq-2",
+                              description="A test sequence", quality=range(9))
+        self.b3 = DNASequence(
+            'ACCGGUACC', id="bad-seq-1",
+            description="Not a DNA sequence")
+        self.b4 = DNASequence(
+            'MRWSYKVHDBN', id="degen",
+            description="All of the degenerate bases")
+        self.b5 = DNASequence('.G--ATTAC-A...')
+
+    def test_alphabet(self):
+        exp = {
+            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'T', 'W',
+            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
+            't', 'w', 'v', 'y'
+        }
+
+        # Test calling from an instance and purely static context.
+        self.assertEqual(self.b1.alphabet(), exp)
+        self.assertEqual(DNASequence.alphabet(), exp)
+
+    def test_gap_alphabet(self):
+        self.assertEqual(self.b1.gap_alphabet(), set('-.'))
+
+    def test_complement_map(self):
+        # gap characters complement to themselves
+        exp = {
+            '-': '-', '.': '.', 'A': 'T', 'C': 'G', 'B': 'V', 'D': 'H',
+            'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
+            'R': 'Y', 'T': 'A', 'W': 'W', 'V': 'B', 'Y': 'R', 'a': 't',
+            'c': 'g', 'b': 'v', 'd': 'h', 'g': 'c', 'h': 'd', 'k': 'm',
+            'm': 'k', 'n': 'n', 's': 's', 'r': 'y', 't': 'a', 'w': 'w',
+            'v': 'b', 'y': 'r'
+        }
+        self.assertEqual(self.b1.complement_map(), exp)
+        self.assertEqual(DNASequence.complement_map(), exp)
+
+    def test_iupac_standard_characters(self):
+        exp = set("ACGTacgt")
+        self.assertEqual(self.b1.iupac_standard_characters(), exp)
+        self.assertEqual(DNASequence.iupac_standard_characters(), exp)
+
+    def test_iupac_degeneracies(self):
+        exp = {
+            'B': set(['C', 'T', 'G']), 'D': set(['A', 'T', 'G']),
+            'H': set(['A', 'C', 'T']), 'K': set(['T', 'G']),
+            'M': set(['A', 'C']), 'N': set(['A', 'C', 'T', 'G']),
+            'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'T']),
+            'V': set(['A', 'C', 'G']), 'Y': set(['C', 'T']),
+            'b': set(['c', 't', 'g']), 'd': set(['a', 't', 'g']),
+            'h': set(['a', 'c', 't']), 'k': set(['t', 'g']),
+            'm': set(['a', 'c']), 'n': set(['a', 'c', 't', 'g']),
+            's': set(['c', 'g']), 'r': set(['a', 'g']), 'w': set(['a', 't']),
+            'v': set(['a', 'c', 'g']), 'y': set(['c', 't'])
+        }
+        self.assertEqual(self.b1.iupac_degeneracies(), exp)
+        self.assertEqual(DNASequence.iupac_degeneracies(), exp)
+
+    def test_iupac_degenerate_characters(self):
+        exp = set(['B', 'D', 'H', 'K', 'M', 'N', 'S', 'R', 'W', 'V', 'Y',
+                   'b', 'd', 'h', 'k', 'm', 'n', 's', 'r', 'w', 'v', 'y'])
+        self.assertEqual(self.b1.iupac_degenerate_characters(), exp)
+        self.assertEqual(DNASequence.iupac_degenerate_characters(), exp)
+
+    def test_iupac_characters(self):
+        exp = {
+            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'T', 'W',
+            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
+            't', 'w', 'v', 'y'
+        }
+        self.assertEqual(self.b1.iupac_characters(), exp)
+        self.assertEqual(DNASequence.iupac_characters(), exp)
+
+    def test_complement(self):
+        # use equals method to ensure that id, description, and quality are
+        # correctly propagated to the resulting sequence
+        self.assertTrue(self.b1.complement().equals(DNASequence("CTAATGT")))
+
+        self.assertTrue(self.b2.complement().equals(
+            DNASequence("TGGCCATGG", id="test-seq-2",
+                        description="A test sequence", quality=range(9))))
+
+        # b3 contains 'U', which has no DNA complement
+        self.assertRaises(BiologicalSequenceError, self.b3.complement)
+
+        self.assertTrue(self.b4.complement().equals(
+            DNASequence("KYWSRMBDHVN", id="degen",
+                        description="All of the degenerate bases")))
+
+        self.assertTrue(self.b5.complement().equals(
+            DNASequence(".C--TAATG-T...")))
+
+    def test_reverse_complement(self):
+        # use equals method to ensure that id, description, and (reversed)
+        # quality scores are correctly propagated to the resulting sequence
+        self.assertTrue(self.b1.reverse_complement().equals(
+            DNASequence("TGTAATC")))
+
+        self.assertTrue(self.b2.reverse_complement().equals(
+            DNASequence("GGTACCGGT", id="test-seq-2",
+                        description="A test sequence",
+                        quality=range(9)[::-1])))
+
+        self.assertRaises(BiologicalSequenceError, self.b3.reverse_complement)
+
+        self.assertTrue(self.b4.reverse_complement().equals(
+            DNASequence("NVHDBMRSWYK", id="degen",
+                        description="All of the degenerate bases")))
+
+    def test_unsupported_characters(self):
+        self.assertEqual(self.b1.unsupported_characters(), set())
+        self.assertEqual(self.b2.unsupported_characters(), set())
+        # 'U' is RNA-only, hence unsupported in DNA
+        self.assertEqual(self.b3.unsupported_characters(), set('U'))
+        self.assertEqual(self.b4.unsupported_characters(), set())
+
+    def test_has_unsupported_characters(self):
+        self.assertFalse(self.b1.has_unsupported_characters())
+        self.assertFalse(self.b2.has_unsupported_characters())
+        self.assertTrue(self.b3.has_unsupported_characters())
+        self.assertFalse(self.b4.has_unsupported_characters())
+
+    def test_is_reverse_complement(self):
+        self.assertFalse(self.b1.is_reverse_complement(self.b1))
+
+        # id, description, and quality scores should be ignored (only sequence
+        # data and type should be compared)
+        self.assertTrue(self.b1.is_reverse_complement(
+            DNASequence('TGTAATC', quality=range(7))))
+
+        self.assertTrue(
+            self.b4.is_reverse_complement(DNASequence('NVHDBMRSWYK')))
+
+    def test_nondegenerates_invalid(self):
+        with self.assertRaises(BiologicalSequenceError):
+            list(DNASequence('AZA').nondegenerates())
+
+    def test_nondegenerates_empty(self):
+        self.assertEqual(list(self.empty.nondegenerates()), [self.empty])
+
+    def test_nondegenerates_no_degens(self):
+        self.assertEqual(list(self.b1.nondegenerates()), [self.b1])
+
+    def test_nondegenerates_all_degens(self):
+        # Same chars.
+        exp = [DNASequence('CC'), DNASequence('CG'), DNASequence('GC'),
+               DNASequence('GG')]
+        # Sort based on sequence string, as order is not guaranteed.
+        obs = sorted(DNASequence('SS').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+        # Different chars.
+        exp = [DNASequence('AC'), DNASequence('AG'), DNASequence('GC'),
+               DNASequence('GG')]
+        obs = sorted(DNASequence('RS').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+        # Odd number of chars. N expands to 4 characters (ACGT) for DNA.
+        obs = list(DNASequence('NNN').nondegenerates())
+        self.assertEqual(len(obs), 4**3)
+
+    def test_nondegenerates_mixed_degens(self):
+        exp = [DNASequence('AGC'), DNASequence('AGT'), DNASequence('GGC'),
+               DNASequence('GGT')]
+        obs = sorted(DNASequence('RGY').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+    def test_nondegenerates_gap_mixed_case(self):
+        exp = [DNASequence('-A.a'), DNASequence('-A.c'),
+               DNASequence('-C.a'), DNASequence('-C.c')]
+        obs = sorted(DNASequence('-M.m').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+
+class RNASequenceTests(TestCase):
+    """Unit tests for RNASequence; mirrors DNASequenceTests with U for T."""
+
+    def setUp(self):
+        self.empty = RNASequence('')
+        self.b1 = RNASequence('GAUUACA')
+        self.b2 = RNASequence('ACCGGUACC', id="test-seq-2",
+                              description="A test sequence", quality=range(9))
+        self.b3 = RNASequence(
+            'ACCGGTACC', id="bad-seq-1",
+            description="Not a RNA sequence")
+        self.b4 = RNASequence(
+            'MRWSYKVHDBN', id="degen",
+            description="All of the degenerate bases")
+        self.b5 = RNASequence('.G--AUUAC-A...')
+
+    def test_alphabet(self):
+        exp = {
+            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'W',
+            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
+            'u', 'w', 'v', 'y'
+        }
+
+        # Test calling from an instance and purely static context.
+        self.assertEqual(self.b1.alphabet(), exp)
+        self.assertEqual(RNASequence.alphabet(), exp)
+
+    def test_gap_alphabet(self):
+        self.assertEqual(self.b1.gap_alphabet(), set('-.'))
+
+    def test_complement_map(self):
+        # gap characters complement to themselves
+        exp = {
+            '-': '-', '.': '.', 'A': 'U', 'C': 'G', 'B': 'V', 'D': 'H',
+            'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
+            'R': 'Y', 'U': 'A', 'W': 'W', 'V': 'B', 'Y': 'R', 'a': 'u',
+            'c': 'g', 'b': 'v', 'd': 'h', 'g': 'c', 'h': 'd', 'k': 'm',
+            'm': 'k', 'n': 'n', 's': 's', 'r': 'y', 'u': 'a', 'w': 'w',
+            'v': 'b', 'y': 'r'
+        }
+        self.assertEqual(self.b1.complement_map(), exp)
+        self.assertEqual(RNASequence.complement_map(), exp)
+
+    def test_iupac_standard_characters(self):
+        exp = set("ACGUacgu")
+        self.assertEqual(self.b1.iupac_standard_characters(), exp)
+        self.assertEqual(RNASequence.iupac_standard_characters(), exp)
+
+    def test_iupac_degeneracies(self):
+        exp = {
+            'B': set(['C', 'U', 'G']), 'D': set(['A', 'U', 'G']),
+            'H': set(['A', 'C', 'U']), 'K': set(['U', 'G']),
+            'M': set(['A', 'C']), 'N': set(['A', 'C', 'U', 'G']),
+            'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'U']),
+            'V': set(['A', 'C', 'G']), 'Y': set(['C', 'U']),
+            'b': set(['c', 'u', 'g']), 'd': set(['a', 'u', 'g']),
+            'h': set(['a', 'c', 'u']), 'k': set(['u', 'g']),
+            'm': set(['a', 'c']), 'n': set(['a', 'c', 'u', 'g']),
+            's': set(['c', 'g']), 'r': set(['a', 'g']), 'w': set(['a', 'u']),
+            'v': set(['a', 'c', 'g']), 'y': set(['c', 'u'])
+        }
+        self.assertEqual(self.b1.iupac_degeneracies(), exp)
+        self.assertEqual(RNASequence.iupac_degeneracies(), exp)
+
+    def test_iupac_degenerate_characters(self):
+        exp = set(['B', 'D', 'H', 'K', 'M', 'N', 'S', 'R', 'W', 'V', 'Y',
+                   'b', 'd', 'h', 'k', 'm', 'n', 's', 'r', 'w', 'v', 'y'])
+        self.assertEqual(self.b1.iupac_degenerate_characters(), exp)
+        self.assertEqual(RNASequence.iupac_degenerate_characters(), exp)
+
+    def test_iupac_characters(self):
+        exp = {
+            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'W',
+            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
+            'u', 'w', 'v', 'y'
+        }
+        self.assertEqual(self.b1.iupac_characters(), exp)
+        self.assertEqual(RNASequence.iupac_characters(), exp)
+
+    def test_complement(self):
+        # use equals method to ensure that id, description, and quality are
+        # correctly propagated to the resulting sequence
+        self.assertTrue(self.b1.complement().equals(RNASequence("CUAAUGU")))
+
+        self.assertTrue(self.b2.complement().equals(
+            RNASequence("UGGCCAUGG", id="test-seq-2",
+                        description="A test sequence", quality=range(9))))
+
+        # b3 contains 'T', which has no RNA complement
+        self.assertRaises(BiologicalSequenceError, self.b3.complement)
+
+        self.assertTrue(self.b4.complement().equals(
+            RNASequence("KYWSRMBDHVN", id="degen",
+                        description="All of the degenerate bases")))
+
+        self.assertTrue(self.b5.complement().equals(
+            RNASequence(".C--UAAUG-U...")))
+
+    def test_reverse_complement(self):
+        # use equals method to ensure that id, description, and (reversed)
+        # quality scores are correctly propagated to the resulting sequence
+        self.assertTrue(self.b1.reverse_complement().equals(
+            RNASequence("UGUAAUC")))
+
+        self.assertTrue(self.b2.reverse_complement().equals(
+            RNASequence("GGUACCGGU", id="test-seq-2",
+                        description="A test sequence",
+                        quality=range(9)[::-1])))
+
+        self.assertRaises(BiologicalSequenceError, self.b3.reverse_complement)
+
+        self.assertTrue(self.b4.reverse_complement().equals(
+            RNASequence("NVHDBMRSWYK", id="degen",
+                        description="All of the degenerate bases")))
+
+    def test_unsupported_characters(self):
+        self.assertEqual(self.b1.unsupported_characters(), set())
+        self.assertEqual(self.b2.unsupported_characters(), set())
+        # 'T' is DNA-only, hence unsupported in RNA
+        self.assertEqual(self.b3.unsupported_characters(), set('T'))
+        self.assertEqual(self.b4.unsupported_characters(), set())
+
+    def test_has_unsupported_characters(self):
+        self.assertFalse(self.b1.has_unsupported_characters())
+        self.assertFalse(self.b2.has_unsupported_characters())
+        self.assertTrue(self.b3.has_unsupported_characters())
+        self.assertFalse(self.b4.has_unsupported_characters())
+
+    def test_is_reverse_complement(self):
+        self.assertFalse(self.b1.is_reverse_complement(self.b1))
+
+        # id, description, and quality scores should be ignored (only sequence
+        # data and type should be compared)
+        self.assertTrue(self.b1.is_reverse_complement(
+            RNASequence('UGUAAUC', quality=range(7))))
+
+        self.assertTrue(
+            self.b4.is_reverse_complement(RNASequence('NVHDBMRSWYK')))
+
+    def test_nondegenerates_invalid(self):
+        with self.assertRaises(BiologicalSequenceError):
+            list(RNASequence('AZA').nondegenerates())
+
+    def test_nondegenerates_empty(self):
+        self.assertEqual(list(self.empty.nondegenerates()), [self.empty])
+
+    def test_nondegenerates_no_degens(self):
+        self.assertEqual(list(self.b1.nondegenerates()), [self.b1])
+
+    def test_nondegenerates_all_degens(self):
+        # Same chars.
+        exp = [RNASequence('CC'), RNASequence('CG'), RNASequence('GC'),
+               RNASequence('GG')]
+        # Sort based on sequence string, as order is not guaranteed.
+        obs = sorted(RNASequence('SS').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+        # Different chars.
+        exp = [RNASequence('AC'), RNASequence('AG'), RNASequence('GC'),
+               RNASequence('GG')]
+        obs = sorted(RNASequence('RS').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+        # Odd number of chars. N expands to 4 characters (ACGU) for RNA.
+        obs = list(RNASequence('NNN').nondegenerates())
+        self.assertEqual(len(obs), 4**3)
+
+    def test_nondegenerates_mixed_degens(self):
+        exp = [RNASequence('AGC'), RNASequence('AGU'), RNASequence('GGC'),
+               RNASequence('GGU')]
+        obs = sorted(RNASequence('RGY').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+    def test_nondegenerates_gap_mixed_case(self):
+        exp = [RNASequence('-A.a'), RNASequence('-A.c'),
+               RNASequence('-C.a'), RNASequence('-C.c')]
+        obs = sorted(RNASequence('-M.m').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+
+class ProteinSequenceTests(TestCase):
+    """Unit tests for ProteinSequence: alphabets and IUPAC degeneracies."""
+
+    def setUp(self):
+        self.empty = ProteinSequence('')
+        self.p1 = ProteinSequence('GREG')
+        self.p2 = ProteinSequence(
+            'PRTEINSEQNCE', id="test-seq-2",
+            description="A test sequence")
+        self.p3 = ProteinSequence(
+            'PROTEIN', id="bad-seq-1",
+            description="Not a protein sequence")
+
+    def test_alphabet(self):
+        exp = {
+            'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',
+            'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c',
+            'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'p', 'q', 'r',
+            's', 't', 'v', 'w', 'x', 'y', 'z'
+        }
+
+        # Test calling from an instance and purely static context.
+        self.assertEqual(self.p1.alphabet(), exp)
+        self.assertEqual(ProteinSequence.alphabet(), exp)
+
+    def test_gap_alphabet(self):
+        self.assertEqual(self.p1.gap_alphabet(), set('-.'))
+
+    def test_iupac_standard_characters(self):
+        exp = set("ACDEFGHIKLMNPQRSTVWYacdefghiklmnpqrstvwy")
+        self.assertEqual(self.p1.iupac_standard_characters(), exp)
+        self.assertEqual(ProteinSequence.iupac_standard_characters(), exp)
+
+    def test_iupac_degeneracies(self):
+        # B = D/N, Z = E/Q, X = any standard amino acid
+        exp = {
+            'B': set(['D', 'N']), 'Z': set(['E', 'Q']),
+            'X': set(['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M',
+                      'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']),
+            'b': set(['d', 'n']), 'z': set(['e', 'q']),
+            'x': set(['a', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm',
+                      'n', 'p', 'q', 'r', 's', 't', 'v', 'w', 'y']),
+        }
+        self.assertEqual(self.p1.iupac_degeneracies(), exp)
+        self.assertEqual(ProteinSequence.iupac_degeneracies(), exp)
+
+    def test_iupac_degenerate_characters(self):
+        exp = set(['B', 'X', 'Z', 'b', 'x', 'z'])
+        self.assertEqual(self.p1.iupac_degenerate_characters(), exp)
+        self.assertEqual(ProteinSequence.iupac_degenerate_characters(), exp)
+
+    def test_iupac_characters(self):
+        exp = {
+            'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',
+            'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b',
+            'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'p', 'q',
+            'r', 's', 't', 'v', 'w', 'x', 'y', 'z'
+        }
+        self.assertEqual(self.p1.iupac_characters(), exp)
+        self.assertEqual(ProteinSequence.iupac_characters(), exp)
+
+    def test_nondegenerates(self):
+        exp = [ProteinSequence('AD'), ProteinSequence('AN')]
+        # Sort based on sequence string, as order is not guaranteed.
+        obs = sorted(ProteinSequence('AB').nondegenerates(), key=str)
+        self.assertEqual(obs, exp)
+
+if __name__ == "__main__":
+    main()
diff --git a/skbio/stats/__init__.py b/skbio/stats/__init__.py
new file mode 100644
index 0000000..c24b3ef
--- /dev/null
+++ b/skbio/stats/__init__.py
@@ -0,0 +1,49 @@
+"""
+Statistics (:mod:`skbio.stats`)
+===============================
+
+.. currentmodule:: skbio.stats
+
+This package contains various statistical methods, including ordination
+techniques and distance matrix-based statistics.
+
+Subpackages
+-----------
+
+.. autosummary::
+   :toctree: generated/
+
+   distance
+   ordination
+   spatial
+   gradient
+   power
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   p_value_to_str
+   subsample
+   subsample_counts
+   isubsample
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from ._misc import p_value_to_str
+from ._subsample import subsample, subsample_counts, isubsample
+
+# Public API of ``skbio.stats``; everything else in this subpackage is an
+# implementation detail.
+__all__ = ['p_value_to_str', 'subsample', 'subsample_counts', 'isubsample']
+
+# ``skbio.stats.test()`` runs this subpackage's test suite through numpy's
+# test runner.  NOTE(review): ``numpy.testing.Tester`` is deprecated/removed
+# in modern numpy; acceptable for the numpy versions this release targets.
+test = Tester().test
diff --git a/skbio/stats/__subsample.c b/skbio/stats/__subsample.c
new file mode 100644
index 0000000..cdf9b0f
--- /dev/null
+++ b/skbio/stats/__subsample.c
@@ -0,0 +1,6415 @@
+/* Generated by Cython 0.20.2 on Wed Nov 19 10:53:19 2014 */
+
+#define PY_SSIZE_T_CLEAN
+#ifndef CYTHON_USE_PYLONG_INTERNALS
+#ifdef PYLONG_BITS_IN_DIGIT
+#define CYTHON_USE_PYLONG_INTERNALS 0
+#else
+#include "pyconfig.h"
+#ifdef PYLONG_BITS_IN_DIGIT
+#define CYTHON_USE_PYLONG_INTERNALS 1
+#else
+#define CYTHON_USE_PYLONG_INTERNALS 0
+#endif
+#endif
+#endif
+#include "Python.h"
+#ifndef Py_PYTHON_H
+    #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02040000
+    #error Cython requires Python 2.4+.
+#else
+#define CYTHON_ABI "0_20_2"
+#include <stddef.h> /* For offsetof */
+#ifndef offsetof
+#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+  #ifndef __stdcall
+    #define __stdcall
+  #endif
+  #ifndef __cdecl
+    #define __cdecl
+  #endif
+  #ifndef __fastcall
+    #define __fastcall
+  #endif
+#endif
+#ifndef DL_IMPORT
+  #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+  #define DL_EXPORT(t) t
+#endif
+#ifndef PY_LONG_LONG
+  #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+  #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+#define CYTHON_COMPILING_IN_PYPY 1
+#define CYTHON_COMPILING_IN_CPYTHON 0
+#else
+#define CYTHON_COMPILING_IN_PYPY 0
+#define CYTHON_COMPILING_IN_CPYTHON 1
+#endif
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600
+#define Py_OptimizeFlag 0
+#endif
+#if PY_VERSION_HEX < 0x02050000
+  typedef int Py_ssize_t;
+  #define PY_SSIZE_T_MAX INT_MAX
+  #define PY_SSIZE_T_MIN INT_MIN
+  #define PY_FORMAT_SIZE_T ""
+  #define CYTHON_FORMAT_SSIZE_T ""
+  #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+  #define PyInt_AsSsize_t(o)   __Pyx_PyInt_As_int(o)
+  #define PyNumber_Index(o)    ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \
+                                (PyErr_Format(PyExc_TypeError, \
+                                              "expected index value, got %.200s", Py_TYPE(o)->tp_name), \
+                                 (PyObject*)0))
+  #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \
+                                  !PyComplex_Check(o))
+  #define PyIndex_Check __Pyx_PyIndex_Check
+  #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
+  #define __PYX_BUILD_PY_SSIZE_T "i"
+#else
+  #define __PYX_BUILD_PY_SSIZE_T "n"
+  #define CYTHON_FORMAT_SSIZE_T "z"
+  #define __Pyx_PyIndex_Check PyIndex_Check
+#endif
+#if PY_VERSION_HEX < 0x02060000
+  #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
+  #define Py_TYPE(ob)   (((PyObject*)(ob))->ob_type)
+  #define Py_SIZE(ob)   (((PyVarObject*)(ob))->ob_size)
+  #define PyVarObject_HEAD_INIT(type, size) \
+          PyObject_HEAD_INIT(type) size,
+  #define PyType_Modified(t)
+  typedef struct {
+     void *buf;
+     PyObject *obj;
+     Py_ssize_t len;
+     Py_ssize_t itemsize;
+     int readonly;
+     int ndim;
+     char *format;
+     Py_ssize_t *shape;
+     Py_ssize_t *strides;
+     Py_ssize_t *suboffsets;
+     void *internal;
+  } Py_buffer;
+  #define PyBUF_SIMPLE 0
+  #define PyBUF_WRITABLE 0x0001
+  #define PyBUF_FORMAT 0x0004
+  #define PyBUF_ND 0x0008
+  #define PyBUF_STRIDES (0x0010 | PyBUF_ND)
+  #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
+  #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
+  #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
+  #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
+  #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE)
+  #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE)
+  typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
+  typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
+#endif
+#if PY_MAJOR_VERSION < 3
+  #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+          PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+  #define __Pyx_DefaultClassType PyClass_Type
+#else
+  #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+          PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+  #define __Pyx_DefaultClassType PyType_Type
+#endif
+#if PY_VERSION_HEX < 0x02060000
+  #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict")
+#endif
+#if PY_MAJOR_VERSION >= 3
+  #define Py_TPFLAGS_CHECKTYPES 0
+  #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
+  #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#if PY_VERSION_HEX < 0x02060000
+  #define Py_TPFLAGS_HAVE_VERSION_TAG 0
+#endif
+#if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT)
+  #define Py_TPFLAGS_IS_ABSTRACT 0
+#endif
+#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
+  #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
+  #define CYTHON_PEP393_ENABLED 1
+  #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ? \
+                                              0 : _PyUnicode_Ready((PyObject *)(op)))
+  #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_LENGTH(u)
+  #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+  #define __Pyx_PyUnicode_KIND(u)         PyUnicode_KIND(u)
+  #define __Pyx_PyUnicode_DATA(u)         PyUnicode_DATA(u)
+  #define __Pyx_PyUnicode_READ(k, d, i)   PyUnicode_READ(k, d, i)
+#else
+  #define CYTHON_PEP393_ENABLED 0
+  #define __Pyx_PyUnicode_READY(op)       (0)
+  #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_SIZE(u)
+  #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
+  #define __Pyx_PyUnicode_KIND(u)         (sizeof(Py_UNICODE))
+  #define __Pyx_PyUnicode_DATA(u)         ((void*)PyUnicode_AS_UNICODE(u))
+  #define __Pyx_PyUnicode_READ(k, d, i)   ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+  #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)
+  #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)
+#else
+  #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)
+  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
+      PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#define __Pyx_PyString_FormatSafe(a, b)  ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyUnicode_FormatSafe(a, b)  ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if PY_MAJOR_VERSION >= 3
+  #define __Pyx_PyString_Format(a, b)  PyUnicode_Format(a, b)
+#else
+  #define __Pyx_PyString_Format(a, b)  PyString_Format(a, b)
+#endif
+#if PY_MAJOR_VERSION >= 3
+  #define PyBaseString_Type            PyUnicode_Type
+  #define PyStringObject               PyUnicodeObject
+  #define PyString_Type                PyUnicode_Type
+  #define PyString_Check               PyUnicode_Check
+  #define PyString_CheckExact          PyUnicode_CheckExact
+#endif
+#if PY_VERSION_HEX < 0x02060000
+  #define PyBytesObject                PyStringObject
+  #define PyBytes_Type                 PyString_Type
+  #define PyBytes_Check                PyString_Check
+  #define PyBytes_CheckExact           PyString_CheckExact
+  #define PyBytes_FromString           PyString_FromString
+  #define PyBytes_FromStringAndSize    PyString_FromStringAndSize
+  #define PyBytes_FromFormat           PyString_FromFormat
+  #define PyBytes_DecodeEscape         PyString_DecodeEscape
+  #define PyBytes_AsString             PyString_AsString
+  #define PyBytes_AsStringAndSize      PyString_AsStringAndSize
+  #define PyBytes_Size                 PyString_Size
+  #define PyBytes_AS_STRING            PyString_AS_STRING
+  #define PyBytes_GET_SIZE             PyString_GET_SIZE
+  #define PyBytes_Repr                 PyString_Repr
+  #define PyBytes_Concat               PyString_Concat
+  #define PyBytes_ConcatAndDel         PyString_ConcatAndDel
+#endif
+#if PY_MAJOR_VERSION >= 3
+  #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+  #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+  #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \
+                                         PyString_Check(obj) || PyUnicode_Check(obj))
+  #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
+#endif
+#if PY_VERSION_HEX < 0x02060000
+  #define PySet_Check(obj)             PyObject_TypeCheck(obj, &PySet_Type)
+  #define PyFrozenSet_Check(obj)       PyObject_TypeCheck(obj, &PyFrozenSet_Type)
+#endif
+#ifndef PySet_CheckExact
+  #define PySet_CheckExact(obj)        (Py_TYPE(obj) == &PySet_Type)
+#endif
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#if PY_MAJOR_VERSION >= 3
+  #define PyIntObject                  PyLongObject
+  #define PyInt_Type                   PyLong_Type
+  #define PyInt_Check(op)              PyLong_Check(op)
+  #define PyInt_CheckExact(op)         PyLong_CheckExact(op)
+  #define PyInt_FromString             PyLong_FromString
+  #define PyInt_FromUnicode            PyLong_FromUnicode
+  #define PyInt_FromLong               PyLong_FromLong
+  #define PyInt_FromSize_t             PyLong_FromSize_t
+  #define PyInt_FromSsize_t            PyLong_FromSsize_t
+  #define PyInt_AsLong                 PyLong_AsLong
+  #define PyInt_AS_LONG                PyLong_AS_LONG
+  #define PyInt_AsSsize_t              PyLong_AsSsize_t
+  #define PyInt_AsUnsignedLongMask     PyLong_AsUnsignedLongMask
+  #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+  #define PyNumber_Int                 PyNumber_Long
+#endif
+#if PY_MAJOR_VERSION >= 3
+  #define PyBoolObject                 PyLongObject
+#endif
+#if PY_VERSION_HEX < 0x030200A4
+  typedef long Py_hash_t;
+  #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+  #define __Pyx_PyInt_AsHash_t   PyInt_AsLong
+#else
+  #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+  #define __Pyx_PyInt_AsHash_t   PyInt_AsSsize_t
+#endif
+#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
+  #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
+  #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
+  #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
+#else
+  #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
+        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
+        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
+            (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
+  #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
+        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
+        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
+            (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
+  #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
+        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
+        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
+            (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
+#endif
+#if PY_MAJOR_VERSION >= 3
+  #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
+#endif
+#if PY_VERSION_HEX < 0x02050000
+  #define __Pyx_GetAttrString(o,n)   PyObject_GetAttrString((o),((char *)(n)))
+  #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
+  #define __Pyx_DelAttrString(o,n)   PyObject_DelAttrString((o),((char *)(n)))
+#else
+  #define __Pyx_GetAttrString(o,n)   PyObject_GetAttrString((o),(n))
+  #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
+  #define __Pyx_DelAttrString(o,n)   PyObject_DelAttrString((o),(n))
+#endif
+#if PY_VERSION_HEX < 0x02050000
+  #define __Pyx_NAMESTR(n) ((char *)(n))
+  #define __Pyx_DOCSTR(n)  ((char *)(n))
+#else
+  #define __Pyx_NAMESTR(n) (n)
+  #define __Pyx_DOCSTR(n)  (n)
+#endif
+#ifndef CYTHON_INLINE
+  #if defined(__GNUC__)
+    #define CYTHON_INLINE __inline__
+  #elif defined(_MSC_VER)
+    #define CYTHON_INLINE __inline
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_INLINE inline
+  #else
+    #define CYTHON_INLINE
+  #endif
+#endif
+#ifndef CYTHON_RESTRICT
+  #if defined(__GNUC__)
+    #define CYTHON_RESTRICT __restrict__
+  #elif defined(_MSC_VER) && _MSC_VER >= 1400
+    #define CYTHON_RESTRICT __restrict
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_RESTRICT restrict
+  #else
+    #define CYTHON_RESTRICT
+  #endif
+#endif
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+/* Fallback when <math.h> does not define NAN: fill every byte of a float
+   with 0xFF, giving an all-ones exponent and a nonzero mantissa, i.e. a
+   (quiet) NaN regardless of sign. */
+static CYTHON_INLINE float __PYX_NAN() {
+  /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
+   a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
+   a quiet NaN. */
+  float value;
+  memset(&value, 0xFF, sizeof(value));
+  return value;
+}
+#endif
+#ifdef __cplusplus
+/* Invoke T's destructor explicitly without releasing the storage; used by
+   generated code for C++ objects embedded inline in Python objects. */
+template<typename T>
+void __Pyx_call_destructor(T* x) {
+    x->~T();
+}
+#endif
+
+
+#if PY_MAJOR_VERSION >= 3
+  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)
+  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)
+#else
+  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)
+  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)
+#endif
+
+#ifndef __PYX_EXTERN_C
+  #ifdef __cplusplus
+    #define __PYX_EXTERN_C extern "C"
+  #else
+    #define __PYX_EXTERN_C extern
+  #endif
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+#define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#define __PYX_HAVE__skbio__stats____subsample
+#define __PYX_HAVE_API__skbio__stats____subsample
+#include "string.h"
+#include "stdio.h"
+#include "stdlib.h"
+#include "numpy/arrayobject.h"
+#include "numpy/ufuncobject.h"
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#ifdef PYREX_WITHOUT_ASSERTIONS
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+#     define CYTHON_UNUSED __attribute__ ((__unused__))
+#   else
+#     define CYTHON_UNUSED
+#   endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+#   define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+#   define CYTHON_UNUSED
+# endif
+#endif
+typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
+                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (    \
+    (sizeof(type) < sizeof(Py_ssize_t))  ||             \
+    (sizeof(type) > sizeof(Py_ssize_t) &&               \
+          likely(v < (type)PY_SSIZE_T_MAX ||            \
+                 v == (type)PY_SSIZE_T_MAX)  &&         \
+          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||       \
+                                v == (type)PY_SSIZE_T_MIN)))  ||  \
+    (sizeof(type) == sizeof(Py_ssize_t) &&              \
+          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||        \
+                               v == (type)PY_SSIZE_T_MAX)))  )
+static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString        PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if PY_MAJOR_VERSION < 3
+    #define __Pyx_PyStr_FromString        __Pyx_PyBytes_FromString
+    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+    #define __Pyx_PyStr_FromString        __Pyx_PyUnicode_FromString
+    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyObject_AsSString(s)    ((signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s)    ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromUString(s)  __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromUString(s)   __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromUString(s)   __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromUString(s)     __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((const char*)s)
+#if PY_MAJOR_VERSION < 3
+/* Python 2 lacks Py_UNICODE_strlen: count Py_UNICODE code units up to,
+   but not including, the NUL terminator. */
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
+{
+    const Py_UNICODE *u_end = u;
+    while (*u_end++) ;
+    return (size_t)(u_end - u - 1);
+}
+#else
+#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
+#endif
+#define __Pyx_PyUnicode_FromUnicode(u)       PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode            PyUnicode_AsUnicode
+#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
+#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#else
+#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#endif
+#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+/* Set to 1 when the interpreter's default encoding is not plain "ascii". */
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+/* Probe sys.getdefaultencoding() at module-init time.  If the default
+   encoding is not "ascii", verify it is at least an ASCII superset: every
+   byte 0..127 must round-trip through it unchanged; otherwise raise
+   ValueError.  Returns 0 on success, -1 with a Python exception set on
+   failure. */
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+    PyObject* sys;
+    PyObject* default_encoding = NULL;
+    PyObject* ascii_chars_u = NULL;
+    PyObject* ascii_chars_b = NULL;
+    const char* default_encoding_c;
+    sys = PyImport_ImportModule("sys");
+    if (!sys) goto bad;
+    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+    Py_DECREF(sys);
+    if (!default_encoding) goto bad;
+    default_encoding_c = PyBytes_AsString(default_encoding);
+    if (!default_encoding_c) goto bad;
+    if (strcmp(default_encoding_c, "ascii") == 0) {
+        __Pyx_sys_getdefaultencoding_not_ascii = 0;
+    } else {
+        /* Build the 128 ASCII code points, then check they survive an
+           encode through the default encoding byte-for-byte. */
+        char ascii_chars[128];
+        int c;
+        for (c = 0; c < 128; c++) {
+            ascii_chars[c] = c;
+        }
+        __Pyx_sys_getdefaultencoding_not_ascii = 1;
+        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+        if (!ascii_chars_u) goto bad;
+        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+            PyErr_Format(
+                PyExc_ValueError,
+                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+                default_encoding_c);
+            goto bad;
+        }
+        Py_DECREF(ascii_chars_u);
+        Py_DECREF(ascii_chars_b);
+    }
+    Py_DECREF(default_encoding);
+    return 0;
+bad:
+    /* XDECREF: any of these may still be NULL depending on where we failed. */
+    Py_XDECREF(default_encoding);
+    Py_XDECREF(ascii_chars_u);
+    Py_XDECREF(ascii_chars_b);
+    return -1;
+}
+#endif
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+/* Name of the interpreter's default string encoding, cached as a malloc'd
+   C string for the generated string-conversion helpers. */
+static char* __PYX_DEFAULT_STRING_ENCODING;
+/* Cache sys.getdefaultencoding() into __PYX_DEFAULT_STRING_ENCODING.
+   Returns 0 on success, -1 with a Python exception set on failure. */
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+    PyObject* sys;
+    PyObject* default_encoding = NULL;
+    char* default_encoding_c;
+    sys = PyImport_ImportModule("sys");
+    if (!sys) goto bad;
+    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+    Py_DECREF(sys);
+    if (!default_encoding) goto bad;
+    default_encoding_c = PyBytes_AsString(default_encoding);
+    if (!default_encoding_c) goto bad;
+    /* BUGFIX: allocate strlen(...) + 1 bytes.  The generated code called
+       malloc(strlen(default_encoding_c)), leaving no room for the NUL
+       terminator, so the strcpy below overflowed the buffer by one byte
+       (fixed upstream in later Cython releases). */
+    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
+    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+    Py_DECREF(default_encoding);
+    return 0;
+bad:
+    Py_XDECREF(default_encoding);
+    return -1;
+}
+#endif
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__)     && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+  #define likely(x)   __builtin_expect(!!(x), 1)
+  #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+  #define likely(x)   (x)
+  #define unlikely(x) (x)
+#endif /* __GNUC__ */
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+#if !defined(CYTHON_CCOMPLEX)
+  #if defined(__cplusplus)
+    #define CYTHON_CCOMPLEX 1
+  #elif defined(_Complex_I)
+    #define CYTHON_CCOMPLEX 1
+  #else
+    #define CYTHON_CCOMPLEX 0
+  #endif
+#endif
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    #include <complex>
+  #else
+    #include <complex.h>
+  #endif
+#endif
+#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
+  #undef _Complex_I
+  #define _Complex_I 1.0fj
+#endif
+
+
+static const char *__pyx_f[] = {
+  "__subsample.pyx",
+  "__init__.pxd",
+  "type.pxd",
+};
+#define IS_UNSIGNED(type) (((type) -1) > 0)
+struct __Pyx_StructField_;
+#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
+typedef struct {
+  const char* name; /* for error messages only */
+  struct __Pyx_StructField_* fields;
+  size_t size;     /* sizeof(type) */
+  size_t arraysize[8]; /* length of array in each dimension */
+  int ndim;
+  char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */
+  char is_unsigned;
+  int flags;
+} __Pyx_TypeInfo;
+typedef struct __Pyx_StructField_ {
+  __Pyx_TypeInfo* type;
+  const char* name;
+  size_t offset;
+} __Pyx_StructField;
+typedef struct {
+  __Pyx_StructField* field;
+  size_t parent_offset;
+} __Pyx_BufFmt_StackElem;
+typedef struct {
+  __Pyx_StructField root;
+  __Pyx_BufFmt_StackElem* head;
+  size_t fmt_offset;
+  size_t new_count, enc_count;
+  size_t struct_alignment;
+  int is_complex;
+  char enc_type;
+  char new_packmode;
+  char enc_packmode;
+  char is_valid_array;
+} __Pyx_BufFmt_Context;
+
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":723
+ * # in Cython to enable them only on the right systems.
+ * 
+ * ctypedef npy_int8       int8_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_int16      int16_t
+ * ctypedef npy_int32      int32_t
+ */
+typedef npy_int8 __pyx_t_5numpy_int8_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":724
+ * 
+ * ctypedef npy_int8       int8_t
+ * ctypedef npy_int16      int16_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_int32      int32_t
+ * ctypedef npy_int64      int64_t
+ */
+typedef npy_int16 __pyx_t_5numpy_int16_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725
+ * ctypedef npy_int8       int8_t
+ * ctypedef npy_int16      int16_t
+ * ctypedef npy_int32      int32_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_int64      int64_t
+ * #ctypedef npy_int96      int96_t
+ */
+typedef npy_int32 __pyx_t_5numpy_int32_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
+ * ctypedef npy_int16      int16_t
+ * ctypedef npy_int32      int32_t
+ * ctypedef npy_int64      int64_t             # <<<<<<<<<<<<<<
+ * #ctypedef npy_int96      int96_t
+ * #ctypedef npy_int128     int128_t
+ */
+typedef npy_int64 __pyx_t_5numpy_int64_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":730
+ * #ctypedef npy_int128     int128_t
+ * 
+ * ctypedef npy_uint8      uint8_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_uint16     uint16_t
+ * ctypedef npy_uint32     uint32_t
+ */
+typedef npy_uint8 __pyx_t_5numpy_uint8_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":731
+ * 
+ * ctypedef npy_uint8      uint8_t
+ * ctypedef npy_uint16     uint16_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_uint32     uint32_t
+ * ctypedef npy_uint64     uint64_t
+ */
+typedef npy_uint16 __pyx_t_5numpy_uint16_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732
+ * ctypedef npy_uint8      uint8_t
+ * ctypedef npy_uint16     uint16_t
+ * ctypedef npy_uint32     uint32_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_uint64     uint64_t
+ * #ctypedef npy_uint96     uint96_t
+ */
+typedef npy_uint32 __pyx_t_5numpy_uint32_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
+ * ctypedef npy_uint16     uint16_t
+ * ctypedef npy_uint32     uint32_t
+ * ctypedef npy_uint64     uint64_t             # <<<<<<<<<<<<<<
+ * #ctypedef npy_uint96     uint96_t
+ * #ctypedef npy_uint128    uint128_t
+ */
+typedef npy_uint64 __pyx_t_5numpy_uint64_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":737
+ * #ctypedef npy_uint128    uint128_t
+ * 
+ * ctypedef npy_float32    float32_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_float64    float64_t
+ * #ctypedef npy_float80    float80_t
+ */
+typedef npy_float32 __pyx_t_5numpy_float32_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":738
+ * 
+ * ctypedef npy_float32    float32_t
+ * ctypedef npy_float64    float64_t             # <<<<<<<<<<<<<<
+ * #ctypedef npy_float80    float80_t
+ * #ctypedef npy_float128   float128_t
+ */
+typedef npy_float64 __pyx_t_5numpy_float64_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":747
+ * # The int types are mapped a bit surprising --
+ * # numpy.int corresponds to 'l' and numpy.long to 'q'
+ * ctypedef npy_long       int_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_longlong   long_t
+ * ctypedef npy_longlong   longlong_t
+ */
+typedef npy_long __pyx_t_5numpy_int_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":748
+ * # numpy.int corresponds to 'l' and numpy.long to 'q'
+ * ctypedef npy_long       int_t
+ * ctypedef npy_longlong   long_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_longlong   longlong_t
+ * 
+ */
+typedef npy_longlong __pyx_t_5numpy_long_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749
+ * ctypedef npy_long       int_t
+ * ctypedef npy_longlong   long_t
+ * ctypedef npy_longlong   longlong_t             # <<<<<<<<<<<<<<
+ * 
+ * ctypedef npy_ulong      uint_t
+ */
+typedef npy_longlong __pyx_t_5numpy_longlong_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
+ * ctypedef npy_longlong   longlong_t
+ * 
+ * ctypedef npy_ulong      uint_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_ulonglong  ulong_t
+ * ctypedef npy_ulonglong  ulonglong_t
+ */
+typedef npy_ulong __pyx_t_5numpy_uint_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
+ * 
+ * ctypedef npy_ulong      uint_t
+ * ctypedef npy_ulonglong  ulong_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_ulonglong  ulonglong_t
+ * 
+ */
+typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753
+ * ctypedef npy_ulong      uint_t
+ * ctypedef npy_ulonglong  ulong_t
+ * ctypedef npy_ulonglong  ulonglong_t             # <<<<<<<<<<<<<<
+ * 
+ * ctypedef npy_intp       intp_t
+ */
+typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
+ * ctypedef npy_ulonglong  ulonglong_t
+ * 
+ * ctypedef npy_intp       intp_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_uintp      uintp_t
+ * 
+ */
+typedef npy_intp __pyx_t_5numpy_intp_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
+ * 
+ * ctypedef npy_intp       intp_t
+ * ctypedef npy_uintp      uintp_t             # <<<<<<<<<<<<<<
+ * 
+ * ctypedef npy_double     float_t
+ */
+typedef npy_uintp __pyx_t_5numpy_uintp_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
+ * ctypedef npy_uintp      uintp_t
+ * 
+ * ctypedef npy_double     float_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_double     double_t
+ * ctypedef npy_longdouble longdouble_t
+ */
+typedef npy_double __pyx_t_5numpy_float_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
+ * 
+ * ctypedef npy_double     float_t
+ * ctypedef npy_double     double_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_longdouble longdouble_t
+ * 
+ */
+typedef npy_double __pyx_t_5numpy_double_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760
+ * ctypedef npy_double     float_t
+ * ctypedef npy_double     double_t
+ * ctypedef npy_longdouble longdouble_t             # <<<<<<<<<<<<<<
+ * 
+ * ctypedef npy_cfloat      cfloat_t
+ */
+typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    typedef ::std::complex< float > __pyx_t_float_complex;
+  #else
+    typedef float _Complex __pyx_t_float_complex;
+  #endif
+#else
+    typedef struct { float real, imag; } __pyx_t_float_complex;
+#endif
+
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    typedef ::std::complex< double > __pyx_t_double_complex;
+  #else
+    typedef double _Complex __pyx_t_double_complex;
+  #endif
+#else
+    typedef struct { double real, imag; } __pyx_t_double_complex;
+#endif
+
+
+/*--- Type declarations ---*/
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
+ * ctypedef npy_longdouble longdouble_t
+ * 
+ * ctypedef npy_cfloat      cfloat_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_cdouble     cdouble_t
+ * ctypedef npy_clongdouble clongdouble_t
+ */
+typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
+ * 
+ * ctypedef npy_cfloat      cfloat_t
+ * ctypedef npy_cdouble     cdouble_t             # <<<<<<<<<<<<<<
+ * ctypedef npy_clongdouble clongdouble_t
+ * 
+ */
+typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764
+ * ctypedef npy_cfloat      cfloat_t
+ * ctypedef npy_cdouble     cdouble_t
+ * ctypedef npy_clongdouble clongdouble_t             # <<<<<<<<<<<<<<
+ * 
+ * ctypedef npy_cdouble     complex_t
+ */
+typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
+ * ctypedef npy_clongdouble clongdouble_t
+ * 
+ * ctypedef npy_cdouble     complex_t             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object PyArray_MultiIterNew1(a):
+ */
+typedef npy_cdouble __pyx_t_5numpy_complex_t;
+#ifndef CYTHON_REFNANNY
+  #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+  typedef struct {
+    void (*INCREF)(void*, PyObject*, int);
+    void (*DECREF)(void*, PyObject*, int);
+    void (*GOTREF)(void*, PyObject*, int);
+    void (*GIVEREF)(void*, PyObject*, int);
+    void* (*SetupContext)(const char*, int, const char*);
+    void (*FinishContext)(void**);
+  } __Pyx_RefNannyAPIStruct;
+  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
+  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+          if (acquire_gil) { \
+              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
+              PyGILState_Release(__pyx_gilstate_save); \
+          } else { \
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
+          }
+#else
+  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+          __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+#endif
+  #define __Pyx_RefNannyFinishContext() \
+          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+  #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+  #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+  #define __Pyx_GOTREF(r)  __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+  #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+  #define __Pyx_XINCREF(r)  do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+  #define __Pyx_XDECREF(r)  do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+  #define __Pyx_XGOTREF(r)  do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+  #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+  #define __Pyx_RefNannyDeclarations
+  #define __Pyx_RefNannySetupContext(name, acquire_gil)
+  #define __Pyx_RefNannyFinishContext()
+  #define __Pyx_INCREF(r) Py_INCREF(r)
+  #define __Pyx_DECREF(r) Py_DECREF(r)
+  #define __Pyx_GOTREF(r)
+  #define __Pyx_GIVEREF(r)
+  #define __Pyx_XINCREF(r) Py_XINCREF(r)
+  #define __Pyx_XDECREF(r) Py_XDECREF(r)
+  #define __Pyx_XGOTREF(r)
+  #define __Pyx_XGIVEREF(r)
+#endif /* CYTHON_REFNANNY */
+#define __Pyx_XDECREF_SET(r, v) do {                            \
+        PyObject *tmp = (PyObject *) r;                         \
+        r = v; __Pyx_XDECREF(tmp);                              \
+    } while (0)
+#define __Pyx_DECREF_SET(r, v) do {                             \
+        PyObject *tmp = (PyObject *) r;                         \
+        r = v; __Pyx_DECREF(tmp);                               \
+    } while (0)
+#define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
+
+/* Attribute lookup helper used throughout this generated module.
+ * On CPython it bypasses PyObject_GetAttr's generic dispatch by calling
+ * the type's tp_getattro slot directly when present; on Python 2 it also
+ * honours the legacy tp_getattr slot (which takes a char* name).  On
+ * non-CPython implementations it is simply an alias for PyObject_GetAttr.
+ * NOTE(review): Cython-generated code — do not hand-edit the logic. */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
+    PyTypeObject* tp = Py_TYPE(obj);
+    /* Fast path: most types fill tp_getattro; call the slot directly. */
+    if (likely(tp->tp_getattro))
+        return tp->tp_getattro(obj, attr_name);
+#if PY_MAJOR_VERSION < 3
+    /* Python 2 only: old-style tp_getattr expects a C string name. */
+    if (likely(tp->tp_getattr))
+        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
+#endif
+    /* Generic fallback covers types with neither slot set. */
+    return PyObject_GetAttr(obj, attr_name);
+}
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/
+
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
+
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/
+
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
+    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
+    const char* function_name); /*proto*/
+
+static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
+    const char *name, int exact); /*proto*/
+
+static CYTHON_INLINE int  __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
+    __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
+static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
+
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
+
+static void __Pyx_RaiseBufferFallbackError(void); /*proto*/
+
+static void __Pyx_RaiseBufferIndexError(int axis); /*proto*/
+
+#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
+        PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+        PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
+        int has_cstart, int has_cstop, int wraparound);
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
+
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
+
+typedef struct {
+  Py_ssize_t shape, strides, suboffsets;
+} __Pyx_Buf_DimInfo;
+typedef struct {
+  size_t refcount;
+  Py_buffer pybuffer;
+} __Pyx_Buffer;
+typedef struct {
+  __Pyx_Buffer *rcbuffer;
+  char *data;
+  __Pyx_Buf_DimInfo diminfo[8];
+} __Pyx_LocalBuf_ND;
+
+#if PY_MAJOR_VERSION < 3
+    static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
+    static void __Pyx_ReleaseBuffer(Py_buffer *view);
+#else
+    #define __Pyx_GetBuffer PyObject_GetBuffer
+    #define __Pyx_ReleaseBuffer PyBuffer_Release
+#endif
+
+
+static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value);
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value);
+
+static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *);
+
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    #define __Pyx_CREAL(z) ((z).real())
+    #define __Pyx_CIMAG(z) ((z).imag())
+  #else
+    #define __Pyx_CREAL(z) (__real__(z))
+    #define __Pyx_CIMAG(z) (__imag__(z))
+  #endif
+#else
+    #define __Pyx_CREAL(z) ((z).real)
+    #define __Pyx_CIMAG(z) ((z).imag)
+#endif
+#if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX
+    #define __Pyx_SET_CREAL(z,x) ((z).real(x))
+    #define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
+#else
+    #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
+    #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
+#endif
+
+static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
+
+#if CYTHON_CCOMPLEX
+    #define __Pyx_c_eqf(a, b)   ((a)==(b))
+    #define __Pyx_c_sumf(a, b)  ((a)+(b))
+    #define __Pyx_c_difff(a, b) ((a)-(b))
+    #define __Pyx_c_prodf(a, b) ((a)*(b))
+    #define __Pyx_c_quotf(a, b) ((a)/(b))
+    #define __Pyx_c_negf(a)     (-(a))
+  #ifdef __cplusplus
+    #define __Pyx_c_is_zerof(z) ((z)==(float)0)
+    #define __Pyx_c_conjf(z)    (::std::conj(z))
+    #if 1
+        #define __Pyx_c_absf(z)     (::std::abs(z))
+        #define __Pyx_c_powf(a, b)  (::std::pow(a, b))
+    #endif
+  #else
+    #define __Pyx_c_is_zerof(z) ((z)==0)
+    #define __Pyx_c_conjf(z)    (conjf(z))
+    #if 1
+        #define __Pyx_c_absf(z)     (cabsf(z))
+        #define __Pyx_c_powf(a, b)  (cpowf(a, b))
+    #endif
+ #endif
+#else
+    static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
+    static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
+    #if 1
+        static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
+        static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
+    #endif
+#endif
+
+static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
+
+#if CYTHON_CCOMPLEX
+    #define __Pyx_c_eq(a, b)   ((a)==(b))
+    #define __Pyx_c_sum(a, b)  ((a)+(b))
+    #define __Pyx_c_diff(a, b) ((a)-(b))
+    #define __Pyx_c_prod(a, b) ((a)*(b))
+    #define __Pyx_c_quot(a, b) ((a)/(b))
+    #define __Pyx_c_neg(a)     (-(a))
+  #ifdef __cplusplus
+    #define __Pyx_c_is_zero(z) ((z)==(double)0)
+    #define __Pyx_c_conj(z)    (::std::conj(z))
+    #if 1
+        #define __Pyx_c_abs(z)     (::std::abs(z))
+        #define __Pyx_c_pow(a, b)  (::std::pow(a, b))
+    #endif
+  #else
+    #define __Pyx_c_is_zero(z) ((z)==0)
+    #define __Pyx_c_conj(z)    (conj(z))
+    #if 1
+        #define __Pyx_c_abs(z)     (cabs(z))
+        #define __Pyx_c_pow(a, b)  (cpow(a, b))
+    #endif
+ #endif
+#else
+    static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
+    static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
+    #if 1
+        static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
+        static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
+    #endif
+#endif
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
+
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
+
+static int __Pyx_check_binary_version(void);
+
+#if !defined(__Pyx_PyIdentifier_FromString)
+#if PY_MAJOR_VERSION < 3
+  #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
+#else
+  #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
+#endif
+#endif
+
+static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
+
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);  /*proto*/
+
+typedef struct {
+    int code_line;
+    PyCodeObject* code_object;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+    int count;
+    int max_count;
+    __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+                               int py_line, const char *filename); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+
+/* Module declarations from 'cpython.buffer' */
+
+/* Module declarations from 'cpython.ref' */
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.type' */
+static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
+
+/* Module declarations from 'libc.stdlib' */
+
+/* Module declarations from 'numpy' */
+
+/* Module declarations from 'numpy' */
+static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
+static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
+static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
+static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
+static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
+static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
+
+/* Module declarations from 'skbio.stats.__subsample' */
+static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t = { "int64_t", NULL, sizeof(__pyx_t_5numpy_int64_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int64_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int64_t), 0 };
+#define __Pyx_MODULE_NAME "skbio.stats.__subsample"
+int __pyx_module_is_main_skbio__stats____subsample = 0;
+
+/* Implementation of 'skbio.stats.__subsample' */
+static PyObject *__pyx_builtin_range;
+static PyObject *__pyx_builtin_ValueError;
+static PyObject *__pyx_builtin_RuntimeError;
+static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_replacement(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_counts, PyObject *__pyx_v_n, PyObject *__pyx_v_counts_sum); /* proto */
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
+static char __pyx_k_B[] = "B";
+static char __pyx_k_H[] = "H";
+static char __pyx_k_I[] = "I";
+static char __pyx_k_L[] = "L";
+static char __pyx_k_O[] = "O";
+static char __pyx_k_Q[] = "Q";
+static char __pyx_k_b[] = "b";
+static char __pyx_k_d[] = "d";
+static char __pyx_k_f[] = "f";
+static char __pyx_k_g[] = "g";
+static char __pyx_k_h[] = "h";
+static char __pyx_k_i[] = "i";
+static char __pyx_k_j[] = "j";
+static char __pyx_k_l[] = "l";
+static char __pyx_k_n[] = "n";
+static char __pyx_k_q[] = "q";
+static char __pyx_k_Zd[] = "Zd";
+static char __pyx_k_Zf[] = "Zf";
+static char __pyx_k_Zg[] = "Zg";
+static char __pyx_k_np[] = "np";
+static char __pyx_k_cnt[] = "cnt";
+static char __pyx_k_idx[] = "idx";
+static char __pyx_k_main[] = "__main__";
+static char __pyx_k_test[] = "__test__";
+static char __pyx_k_dtype[] = "dtype";
+static char __pyx_k_empty[] = "empty";
+static char __pyx_k_numpy[] = "numpy";
+static char __pyx_k_range[] = "range";
+static char __pyx_k_counts[] = "counts";
+static char __pyx_k_import[] = "__import__";
+static char __pyx_k_random[] = "random";
+static char __pyx_k_result[] = "result";
+static char __pyx_k_permuted[] = "permuted";
+static char __pyx_k_unpacked[] = "unpacked";
+static char __pyx_k_ValueError[] = "ValueError";
+static char __pyx_k_counts_sum[] = "counts_sum";
+static char __pyx_k_zeros_like[] = "zeros_like";
+static char __pyx_k_permutation[] = "permutation";
+static char __pyx_k_RuntimeError[] = "RuntimeError";
+static char __pyx_k_unpacked_idx[] = "unpacked_idx";
+static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
+static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer";
+static char __pyx_k_skbio_stats___subsample[] = "skbio.stats.__subsample";
+static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
+static char __pyx_k_Users_jairideout_dev_scikit_bio[] = "/Users/jairideout/dev/scikit-bio/skbio/stats/__subsample.pyx";
+static char __pyx_k_subsample_counts_without_replac[] = "_subsample_counts_without_replacement";
+static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
+static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
+static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
+static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
+static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
+static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
+static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
+static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
+static PyObject *__pyx_n_s_RuntimeError;
+static PyObject *__pyx_kp_s_Users_jairideout_dev_scikit_bio;
+static PyObject *__pyx_n_s_ValueError;
+static PyObject *__pyx_n_s_cnt;
+static PyObject *__pyx_n_s_counts;
+static PyObject *__pyx_n_s_counts_sum;
+static PyObject *__pyx_n_s_dtype;
+static PyObject *__pyx_n_s_empty;
+static PyObject *__pyx_n_s_i;
+static PyObject *__pyx_n_s_idx;
+static PyObject *__pyx_n_s_import;
+static PyObject *__pyx_n_s_j;
+static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_n;
+static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
+static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
+static PyObject *__pyx_n_s_np;
+static PyObject *__pyx_n_s_numpy;
+static PyObject *__pyx_n_s_permutation;
+static PyObject *__pyx_n_s_permuted;
+static PyObject *__pyx_n_s_pyx_getbuffer;
+static PyObject *__pyx_n_s_pyx_releasebuffer;
+static PyObject *__pyx_n_s_random;
+static PyObject *__pyx_n_s_range;
+static PyObject *__pyx_n_s_result;
+static PyObject *__pyx_n_s_skbio_stats___subsample;
+static PyObject *__pyx_n_s_subsample_counts_without_replac;
+static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
+static PyObject *__pyx_n_s_unpacked;
+static PyObject *__pyx_n_s_unpacked_idx;
+static PyObject *__pyx_n_s_zeros_like;
+static PyObject *__pyx_tuple_;
+static PyObject *__pyx_tuple__2;
+static PyObject *__pyx_tuple__3;
+static PyObject *__pyx_tuple__4;
+static PyObject *__pyx_tuple__5;
+static PyObject *__pyx_tuple__6;
+static PyObject *__pyx_tuple__7;
+static PyObject *__pyx_codeobj__8;
+
+/* "skbio/stats/__subsample.pyx":15
+ * 
+ * 
+ * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
+ *     cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
+ *     cdef:
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement = {__Pyx_NAMESTR("_subsample_counts_without_replacement"), (PyCFunction)__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)};
+/* Python-level wrapper for
+ * skbio.stats.__subsample._subsample_counts_without_replacement(counts, n, counts_sum).
+ * Generated by Cython: unpacks the three required arguments from
+ * *args/**kwds (positional or keyword), type-checks `counts` as a numpy
+ * ndarray, then delegates to the __pyx_pf_... implementation function.
+ * Returns NULL with an exception set on bad arguments.
+ * NOTE(review): auto-generated — the switch fall-throughs below are
+ * intentional; do not hand-edit. */
+static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+  PyArrayObject *__pyx_v_counts = 0;
+  PyObject *__pyx_v_n = 0;
+  PyObject *__pyx_v_counts_sum = 0;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  PyObject *__pyx_r = 0;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("_subsample_counts_without_replacement (wrapper)", 0);
+  {
+    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_counts,&__pyx_n_s_n,&__pyx_n_s_counts_sum,0};
+    PyObject* values[3] = {0,0,0};
+    /* Keyword-call path: collect positionals first (fall-through fills
+     * every slot up to pos_args), then resolve remaining slots from kwds. */
+    if (unlikely(__pyx_kwds)) {
+      Py_ssize_t kw_args;
+      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+      switch (pos_args) {
+        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+        case  0: break;
+        default: goto __pyx_L5_argtuple_error;
+      }
+      kw_args = PyDict_Size(__pyx_kwds);
+      /* Fill the slots not supplied positionally; all three arguments are
+       * required, so a missing keyword is an immediate error. */
+      switch (pos_args) {
+        case  0:
+        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_counts)) != 0)) kw_args--;
+        else goto __pyx_L5_argtuple_error;
+        case  1:
+        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+        case  2:
+        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_counts_sum)) != 0)) kw_args--;
+        else {
+          __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        }
+      }
+      /* Any keyword left over at this point is unexpected/duplicate. */
+      if (unlikely(kw_args > 0)) {
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_subsample_counts_without_replacement") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+      }
+    } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+      goto __pyx_L5_argtuple_error;
+    } else {
+      /* Pure positional call with exactly 3 arguments. */
+      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+    }
+    __pyx_v_counts = ((PyArrayObject *)values[0]);
+    __pyx_v_n = values[1];
+    __pyx_v_counts_sum = values[2];
+  }
+  goto __pyx_L4_argument_unpacking_done;
+  __pyx_L5_argtuple_error:;
+  __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __pyx_L3_error:;
+  __Pyx_AddTraceback("skbio.stats.__subsample._subsample_counts_without_replacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __Pyx_RefNannyFinishContext();
+  return NULL;
+  __pyx_L4_argument_unpacking_done:;
+  /* Enforce that `counts` is a numpy.ndarray (None not allowed: last arg 0
+   * is `exact`, third arg 1 is `none_allowed` — see __Pyx_ArgTypeTest proto). */
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_counts), __pyx_ptype_5numpy_ndarray, 1, "counts", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_r = __pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_replacement(__pyx_self, __pyx_v_counts, __pyx_v_n, __pyx_v_counts_sum);
+
+  /* function exit code */
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_replacement(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_counts, PyObject *__pyx_v_n, PyObject *__pyx_v_counts_sum) {
+  PyArrayObject *__pyx_v_result = 0;
+  PyArrayObject *__pyx_v_permuted = 0;
+  PyArrayObject *__pyx_v_unpacked = 0;
+  __pyx_t_5numpy_int64_t __pyx_v_cnt;
+  Py_ssize_t __pyx_v_unpacked_idx;
+  Py_ssize_t __pyx_v_i;
+  CYTHON_UNUSED Py_ssize_t __pyx_v_j;
+  npy_intp __pyx_v_idx;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_counts;
+  __Pyx_Buffer __pyx_pybuffer_counts;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_permuted;
+  __Pyx_Buffer __pyx_pybuffer_permuted;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_result;
+  __Pyx_Buffer __pyx_pybuffer_result;
+  __Pyx_LocalBuf_ND __pyx_pybuffernd_unpacked;
+  __Pyx_Buffer __pyx_pybuffer_unpacked;
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  PyObject *__pyx_t_2 = NULL;
+  PyObject *__pyx_t_3 = NULL;
+  PyObject *__pyx_t_4 = NULL;
+  PyArrayObject *__pyx_t_5 = NULL;
+  int __pyx_t_6;
+  PyObject *__pyx_t_7 = NULL;
+  PyObject *__pyx_t_8 = NULL;
+  PyObject *__pyx_t_9 = NULL;
+  npy_intp __pyx_t_10;
+  Py_ssize_t __pyx_t_11;
+  Py_ssize_t __pyx_t_12;
+  __pyx_t_5numpy_int64_t __pyx_t_13;
+  Py_ssize_t __pyx_t_14;
+  Py_ssize_t __pyx_t_15;
+  npy_intp __pyx_t_16;
+  npy_intp __pyx_t_17;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_subsample_counts_without_replacement", 0);
+  __pyx_pybuffer_result.pybuffer.buf = NULL;
+  __pyx_pybuffer_result.refcount = 0;
+  __pyx_pybuffernd_result.data = NULL;
+  __pyx_pybuffernd_result.rcbuffer = &__pyx_pybuffer_result;
+  __pyx_pybuffer_permuted.pybuffer.buf = NULL;
+  __pyx_pybuffer_permuted.refcount = 0;
+  __pyx_pybuffernd_permuted.data = NULL;
+  __pyx_pybuffernd_permuted.rcbuffer = &__pyx_pybuffer_permuted;
+  __pyx_pybuffer_unpacked.pybuffer.buf = NULL;
+  __pyx_pybuffer_unpacked.refcount = 0;
+  __pyx_pybuffernd_unpacked.data = NULL;
+  __pyx_pybuffernd_unpacked.rcbuffer = &__pyx_pybuffer_unpacked;
+  __pyx_pybuffer_counts.pybuffer.buf = NULL;
+  __pyx_pybuffer_counts.refcount = 0;
+  __pyx_pybuffernd_counts.data = NULL;
+  __pyx_pybuffernd_counts.rcbuffer = &__pyx_pybuffer_counts;
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_counts.rcbuffer->pybuffer, (PyObject*)__pyx_v_counts, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_pybuffernd_counts.diminfo[0].strides = __pyx_pybuffernd_counts.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_counts.diminfo[0].shape = __pyx_pybuffernd_counts.rcbuffer->pybuffer.shape[0];
+
+  /* "skbio/stats/__subsample.pyx":22
+ *         Py_ssize_t unpacked_idx, i, j
+ * 
+ *     unpacked = np.empty(counts_sum, dtype=int)             # <<<<<<<<<<<<<<
+ *     unpacked_idx = 0
+ *     for i in range(counts.shape[0]):
+ */
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_INCREF(__pyx_v_counts_sum);
+  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_counts_sum);
+  __Pyx_GIVEREF(__pyx_v_counts_sum);
+  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, ((PyObject *)((PyObject*)(&PyInt_Type)))) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_unpacked.rcbuffer->pybuffer);
+    __pyx_t_6 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_unpacked.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
+    if (unlikely(__pyx_t_6 < 0)) {
+      PyErr_Fetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_unpacked.rcbuffer->pybuffer, (PyObject*)__pyx_v_unpacked, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_7); Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9);
+        __Pyx_RaiseBufferFallbackError();
+      } else {
+        PyErr_Restore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
+      }
+    }
+    __pyx_pybuffernd_unpacked.diminfo[0].strides = __pyx_pybuffernd_unpacked.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_unpacked.diminfo[0].shape = __pyx_pybuffernd_unpacked.rcbuffer->pybuffer.shape[0];
+    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_5 = 0;
+  __pyx_v_unpacked = ((PyArrayObject *)__pyx_t_4);
+  __pyx_t_4 = 0;
+
+  /* "skbio/stats/__subsample.pyx":23
+ * 
+ *     unpacked = np.empty(counts_sum, dtype=int)
+ *     unpacked_idx = 0             # <<<<<<<<<<<<<<
+ *     for i in range(counts.shape[0]):
+ *         cnt = counts[i]
+ */
+  __pyx_v_unpacked_idx = 0;
+
+  /* "skbio/stats/__subsample.pyx":24
+ *     unpacked = np.empty(counts_sum, dtype=int)
+ *     unpacked_idx = 0
+ *     for i in range(counts.shape[0]):             # <<<<<<<<<<<<<<
+ *         cnt = counts[i]
+ *         for j in range(cnt):
+ */
+  __pyx_t_10 = (__pyx_v_counts->dimensions[0]);
+  for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
+    __pyx_v_i = __pyx_t_11;
+
+    /* "skbio/stats/__subsample.pyx":25
+ *     unpacked_idx = 0
+ *     for i in range(counts.shape[0]):
+ *         cnt = counts[i]             # <<<<<<<<<<<<<<
+ *         for j in range(cnt):
+ *             unpacked[unpacked_idx] = i
+ */
+    __pyx_t_12 = __pyx_v_i;
+    __pyx_t_6 = -1;
+    if (__pyx_t_12 < 0) {
+      __pyx_t_12 += __pyx_pybuffernd_counts.diminfo[0].shape;
+      if (unlikely(__pyx_t_12 < 0)) __pyx_t_6 = 0;
+    } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_counts.diminfo[0].shape)) __pyx_t_6 = 0;
+    if (unlikely(__pyx_t_6 != -1)) {
+      __Pyx_RaiseBufferIndexError(__pyx_t_6);
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+    __pyx_v_cnt = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_counts.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_counts.diminfo[0].strides));
+
+    /* "skbio/stats/__subsample.pyx":26
+ *     for i in range(counts.shape[0]):
+ *         cnt = counts[i]
+ *         for j in range(cnt):             # <<<<<<<<<<<<<<
+ *             unpacked[unpacked_idx] = i
+ *             unpacked_idx += 1
+ */
+    __pyx_t_13 = __pyx_v_cnt;
+    for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) {
+      __pyx_v_j = __pyx_t_14;
+
+      /* "skbio/stats/__subsample.pyx":27
+ *         cnt = counts[i]
+ *         for j in range(cnt):
+ *             unpacked[unpacked_idx] = i             # <<<<<<<<<<<<<<
+ *             unpacked_idx += 1
+ * 
+ */
+      __pyx_t_15 = __pyx_v_unpacked_idx;
+      __pyx_t_6 = -1;
+      if (__pyx_t_15 < 0) {
+        __pyx_t_15 += __pyx_pybuffernd_unpacked.diminfo[0].shape;
+        if (unlikely(__pyx_t_15 < 0)) __pyx_t_6 = 0;
+      } else if (unlikely(__pyx_t_15 >= __pyx_pybuffernd_unpacked.diminfo[0].shape)) __pyx_t_6 = 0;
+      if (unlikely(__pyx_t_6 != -1)) {
+        __Pyx_RaiseBufferIndexError(__pyx_t_6);
+        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      }
+      *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_unpacked.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_unpacked.diminfo[0].strides) = __pyx_v_i;
+
+      /* "skbio/stats/__subsample.pyx":28
+ *         for j in range(cnt):
+ *             unpacked[unpacked_idx] = i
+ *             unpacked_idx += 1             # <<<<<<<<<<<<<<
+ * 
+ *     permuted = np.random.permutation(unpacked)[:n]
+ */
+      __pyx_v_unpacked_idx = (__pyx_v_unpacked_idx + 1);
+    }
+  }
+
+  /* "skbio/stats/__subsample.pyx":30
+ *             unpacked_idx += 1
+ * 
+ *     permuted = np.random.permutation(unpacked)[:n]             # <<<<<<<<<<<<<<
+ * 
+ *     result = np.zeros_like(counts)
+ */
+  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_random); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_permutation); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_INCREF(((PyObject *)__pyx_v_unpacked));
+  PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_unpacked));
+  __Pyx_GIVEREF(((PyObject *)__pyx_v_unpacked));
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_t_1, 0, 0, NULL, &__pyx_v_n, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = ((PyArrayObject *)__pyx_t_3);
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_permuted.rcbuffer->pybuffer);
+    __pyx_t_6 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_permuted.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
+    if (unlikely(__pyx_t_6 < 0)) {
+      PyErr_Fetch(&__pyx_t_9, &__pyx_t_8, &__pyx_t_7);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_permuted.rcbuffer->pybuffer, (PyObject*)__pyx_v_permuted, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_7);
+        __Pyx_RaiseBufferFallbackError();
+      } else {
+        PyErr_Restore(__pyx_t_9, __pyx_t_8, __pyx_t_7);
+      }
+    }
+    __pyx_pybuffernd_permuted.diminfo[0].strides = __pyx_pybuffernd_permuted.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_permuted.diminfo[0].shape = __pyx_pybuffernd_permuted.rcbuffer->pybuffer.shape[0];
+    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_5 = 0;
+  __pyx_v_permuted = ((PyArrayObject *)__pyx_t_3);
+  __pyx_t_3 = 0;
+
+  /* "skbio/stats/__subsample.pyx":32
+ *     permuted = np.random.permutation(unpacked)[:n]
+ * 
+ *     result = np.zeros_like(counts)             # <<<<<<<<<<<<<<
+ *     for idx in range(permuted.shape[0]):
+ *         result[permuted[idx]] += 1
+ */
+  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros_like); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_INCREF(((PyObject *)__pyx_v_counts));
+  PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_counts));
+  __Pyx_GIVEREF(((PyObject *)__pyx_v_counts));
+  __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
+  {
+    __Pyx_BufFmt_StackElem __pyx_stack[1];
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result.rcbuffer->pybuffer);
+    __pyx_t_6 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_result.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
+    if (unlikely(__pyx_t_6 < 0)) {
+      PyErr_Fetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
+      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_result.rcbuffer->pybuffer, (PyObject*)__pyx_v_result, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
+        Py_XDECREF(__pyx_t_7); Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9);
+        __Pyx_RaiseBufferFallbackError();
+      } else {
+        PyErr_Restore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
+      }
+    }
+    __pyx_pybuffernd_result.diminfo[0].strides = __pyx_pybuffernd_result.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_result.diminfo[0].shape = __pyx_pybuffernd_result.rcbuffer->pybuffer.shape[0];
+    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_5 = 0;
+  __pyx_v_result = ((PyArrayObject *)__pyx_t_4);
+  __pyx_t_4 = 0;
+
+  /* "skbio/stats/__subsample.pyx":33
+ * 
+ *     result = np.zeros_like(counts)
+ *     for idx in range(permuted.shape[0]):             # <<<<<<<<<<<<<<
+ *         result[permuted[idx]] += 1
+ * 
+ */
+  __pyx_t_10 = (__pyx_v_permuted->dimensions[0]);
+  for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_10; __pyx_t_16+=1) {
+    __pyx_v_idx = __pyx_t_16;
+
+    /* "skbio/stats/__subsample.pyx":34
+ *     result = np.zeros_like(counts)
+ *     for idx in range(permuted.shape[0]):
+ *         result[permuted[idx]] += 1             # <<<<<<<<<<<<<<
+ * 
+ *     return result
+ */
+    __pyx_t_17 = __pyx_v_idx;
+    __pyx_t_6 = -1;
+    if (__pyx_t_17 < 0) {
+      __pyx_t_17 += __pyx_pybuffernd_permuted.diminfo[0].shape;
+      if (unlikely(__pyx_t_17 < 0)) __pyx_t_6 = 0;
+    } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_permuted.diminfo[0].shape)) __pyx_t_6 = 0;
+    if (unlikely(__pyx_t_6 != -1)) {
+      __Pyx_RaiseBufferIndexError(__pyx_t_6);
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+    __pyx_t_13 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_permuted.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_permuted.diminfo[0].strides));
+    __pyx_t_6 = -1;
+    if (__pyx_t_13 < 0) {
+      __pyx_t_13 += __pyx_pybuffernd_result.diminfo[0].shape;
+      if (unlikely(__pyx_t_13 < 0)) __pyx_t_6 = 0;
+    } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_result.diminfo[0].shape)) __pyx_t_6 = 0;
+    if (unlikely(__pyx_t_6 != -1)) {
+      __Pyx_RaiseBufferIndexError(__pyx_t_6);
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+    *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_result.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_result.diminfo[0].strides) += 1;
+  }
+
+  /* "skbio/stats/__subsample.pyx":36
+ *         result[permuted[idx]] += 1
+ * 
+ *     return result             # <<<<<<<<<<<<<<
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __Pyx_INCREF(((PyObject *)__pyx_v_result));
+  __pyx_r = ((PyObject *)__pyx_v_result);
+  goto __pyx_L0;
+
+  /* "skbio/stats/__subsample.pyx":15
+ * 
+ * 
+ * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
+ *     cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
+ *     cdef:
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_2);
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_4);
+  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_counts.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_permuted.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result.rcbuffer->pybuffer);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_unpacked.rcbuffer->pybuffer);
+  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+  __Pyx_AddTraceback("skbio.stats.__subsample._subsample_counts_without_replacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  goto __pyx_L2;
+  __pyx_L0:;
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_counts.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_permuted.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result.rcbuffer->pybuffer);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_unpacked.rcbuffer->pybuffer);
+  __pyx_L2:;
+  __Pyx_XDECREF((PyObject *)__pyx_v_result);
+  __Pyx_XDECREF((PyObject *)__pyx_v_permuted);
+  __Pyx_XDECREF((PyObject *)__pyx_v_unpacked);
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
+ *         # experimental exception made for __getbuffer__ and __releasebuffer__
+ *         # -- the details of this may change.
+ *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
+ *             # This implementation of getbuffer is geared towards Cython
+ *             # requirements, and does not yet fullfill the PEP.
+ */
+
+/* Python wrapper */
+/* Buffer-protocol (PEP 3118) entry point for numpy.ndarray, emitted by
+ * Cython from numpy/__init__.pxd.  It casts the generic CPython arguments
+ * (PyObject *self, Py_buffer *info, int flags) to the typed forms expected
+ * by the implementation function __pyx_pf_5numpy_7ndarray___getbuffer__ and
+ * returns that function's int status unchanged.  The RefNanny setup/finish
+ * pair only brackets reference-count debugging in CYTHON_REFNANNY builds.
+ * NOTE(review): machine-generated code — do not hand-edit; regenerate from
+ * the .pyx/.pxd sources with Cython instead. */
+static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
+static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
+  __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
+  int __pyx_v_copy_shape;
+  int __pyx_v_i;
+  int __pyx_v_ndim;
+  int __pyx_v_endian_detector;
+  int __pyx_v_little_endian;
+  int __pyx_v_t;
+  char *__pyx_v_f;
+  PyArray_Descr *__pyx_v_descr = 0;
+  int __pyx_v_offset;
+  int __pyx_v_hasfields;
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  int __pyx_t_2;
+  int __pyx_t_3;
+  PyObject *__pyx_t_4 = NULL;
+  int __pyx_t_5;
+  int __pyx_t_6;
+  int __pyx_t_7;
+  PyObject *__pyx_t_8 = NULL;
+  char *__pyx_t_9;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("__getbuffer__", 0);
+  if (__pyx_v_info != NULL) {
+    __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
+    __Pyx_GIVEREF(__pyx_v_info->obj);
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":200
+ *             # of flags
+ * 
+ *             if info == NULL: return             # <<<<<<<<<<<<<<
+ * 
+ *             cdef int copy_shape, i, ndim
+ */
+  __pyx_t_1 = ((__pyx_v_info == NULL) != 0);
+  if (__pyx_t_1) {
+    __pyx_r = 0;
+    goto __pyx_L0;
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
+ * 
+ *             cdef int copy_shape, i, ndim
+ *             cdef int endian_detector = 1             # <<<<<<<<<<<<<<
+ *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
+ * 
+ */
+  __pyx_v_endian_detector = 1;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":204
+ *             cdef int copy_shape, i, ndim
+ *             cdef int endian_detector = 1
+ *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
+ * 
+ *             ndim = PyArray_NDIM(self)
+ */
+  __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
+ *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
+ * 
+ *             ndim = PyArray_NDIM(self)             # <<<<<<<<<<<<<<
+ * 
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
+  __pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":208
+ *             ndim = PyArray_NDIM(self)
+ * 
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
+ *                 copy_shape = 1
+ *             else:
+ */
+  __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
+ * 
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ *                 copy_shape = 1             # <<<<<<<<<<<<<<
+ *             else:
+ *                 copy_shape = 0
+ */
+    __pyx_v_copy_shape = 1;
+    goto __pyx_L4;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
+ *                 copy_shape = 1
+ *             else:
+ *                 copy_shape = 0             # <<<<<<<<<<<<<<
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ */
+    __pyx_v_copy_shape = 0;
+  }
+  __pyx_L4:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":213
+ *                 copy_shape = 0
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ */
+  __pyx_t_1 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
+ * 
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):             # <<<<<<<<<<<<<<
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ * 
+ */
+    __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
+    __pyx_t_3 = __pyx_t_2;
+  } else {
+    __pyx_t_3 = __pyx_t_1;
+  }
+  if (__pyx_t_3) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ */
+    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
+ *                 raise ValueError(u"ndarray is not C contiguous")
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ */
+  __pyx_t_3 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
+  if (__pyx_t_3) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):             # <<<<<<<<<<<<<<
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ * 
+ */
+    __pyx_t_1 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
+    __pyx_t_2 = __pyx_t_1;
+  } else {
+    __pyx_t_2 = __pyx_t_3;
+  }
+  if (__pyx_t_2) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
+ * 
+ *             info.buf = PyArray_DATA(self)
+ */
+    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")
+ * 
+ *             info.buf = PyArray_DATA(self)             # <<<<<<<<<<<<<<
+ *             info.ndim = ndim
+ *             if copy_shape:
+ */
+  __pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+ * 
+ *             info.buf = PyArray_DATA(self)
+ *             info.ndim = ndim             # <<<<<<<<<<<<<<
+ *             if copy_shape:
+ *                 # Allocate new buffer for strides and shape info.
+ */
+  __pyx_v_info->ndim = __pyx_v_ndim;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":223
+ *             info.buf = PyArray_DATA(self)
+ *             info.ndim = ndim
+ *             if copy_shape:             # <<<<<<<<<<<<<<
+ *                 # Allocate new buffer for strides and shape info.
+ *                 # This is allocated as one block, strides first.
+ */
+  __pyx_t_2 = (__pyx_v_copy_shape != 0);
+  if (__pyx_t_2) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
+ *                 # Allocate new buffer for strides and shape info.
+ *                 # This is allocated as one block, strides first.
+ *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)             # <<<<<<<<<<<<<<
+ *                 info.shape = info.strides + ndim
+ *                 for i in range(ndim):
+ */
+    __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":227
+ *                 # This is allocated as one block, strides first.
+ *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
+ *                 info.shape = info.strides + ndim             # <<<<<<<<<<<<<<
+ *                 for i in range(ndim):
+ *                     info.strides[i] = PyArray_STRIDES(self)[i]
+ */
+    __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":228
+ *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
+ *                 info.shape = info.strides + ndim
+ *                 for i in range(ndim):             # <<<<<<<<<<<<<<
+ *                     info.strides[i] = PyArray_STRIDES(self)[i]
+ *                     info.shape[i] = PyArray_DIMS(self)[i]
+ */
+    __pyx_t_5 = __pyx_v_ndim;
+    for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
+      __pyx_v_i = __pyx_t_6;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
+ *                 info.shape = info.strides + ndim
+ *                 for i in range(ndim):
+ *                     info.strides[i] = PyArray_STRIDES(self)[i]             # <<<<<<<<<<<<<<
+ *                     info.shape[i] = PyArray_DIMS(self)[i]
+ *             else:
+ */
+      (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
+ *                 for i in range(ndim):
+ *                     info.strides[i] = PyArray_STRIDES(self)[i]
+ *                     info.shape[i] = PyArray_DIMS(self)[i]             # <<<<<<<<<<<<<<
+ *             else:
+ *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
+ */
+      (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
+    }
+    goto __pyx_L7;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
+ *                     info.shape[i] = PyArray_DIMS(self)[i]
+ *             else:
+ *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)             # <<<<<<<<<<<<<<
+ *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
+ *             info.suboffsets = NULL
+ */
+    __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
+ *             else:
+ *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
+ *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)             # <<<<<<<<<<<<<<
+ *             info.suboffsets = NULL
+ *             info.itemsize = PyArray_ITEMSIZE(self)
+ */
+    __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
+  }
+  __pyx_L7:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":234
+ *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
+ *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
+ *             info.suboffsets = NULL             # <<<<<<<<<<<<<<
+ *             info.itemsize = PyArray_ITEMSIZE(self)
+ *             info.readonly = not PyArray_ISWRITEABLE(self)
+ */
+  __pyx_v_info->suboffsets = NULL;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
+ *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
+ *             info.suboffsets = NULL
+ *             info.itemsize = PyArray_ITEMSIZE(self)             # <<<<<<<<<<<<<<
+ *             info.readonly = not PyArray_ISWRITEABLE(self)
+ * 
+ */
+  __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
+ *             info.suboffsets = NULL
+ *             info.itemsize = PyArray_ITEMSIZE(self)
+ *             info.readonly = not PyArray_ISWRITEABLE(self)             # <<<<<<<<<<<<<<
+ * 
+ *             cdef int t
+ */
+  __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
+ * 
+ *             cdef int t
+ *             cdef char* f = NULL             # <<<<<<<<<<<<<<
+ *             cdef dtype descr = self.descr
+ *             cdef list stack
+ */
+  __pyx_v_f = NULL;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":240
+ *             cdef int t
+ *             cdef char* f = NULL
+ *             cdef dtype descr = self.descr             # <<<<<<<<<<<<<<
+ *             cdef list stack
+ *             cdef int offset
+ */
+  __pyx_t_4 = ((PyObject *)__pyx_v_self->descr);
+  __Pyx_INCREF(__pyx_t_4);
+  __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4);
+  __pyx_t_4 = 0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":244
+ *             cdef int offset
+ * 
+ *             cdef bint hasfields = PyDataType_HASFIELDS(descr)             # <<<<<<<<<<<<<<
+ * 
+ *             if not hasfields and not copy_shape:
+ */
+  __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246
+ *             cdef bint hasfields = PyDataType_HASFIELDS(descr)
+ * 
+ *             if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
+ *                 # do not call releasebuffer
+ *                 info.obj = None
+ */
+  __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
+  if (__pyx_t_2) {
+    __pyx_t_3 = ((!(__pyx_v_copy_shape != 0)) != 0);
+    __pyx_t_1 = __pyx_t_3;
+  } else {
+    __pyx_t_1 = __pyx_t_2;
+  }
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248
+ *             if not hasfields and not copy_shape:
+ *                 # do not call releasebuffer
+ *                 info.obj = None             # <<<<<<<<<<<<<<
+ *             else:
+ *                 # need to call releasebuffer
+ */
+    __Pyx_INCREF(Py_None);
+    __Pyx_GIVEREF(Py_None);
+    __Pyx_GOTREF(__pyx_v_info->obj);
+    __Pyx_DECREF(__pyx_v_info->obj);
+    __pyx_v_info->obj = Py_None;
+    goto __pyx_L10;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
+ *             else:
+ *                 # need to call releasebuffer
+ *                 info.obj = self             # <<<<<<<<<<<<<<
+ * 
+ *             if not hasfields:
+ */
+    __Pyx_INCREF(((PyObject *)__pyx_v_self));
+    __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
+    __Pyx_GOTREF(__pyx_v_info->obj);
+    __Pyx_DECREF(__pyx_v_info->obj);
+    __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
+  }
+  __pyx_L10:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253
+ *                 info.obj = self
+ * 
+ *             if not hasfields:             # <<<<<<<<<<<<<<
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ */
+  __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
+ * 
+ *             if not hasfields:
+ *                 t = descr.type_num             # <<<<<<<<<<<<<<
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ */
+    __pyx_t_5 = __pyx_v_descr->type_num;
+    __pyx_v_t = __pyx_t_5;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255
+ *             if not hasfields:
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")
+ */
+    __pyx_t_1 = ((__pyx_v_descr->byteorder == '>') != 0);
+    if (__pyx_t_1) {
+      __pyx_t_2 = (__pyx_v_little_endian != 0);
+    } else {
+      __pyx_t_2 = __pyx_t_1;
+    }
+    if (!__pyx_t_2) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
+ *                 t = descr.type_num
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ *                     (descr.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
+ *                     raise ValueError(u"Non-native byte order not supported")
+ *                 if   t == NPY_BYTE:        f = "b"
+ */
+      __pyx_t_1 = ((__pyx_v_descr->byteorder == '<') != 0);
+      if (__pyx_t_1) {
+        __pyx_t_3 = ((!(__pyx_v_little_endian != 0)) != 0);
+        __pyx_t_7 = __pyx_t_3;
+      } else {
+        __pyx_t_7 = __pyx_t_1;
+      }
+      __pyx_t_1 = __pyx_t_7;
+    } else {
+      __pyx_t_1 = __pyx_t_2;
+    }
+    if (__pyx_t_1) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
+ *                 if   t == NPY_BYTE:        f = "b"
+ *                 elif t == NPY_UBYTE:       f = "B"
+ */
+      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"
+ *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
+ *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
+ *                 else:
+ *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+    switch (__pyx_v_t) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")
+ *                 if   t == NPY_BYTE:        f = "b"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_UBYTE:       f = "B"
+ *                 elif t == NPY_SHORT:       f = "h"
+ */
+      case NPY_BYTE:
+      __pyx_v_f = __pyx_k_b;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
+ *                     raise ValueError(u"Non-native byte order not supported")
+ *                 if   t == NPY_BYTE:        f = "b"
+ *                 elif t == NPY_UBYTE:       f = "B"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_SHORT:       f = "h"
+ *                 elif t == NPY_USHORT:      f = "H"
+ */
+      case NPY_UBYTE:
+      __pyx_v_f = __pyx_k_B;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+ *                 if   t == NPY_BYTE:        f = "b"
+ *                 elif t == NPY_UBYTE:       f = "B"
+ *                 elif t == NPY_SHORT:       f = "h"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_USHORT:      f = "H"
+ *                 elif t == NPY_INT:         f = "i"
+ */
+      case NPY_SHORT:
+      __pyx_v_f = __pyx_k_h;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
+ *                 elif t == NPY_UBYTE:       f = "B"
+ *                 elif t == NPY_SHORT:       f = "h"
+ *                 elif t == NPY_USHORT:      f = "H"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_INT:         f = "i"
+ *                 elif t == NPY_UINT:        f = "I"
+ */
+      case NPY_USHORT:
+      __pyx_v_f = __pyx_k_H;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
+ *                 elif t == NPY_SHORT:       f = "h"
+ *                 elif t == NPY_USHORT:      f = "H"
+ *                 elif t == NPY_INT:         f = "i"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_UINT:        f = "I"
+ *                 elif t == NPY_LONG:        f = "l"
+ */
+      case NPY_INT:
+      __pyx_v_f = __pyx_k_i;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
+ *                 elif t == NPY_USHORT:      f = "H"
+ *                 elif t == NPY_INT:         f = "i"
+ *                 elif t == NPY_UINT:        f = "I"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_LONG:        f = "l"
+ *                 elif t == NPY_ULONG:       f = "L"
+ */
+      case NPY_UINT:
+      __pyx_v_f = __pyx_k_I;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
+ *                 elif t == NPY_INT:         f = "i"
+ *                 elif t == NPY_UINT:        f = "I"
+ *                 elif t == NPY_LONG:        f = "l"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_ULONG:       f = "L"
+ *                 elif t == NPY_LONGLONG:    f = "q"
+ */
+      case NPY_LONG:
+      __pyx_v_f = __pyx_k_l;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
+ *                 elif t == NPY_UINT:        f = "I"
+ *                 elif t == NPY_LONG:        f = "l"
+ *                 elif t == NPY_ULONG:       f = "L"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_LONGLONG:    f = "q"
+ *                 elif t == NPY_ULONGLONG:   f = "Q"
+ */
+      case NPY_ULONG:
+      __pyx_v_f = __pyx_k_L;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
+ *                 elif t == NPY_LONG:        f = "l"
+ *                 elif t == NPY_ULONG:       f = "L"
+ *                 elif t == NPY_LONGLONG:    f = "q"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_ULONGLONG:   f = "Q"
+ *                 elif t == NPY_FLOAT:       f = "f"
+ */
+      case NPY_LONGLONG:
+      __pyx_v_f = __pyx_k_q;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
+ *                 elif t == NPY_ULONG:       f = "L"
+ *                 elif t == NPY_LONGLONG:    f = "q"
+ *                 elif t == NPY_ULONGLONG:   f = "Q"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_FLOAT:       f = "f"
+ *                 elif t == NPY_DOUBLE:      f = "d"
+ */
+      case NPY_ULONGLONG:
+      __pyx_v_f = __pyx_k_Q;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
+ *                 elif t == NPY_LONGLONG:    f = "q"
+ *                 elif t == NPY_ULONGLONG:   f = "Q"
+ *                 elif t == NPY_FLOAT:       f = "f"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_DOUBLE:      f = "d"
+ *                 elif t == NPY_LONGDOUBLE:  f = "g"
+ */
+      case NPY_FLOAT:
+      __pyx_v_f = __pyx_k_f;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
+ *                 elif t == NPY_ULONGLONG:   f = "Q"
+ *                 elif t == NPY_FLOAT:       f = "f"
+ *                 elif t == NPY_DOUBLE:      f = "d"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_LONGDOUBLE:  f = "g"
+ *                 elif t == NPY_CFLOAT:      f = "Zf"
+ */
+      case NPY_DOUBLE:
+      __pyx_v_f = __pyx_k_d;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+ *                 elif t == NPY_FLOAT:       f = "f"
+ *                 elif t == NPY_DOUBLE:      f = "d"
+ *                 elif t == NPY_LONGDOUBLE:  f = "g"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_CFLOAT:      f = "Zf"
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"
+ */
+      case NPY_LONGDOUBLE:
+      __pyx_v_f = __pyx_k_g;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
+ *                 elif t == NPY_DOUBLE:      f = "d"
+ *                 elif t == NPY_LONGDOUBLE:  f = "g"
+ *                 elif t == NPY_CFLOAT:      f = "Zf"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"
+ *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
+ */
+      case NPY_CFLOAT:
+      __pyx_v_f = __pyx_k_Zf;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
+ *                 elif t == NPY_LONGDOUBLE:  f = "g"
+ *                 elif t == NPY_CFLOAT:      f = "Zf"
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
+ *                 elif t == NPY_OBJECT:      f = "O"
+ */
+      case NPY_CDOUBLE:
+      __pyx_v_f = __pyx_k_Zd;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
+ *                 elif t == NPY_CFLOAT:      f = "Zf"
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"
+ *                 elif t == NPY_CLONGDOUBLE: f = "Zg"             # <<<<<<<<<<<<<<
+ *                 elif t == NPY_OBJECT:      f = "O"
+ *                 else:
+ */
+      case NPY_CLONGDOUBLE:
+      __pyx_v_f = __pyx_k_Zg;
+      break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+ *                 elif t == NPY_CDOUBLE:     f = "Zd"
+ *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
+ *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
+ *                 else:
+ *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+      case NPY_OBJECT:
+      __pyx_v_f = __pyx_k_O;
+      break;
+      default:
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
+ *                 elif t == NPY_OBJECT:      f = "O"
+ *                 else:
+ *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
+ *                 info.format = f
+ *                 return
+ */
+      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_8);
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8);
+      __Pyx_GIVEREF(__pyx_t_8);
+      __pyx_t_8 = 0;
+      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_8);
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      break;
+    }
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
+ *                 else:
+ *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ *                 info.format = f             # <<<<<<<<<<<<<<
+ *                 return
+ *             else:
+ */
+    __pyx_v_info->format = __pyx_v_f;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278
+ *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ *                 info.format = f
+ *                 return             # <<<<<<<<<<<<<<
+ *             else:
+ *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
+ */
+    __pyx_r = 0;
+    goto __pyx_L0;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
+ *                 return
+ *             else:
+ *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)             # <<<<<<<<<<<<<<
+ *                 info.format[0] = c'^' # Native data types, manual alignment
+ *                 offset = 0
+ */
+    __pyx_v_info->format = ((char *)malloc(255));
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
+ *             else:
+ *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
+ *                 info.format[0] = c'^' # Native data types, manual alignment             # <<<<<<<<<<<<<<
+ *                 offset = 0
+ *                 f = _util_dtypestring(descr, info.format + 1,
+ */
+    (__pyx_v_info->format[0]) = '^';
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282
+ *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
+ *                 info.format[0] = c'^' # Native data types, manual alignment
+ *                 offset = 0             # <<<<<<<<<<<<<<
+ *                 f = _util_dtypestring(descr, info.format + 1,
+ *                                       info.format + _buffer_format_string_len,
+ */
+    __pyx_v_offset = 0;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
+ *                 info.format[0] = c'^' # Native data types, manual alignment
+ *                 offset = 0
+ *                 f = _util_dtypestring(descr, info.format + 1,             # <<<<<<<<<<<<<<
+ *                                       info.format + _buffer_format_string_len,
+ *                                       &offset)
+ */
+    __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_v_f = __pyx_t_9;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
+ *                                       info.format + _buffer_format_string_len,
+ *                                       &offset)
+ *                 f[0] = c'\0' # Terminate format string             # <<<<<<<<<<<<<<
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):
+ */
+    (__pyx_v_f[0]) = '\x00';
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
+ *         # experimental exception made for __getbuffer__ and __releasebuffer__
+ *         # -- the details of this may change.
+ *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
+ *             # This implementation of getbuffer is geared towards Cython
+ *             # requirements, and does not yet fullfill the PEP.
+ */
+
+  /* function exit code */
+  __pyx_r = 0;
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_8);
+  __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = -1;
+  if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
+    __Pyx_GOTREF(__pyx_v_info->obj);
+    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
+  }
+  goto __pyx_L2;
+  __pyx_L0:;
+  if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
+    __Pyx_GOTREF(Py_None);
+    __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
+  }
+  __pyx_L2:;
+  __Pyx_XDECREF((PyObject *)__pyx_v_descr);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
+ *                 f[0] = c'\0' # Terminate format string
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)
+ */
+
+/* Python wrapper */
+static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
+static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
+  __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  __Pyx_RefNannySetupContext("__releasebuffer__", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):
+ *             if PyArray_HASFIELDS(self):             # <<<<<<<<<<<<<<
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
+  __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)             # <<<<<<<<<<<<<<
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ *                 stdlib.free(info.strides)
+ */
+    free(__pyx_v_info->format);
+    goto __pyx_L3;
+  }
+  __pyx_L3:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
+ *                 stdlib.free(info.strides)
+ *                 # info.shape was stored after info.strides in the same block
+ */
+  __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
+ *                 stdlib.free(info.format)
+ *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ *                 stdlib.free(info.strides)             # <<<<<<<<<<<<<<
+ *                 # info.shape was stored after info.strides in the same block
+ * 
+ */
+    free(__pyx_v_info->strides);
+    goto __pyx_L4;
+  }
+  __pyx_L4:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
+ *                 f[0] = c'\0' # Terminate format string
+ * 
+ *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
+ *             if PyArray_HASFIELDS(self):
+ *                 stdlib.free(info.format)
+ */
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
+ * ctypedef npy_cdouble     complex_t
+ * 
+ * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(1, <void*>a)
+ * 
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
+ * 
+ * cdef inline object PyArray_MultiIterNew1(a):
+ *     return PyArray_MultiIterNew(1, <void*>a)             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object PyArray_MultiIterNew2(a, b):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
+ * ctypedef npy_cdouble     complex_t
+ * 
+ * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(1, <void*>a)
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+ *     return PyArray_MultiIterNew(1, <void*>a)
+ * 
+ * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+ * 
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
+ * 
+ * cdef inline object PyArray_MultiIterNew2(a, b):
+ *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+ *     return PyArray_MultiIterNew(1, <void*>a)
+ * 
+ * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+ *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+ * 
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+ * 
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
+ * 
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):
+ *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+ *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+ * 
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+ *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+ * 
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+ * 
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
+ * 
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ */
+  __Pyx_XDECREF(__pyx_r);
+  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+ *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+ * 
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+ *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+ * 
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+ * 
+ */
+
+/* NOTE(review): Cython-generated C for the inline Cython helper
+ * `PyArray_MultiIterNew5(a, b, c, d, e)` declared in numpy's __init__.pxd.
+ * It forwards the five Python objects to numpy's varargs C API
+ * PyArray_MultiIterNew(5, ...) and returns the resulting broadcast
+ * multi-iterator object (new reference), or NULL with a traceback entry
+ * appended on failure.  Do not hand-edit: this file is regenerated by
+ * Cython from the .pyx/.pxd sources, so manual changes will be lost. */
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  /* Error-reporting bookkeeping filled in at the raise site and consumed
+   * by __Pyx_AddTraceback below. */
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
+ * 
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
+ */
+  __Pyx_XDECREF(__pyx_r);
+  /* The five PyObject* are passed through the varargs API as void*;
+   * a NULL result means a Python exception is already set. */
+  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_r = __pyx_t_1;
+  __pyx_t_1 = 0;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+ *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+ * 
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
+ *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+ * 
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  /* Error path: drop the temporary (if any), record this frame in the
+   * Python traceback, and return NULL to propagate the exception. */
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+ *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+ * 
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
+ *     # Recursive utility function used in __getbuffer__ to get format
+ *     # string. The new location in the format string is returned.
+ */
+
+static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
+  PyArray_Descr *__pyx_v_child = 0;
+  int __pyx_v_endian_detector;
+  int __pyx_v_little_endian;
+  PyObject *__pyx_v_fields = 0;
+  PyObject *__pyx_v_childname = NULL;
+  PyObject *__pyx_v_new_offset = NULL;
+  PyObject *__pyx_v_t = NULL;
+  char *__pyx_r;
+  __Pyx_RefNannyDeclarations
+  PyObject *__pyx_t_1 = NULL;
+  Py_ssize_t __pyx_t_2;
+  PyObject *__pyx_t_3 = NULL;
+  PyObject *__pyx_t_4 = NULL;
+  int __pyx_t_5;
+  int __pyx_t_6;
+  int __pyx_t_7;
+  int __pyx_t_8;
+  int __pyx_t_9;
+  long __pyx_t_10;
+  char *__pyx_t_11;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannySetupContext("_util_dtypestring", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790
+ *     cdef int delta_offset
+ *     cdef tuple i
+ *     cdef int endian_detector = 1             # <<<<<<<<<<<<<<
+ *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
+ *     cdef tuple fields
+ */
+  __pyx_v_endian_detector = 1;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791
+ *     cdef tuple i
+ *     cdef int endian_detector = 1
+ *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
+ *     cdef tuple fields
+ * 
+ */
+  __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
+ *     cdef tuple fields
+ * 
+ *     for childname in descr.names:             # <<<<<<<<<<<<<<
+ *         fields = descr.fields[childname]
+ *         child, new_offset = fields
+ */
+  if (unlikely(__pyx_v_descr->names == Py_None)) {
+    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
+  for (;;) {
+    if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+    #if CYTHON_COMPILING_IN_CPYTHON
+    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    #else
+    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    #endif
+    __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
+    __pyx_t_3 = 0;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795
+ * 
+ *     for childname in descr.names:
+ *         fields = descr.fields[childname]             # <<<<<<<<<<<<<<
+ *         child, new_offset = fields
+ * 
+ */
+    __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __Pyx_GOTREF(__pyx_t_3);
+    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
+    __pyx_t_3 = 0;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796
+ *     for childname in descr.names:
+ *         fields = descr.fields[childname]
+ *         child, new_offset = fields             # <<<<<<<<<<<<<<
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:
+ */
+    if (likely(__pyx_v_fields != Py_None)) {
+      PyObject* sequence = __pyx_v_fields;
+      #if CYTHON_COMPILING_IN_CPYTHON
+      Py_ssize_t size = Py_SIZE(sequence);
+      #else
+      Py_ssize_t size = PySequence_Size(sequence);
+      #endif
+      if (unlikely(size != 2)) {
+        if (size > 2) __Pyx_RaiseTooManyValuesError(2);
+        else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      }
+      #if CYTHON_COMPILING_IN_CPYTHON
+      __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); 
+      __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); 
+      __Pyx_INCREF(__pyx_t_3);
+      __Pyx_INCREF(__pyx_t_4);
+      #else
+      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      #endif
+    } else {
+      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
+    __pyx_t_3 = 0;
+    __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
+    __pyx_t_4 = 0;
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
+ *         child, new_offset = fields
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ */
+    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
+    if (__pyx_t_6) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or
+ */
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")
+ */
+    __pyx_t_6 = ((__pyx_v_child->byteorder == '>') != 0);
+    if (__pyx_t_6) {
+      __pyx_t_7 = (__pyx_v_little_endian != 0);
+    } else {
+      __pyx_t_7 = __pyx_t_6;
+    }
+    if (!__pyx_t_7) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or
+ *             (child.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
+ *             raise ValueError(u"Non-native byte order not supported")
+ *             # One could encode it in the format string and have Cython
+ */
+      __pyx_t_6 = ((__pyx_v_child->byteorder == '<') != 0);
+      if (__pyx_t_6) {
+        __pyx_t_8 = ((!(__pyx_v_little_endian != 0)) != 0);
+        __pyx_t_9 = __pyx_t_8;
+      } else {
+        __pyx_t_9 = __pyx_t_6;
+      }
+      __pyx_t_6 = __pyx_t_9;
+    } else {
+      __pyx_t_6 = __pyx_t_7;
+    }
+    if (__pyx_t_6) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
+ *         if ((child.byteorder == c'>' and little_endian) or
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
+ *             # One could encode it in the format string and have Cython
+ *             # complain instead, BUT: < and > in format strings also imply
+ */
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813
+ * 
+ *         # Output padding bytes
+ *         while offset[0] < new_offset:             # <<<<<<<<<<<<<<
+ *             f[0] = 120 # "x"; pad byte
+ *             f += 1
+ */
+    while (1) {
+      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (!__pyx_t_6) break;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814
+ *         # Output padding bytes
+ *         while offset[0] < new_offset:
+ *             f[0] = 120 # "x"; pad byte             # <<<<<<<<<<<<<<
+ *             f += 1
+ *             offset[0] += 1
+ */
+      (__pyx_v_f[0]) = 120;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815
+ *         while offset[0] < new_offset:
+ *             f[0] = 120 # "x"; pad byte
+ *             f += 1             # <<<<<<<<<<<<<<
+ *             offset[0] += 1
+ * 
+ */
+      __pyx_v_f = (__pyx_v_f + 1);
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
+ *             f[0] = 120 # "x"; pad byte
+ *             f += 1
+ *             offset[0] += 1             # <<<<<<<<<<<<<<
+ * 
+ *         offset[0] += child.itemsize
+ */
+      __pyx_t_10 = 0;
+      (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1);
+    }
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
+ *             offset[0] += 1
+ * 
+ *         offset[0] += child.itemsize             # <<<<<<<<<<<<<<
+ * 
+ *         if not PyDataType_HASFIELDS(child):
+ */
+    __pyx_t_10 = 0;
+    (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize);
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820
+ *         offset[0] += child.itemsize
+ * 
+ *         if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
+ *             t = child.type_num
+ *             if end - f < 5:
+ */
+    __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
+    if (__pyx_t_6) {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
+ * 
+ *         if not PyDataType_HASFIELDS(child):
+ *             t = child.type_num             # <<<<<<<<<<<<<<
+ *             if end - f < 5:
+ *                 raise RuntimeError(u"Format string allocated too short.")
+ */
+      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
+      __pyx_t_4 = 0;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822
+ *         if not PyDataType_HASFIELDS(child):
+ *             t = child.type_num
+ *             if end - f < 5:             # <<<<<<<<<<<<<<
+ *                 raise RuntimeError(u"Format string allocated too short.")
+ * 
+ */
+      __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
+      if (__pyx_t_6) {
+
+        /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+ *             t = child.type_num
+ *             if end - f < 5:
+ *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
+ * 
+ *             # Until ticket #99 is fixed, use integers to avoid warnings
+ */
+        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_4);
+        __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+ * 
+ *             # Until ticket #99 is fixed, use integers to avoid warnings
+ *             if   t == NPY_BYTE:        f[0] =  98 #"b"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
+ *             elif t == NPY_SHORT:       f[0] = 104 #"h"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 98;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827
+ *             # Until ticket #99 is fixed, use integers to avoid warnings
+ *             if   t == NPY_BYTE:        f[0] =  98 #"b"
+ *             elif t == NPY_UBYTE:       f[0] =  66 #"B"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_SHORT:       f[0] = 104 #"h"
+ *             elif t == NPY_USHORT:      f[0] =  72 #"H"
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 66;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828
+ *             if   t == NPY_BYTE:        f[0] =  98 #"b"
+ *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
+ *             elif t == NPY_SHORT:       f[0] = 104 #"h"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_USHORT:      f[0] =  72 #"H"
+ *             elif t == NPY_INT:         f[0] = 105 #"i"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 104;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
+ *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
+ *             elif t == NPY_SHORT:       f[0] = 104 #"h"
+ *             elif t == NPY_USHORT:      f[0] =  72 #"H"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_INT:         f[0] = 105 #"i"
+ *             elif t == NPY_UINT:        f[0] =  73 #"I"
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 72;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
+ *             elif t == NPY_SHORT:       f[0] = 104 #"h"
+ *             elif t == NPY_USHORT:      f[0] =  72 #"H"
+ *             elif t == NPY_INT:         f[0] = 105 #"i"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_UINT:        f[0] =  73 #"I"
+ *             elif t == NPY_LONG:        f[0] = 108 #"l"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 105;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
+ *             elif t == NPY_USHORT:      f[0] =  72 #"H"
+ *             elif t == NPY_INT:         f[0] = 105 #"i"
+ *             elif t == NPY_UINT:        f[0] =  73 #"I"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_LONG:        f[0] = 108 #"l"
+ *             elif t == NPY_ULONG:       f[0] = 76  #"L"
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 73;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
+ *             elif t == NPY_INT:         f[0] = 105 #"i"
+ *             elif t == NPY_UINT:        f[0] =  73 #"I"
+ *             elif t == NPY_LONG:        f[0] = 108 #"l"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_ULONG:       f[0] = 76  #"L"
+ *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 108;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
+ *             elif t == NPY_UINT:        f[0] =  73 #"I"
+ *             elif t == NPY_LONG:        f[0] = 108 #"l"
+ *             elif t == NPY_ULONG:       f[0] = 76  #"L"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
+ *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 76;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
+ *             elif t == NPY_LONG:        f[0] = 108 #"l"
+ *             elif t == NPY_ULONG:       f[0] = 76  #"L"
+ *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
+ *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 113;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
+ *             elif t == NPY_ULONG:       f[0] = 76  #"L"
+ *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
+ *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
+ *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 81;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
+ *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
+ *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
+ *             elif t == NPY_FLOAT:       f[0] = 102 #"f"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
+ *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 102;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
+ *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
+ *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
+ *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
+ *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 100;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
+ *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
+ *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
+ *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"             # <<<<<<<<<<<<<<
+ *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
+ *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 103;
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
+ *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
+ *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
+ *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf             # <<<<<<<<<<<<<<
+ *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
+ *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 90;
+        (__pyx_v_f[1]) = 102;
+        __pyx_v_f = (__pyx_v_f + 1);
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
+ *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
+ *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
+ *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd             # <<<<<<<<<<<<<<
+ *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 90;
+        (__pyx_v_f[1]) = 100;
+        __pyx_v_f = (__pyx_v_f + 1);
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
+ *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
+ *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
+ *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg             # <<<<<<<<<<<<<<
+ *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
+ *             else:
+ */
+      __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 90;
+        (__pyx_v_f[1]) = 103;
+        __pyx_v_f = (__pyx_v_f + 1);
+        goto __pyx_L11;
+      }
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
+ *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
+ *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ *             elif t == NPY_OBJECT:      f[0] = 79 #"O"             # <<<<<<<<<<<<<<
+ *             else:
+ *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+      __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_4);
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (__pyx_t_6) {
+        (__pyx_v_f[0]) = 79;
+        goto __pyx_L11;
+      }
+      /*else*/ {
+
+        /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
+ *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
+ *             else:
+ *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
+ *             f += 1
+ *         else:
+ */
+        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_4);
+        PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
+        __Pyx_GIVEREF(__pyx_t_3);
+        __pyx_t_3 = 0;
+        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+        __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+        __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      }
+      __pyx_L11:;
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
+ *             else:
+ *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ *             f += 1             # <<<<<<<<<<<<<<
+ *         else:
+ *             # Cython ignores struct boundary information ("T{...}"),
+ */
+      __pyx_v_f = (__pyx_v_f + 1);
+      goto __pyx_L9;
+    }
+    /*else*/ {
+
+      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849
+ *             # Cython ignores struct boundary information ("T{...}"),
+ *             # so don't output it
+ *             f = _util_dtypestring(child, f, end, offset)             # <<<<<<<<<<<<<<
+ *     return f
+ * 
+ */
+      __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_v_f = __pyx_t_11;
+    }
+    __pyx_L9:;
+  }
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850
+ *             # so don't output it
+ *             f = _util_dtypestring(child, f, end, offset)
+ *     return f             # <<<<<<<<<<<<<<
+ * 
+ * 
+ */
+  __pyx_r = __pyx_v_f;
+  goto __pyx_L0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+ *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+ * 
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
+ *     # Recursive utility function used in __getbuffer__ to get format
+ *     # string. The new location in the format string is returned.
+ */
+
+  /* function exit code */
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = NULL;
+  __pyx_L0:;
+  __Pyx_XDECREF((PyObject *)__pyx_v_child);
+  __Pyx_XDECREF(__pyx_v_fields);
+  __Pyx_XDECREF(__pyx_v_childname);
+  __Pyx_XDECREF(__pyx_v_new_offset);
+  __Pyx_XDECREF(__pyx_v_t);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
+ * 
+ * 
+ * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
+ *      cdef PyObject* baseptr
+ *      if base is None:
+ */
+
+/* NOTE(review): Cython-generated code (mapped from numpy/__init__.pxd, lines
+ * quoted in the comments below). Do not hand-edit; regenerate from the
+ * .pyx/.pxd sources.
+ *
+ * Replaces arr->base with `base`: takes a new reference to `base` before
+ * dropping the old one (unless base is Py_None, in which case the base
+ * pointer is simply cleared), then releases the previous arr->base with
+ * Py_XDECREF and stores the new pointer. */
+static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
+  PyObject *__pyx_v_baseptr;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  int __pyx_t_2;
+  __Pyx_RefNannySetupContext("set_array_base", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968
+ * cdef inline void set_array_base(ndarray arr, object base):
+ *      cdef PyObject* baseptr
+ *      if base is None:             # <<<<<<<<<<<<<<
+ *          baseptr = NULL
+ *      else:
+ */
+  __pyx_t_1 = (__pyx_v_base == Py_None);
+  __pyx_t_2 = (__pyx_t_1 != 0);
+  if (__pyx_t_2) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+ *      cdef PyObject* baseptr
+ *      if base is None:
+ *          baseptr = NULL             # <<<<<<<<<<<<<<
+ *      else:
+ *          Py_INCREF(base) # important to do this before decref below!
+ */
+    __pyx_v_baseptr = NULL;
+    goto __pyx_L3;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
+ *          baseptr = NULL
+ *      else:
+ *          Py_INCREF(base) # important to do this before decref below!             # <<<<<<<<<<<<<<
+ *          baseptr = <PyObject*>base
+ *      Py_XDECREF(arr.base)
+ */
+    Py_INCREF(__pyx_v_base);
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
+ *      else:
+ *          Py_INCREF(base) # important to do this before decref below!
+ *          baseptr = <PyObject*>base             # <<<<<<<<<<<<<<
+ *      Py_XDECREF(arr.base)
+ *      arr.base = baseptr
+ */
+    __pyx_v_baseptr = ((PyObject *)__pyx_v_base);
+  }
+  __pyx_L3:;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973
+ *          Py_INCREF(base) # important to do this before decref below!
+ *          baseptr = <PyObject*>base
+ *      Py_XDECREF(arr.base)             # <<<<<<<<<<<<<<
+ *      arr.base = baseptr
+ * 
+ */
+  Py_XDECREF(__pyx_v_arr->base);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
+ *          baseptr = <PyObject*>base
+ *      Py_XDECREF(arr.base)
+ *      arr.base = baseptr             # <<<<<<<<<<<<<<
+ * 
+ * cdef inline object get_array_base(ndarray arr):
+ */
+  __pyx_v_arr->base = __pyx_v_baseptr;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
+ * 
+ * 
+ * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
+ *      cdef PyObject* baseptr
+ *      if base is None:
+ */
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+ *      arr.base = baseptr
+ * 
+ * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
+ *     if arr.base is NULL:
+ *         return None
+ */
+
+/* NOTE(review): Cython-generated code; do not hand-edit.
+ *
+ * Returns arr->base as a Python object (with a new reference), or Py_None
+ * (also with a new reference) when arr->base is NULL. Cannot fail. */
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
+  PyObject *__pyx_r = NULL;
+  __Pyx_RefNannyDeclarations
+  int __pyx_t_1;
+  __Pyx_RefNannySetupContext("get_array_base", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
+ * 
+ * cdef inline object get_array_base(ndarray arr):
+ *     if arr.base is NULL:             # <<<<<<<<<<<<<<
+ *         return None
+ *     else:
+ */
+  __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
+  if (__pyx_t_1) {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978
+ * cdef inline object get_array_base(ndarray arr):
+ *     if arr.base is NULL:
+ *         return None             # <<<<<<<<<<<<<<
+ *     else:
+ *         return <object>arr.base
+ */
+    __Pyx_XDECREF(__pyx_r);
+    __Pyx_INCREF(Py_None);
+    __pyx_r = Py_None;
+    goto __pyx_L0;
+  }
+  /*else*/ {
+
+    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
+ *         return None
+ *     else:
+ *         return <object>arr.base             # <<<<<<<<<<<<<<
+ */
+    __Pyx_XDECREF(__pyx_r);
+    __Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
+    __pyx_r = ((PyObject *)__pyx_v_arr->base);
+    goto __pyx_L0;
+  }
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+ *      arr.base = baseptr
+ * 
+ * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
+ *     if arr.base is NULL:
+ *         return None
+ */
+
+  /* function exit code */
+  __pyx_L0:;
+  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* Module method table: empty (sentinel only). The module's single public
+ * function is installed into the module dict at init time via
+ * PyCFunction_NewEx + PyDict_SetItem instead of being listed here. */
+static PyMethodDef __pyx_methods[] = {
+  {0, 0, 0, 0}
+};
+
+/* Python 3 module definition for "__subsample" (Python 2 uses
+ * Py_InitModule4 in the init function instead). m_size == -1: the module
+ * keeps state in globals and does not support sub-interpreters. */
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef __pyx_moduledef = {
+  #if PY_VERSION_HEX < 0x03020000
+    { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
+  #else
+    PyModuleDef_HEAD_INIT,
+  #endif
+    __Pyx_NAMESTR("__subsample"),
+    0, /* m_doc */
+    -1, /* m_size */
+    __pyx_methods /* m_methods */,
+    NULL, /* m_reload */
+    NULL, /* m_traverse */
+    NULL, /* m_clear */
+    NULL /* m_free */
+};
+#endif
+
+/* Table of all string constants used by the module, materialized once by
+ * __Pyx_InitStrings (called from __Pyx_InitGlobals). Each entry pairs the
+ * target PyObject* slot with the C literal and its length; the trailing
+ * integer fields are Cython encoding/intern flags (presumably
+ * is-unicode / is-str / intern -- confirm against __Pyx_StringTabEntry). */
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+  {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
+  {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
+  {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
+  {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
+  {&__pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_k_Users_jairideout_dev_scikit_bio, sizeof(__pyx_k_Users_jairideout_dev_scikit_bio), 0, 0, 1, 0},
+  {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
+  {&__pyx_n_s_cnt, __pyx_k_cnt, sizeof(__pyx_k_cnt), 0, 0, 1, 1},
+  {&__pyx_n_s_counts, __pyx_k_counts, sizeof(__pyx_k_counts), 0, 0, 1, 1},
+  {&__pyx_n_s_counts_sum, __pyx_k_counts_sum, sizeof(__pyx_k_counts_sum), 0, 0, 1, 1},
+  {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
+  {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1},
+  {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
+  {&__pyx_n_s_idx, __pyx_k_idx, sizeof(__pyx_k_idx), 0, 0, 1, 1},
+  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
+  {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
+  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
+  {&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1},
+  {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
+  {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
+  {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
+  {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
+  {&__pyx_n_s_permutation, __pyx_k_permutation, sizeof(__pyx_k_permutation), 0, 0, 1, 1},
+  {&__pyx_n_s_permuted, __pyx_k_permuted, sizeof(__pyx_k_permuted), 0, 0, 1, 1},
+  {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
+  {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1},
+  {&__pyx_n_s_random, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1},
+  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
+  {&__pyx_n_s_result, __pyx_k_result, sizeof(__pyx_k_result), 0, 0, 1, 1},
+  {&__pyx_n_s_skbio_stats___subsample, __pyx_k_skbio_stats___subsample, sizeof(__pyx_k_skbio_stats___subsample), 0, 0, 1, 1},
+  {&__pyx_n_s_subsample_counts_without_replac, __pyx_k_subsample_counts_without_replac, sizeof(__pyx_k_subsample_counts_without_replac), 0, 0, 1, 1},
+  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
+  {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
+  {&__pyx_n_s_unpacked, __pyx_k_unpacked, sizeof(__pyx_k_unpacked), 0, 0, 1, 1},
+  {&__pyx_n_s_unpacked_idx, __pyx_k_unpacked_idx, sizeof(__pyx_k_unpacked_idx), 0, 0, 1, 1},
+  {&__pyx_n_s_zeros_like, __pyx_k_zeros_like, sizeof(__pyx_k_zeros_like), 0, 0, 1, 1},
+  {0, 0, 0, 0, 0, 0, 0}
+};
+/* Caches the builtins this module uses (range, ValueError, RuntimeError)
+ * into module-level globals so later lookups are pointer loads.
+ * Returns 0 on success, -1 with a Python exception set on failure. */
+static int __Pyx_InitCachedBuiltins(void) {
+  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  return 0;
+  __pyx_L1_error:;
+  return -1;
+}
+
+/* Pre-builds constant objects used at runtime: the one-element argument
+ * tuples for the buffer-protocol error messages (raised from the numpy.pxd
+ * __getbuffer__ / _util_dtypestring code) and the varnames tuple + code
+ * object for _subsample_counts_without_replacement.
+ * Returns 0 on success, -1 with a Python exception set on failure. */
+static int __Pyx_InitCachedConstants(void) {
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215
+ *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
+ * 
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ */
+  __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple_);
+  __Pyx_GIVEREF(__pyx_tuple_);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219
+ *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
+ * 
+ *             info.buf = PyArray_DATA(self)
+ */
+  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__2);
+  __Pyx_GIVEREF(__pyx_tuple__2);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ *                 if ((descr.byteorder == c'>' and little_endian) or
+ *                     (descr.byteorder == c'<' and not little_endian)):
+ *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
+ *                 if   t == NPY_BYTE:        f = "b"
+ *                 elif t == NPY_UBYTE:       f = "B"
+ */
+  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__3);
+  __Pyx_GIVEREF(__pyx_tuple__3);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+ * 
+ *         if (end - f) - <int>(new_offset - offset[0]) < 15:
+ *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
+ * 
+ *         if ((child.byteorder == c'>' and little_endian) or
+ */
+  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__4);
+  __Pyx_GIVEREF(__pyx_tuple__4);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
+ *         if ((child.byteorder == c'>' and little_endian) or
+ *             (child.byteorder == c'<' and not little_endian)):
+ *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
+ *             # One could encode it in the format string and have Cython
+ *             # complain instead, BUT: < and > in format strings also imply
+ */
+  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__5);
+  __Pyx_GIVEREF(__pyx_tuple__5);
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+ *             t = child.type_num
+ *             if end - f < 5:
+ *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
+ * 
+ *             # Until ticket #99 is fixed, use integers to avoid warnings
+ */
+  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__6);
+  __Pyx_GIVEREF(__pyx_tuple__6);
+
+  /* "skbio/stats/__subsample.pyx":15
+ * 
+ * 
+ * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
+ *     cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
+ *     cdef:
+ */
+  __pyx_tuple__7 = PyTuple_Pack(11, __pyx_n_s_counts, __pyx_n_s_n, __pyx_n_s_counts_sum, __pyx_n_s_result, __pyx_n_s_permuted, __pyx_n_s_unpacked, __pyx_n_s_cnt, __pyx_n_s_unpacked_idx, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_idx); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_tuple__7);
+  __Pyx_GIVEREF(__pyx_tuple__7);
+  __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(3, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_n_s_subsample_counts_without_replac, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_RefNannyFinishContext();
+  return 0;
+  __pyx_L1_error:;
+  __Pyx_RefNannyFinishContext();
+  return -1;
+}
+
+/* Materializes every entry of __pyx_string_tab into live Python string
+ * objects. Returns 0 on success, -1 with a Python exception set on failure. */
+static int __Pyx_InitGlobals(void) {
+  if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  return 0;
+  __pyx_L1_error:;
+  return -1;
+}
+
+/* Module initialization entry point: init__subsample on Python 2 (returns
+ * void), PyInit___subsample on Python 3 (returns the module object or NULL).
+ * Sets up refnanny, interned strings, cached builtins/constants, imports the
+ * numpy extension types, executes the module body (import numpy as np; bind
+ * _subsample_counts_without_replacement; create __test__), and registers the
+ * module under "skbio.stats.__subsample" in sys.modules on Python 3. */
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC init__subsample(void); /*proto*/
+PyMODINIT_FUNC init__subsample(void)
+#else
+PyMODINIT_FUNC PyInit___subsample(void); /*proto*/
+PyMODINIT_FUNC PyInit___subsample(void)
+#endif
+{
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_lineno = 0;
+  const char *__pyx_filename = NULL;
+  int __pyx_clineno = 0;
+  __Pyx_RefNannyDeclarations
+  #if CYTHON_REFNANNY
+  __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+  if (!__Pyx_RefNanny) {
+      PyErr_Clear();
+      __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+      if (!__Pyx_RefNanny)
+          Py_FatalError("failed to import 'refnanny' module");
+  }
+  #endif
+  __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit___subsample(void)", 0);
+  if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #ifdef __Pyx_CyFunction_USED
+  if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
+  #ifdef __Pyx_FusedFunction_USED
+  if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
+  #ifdef __Pyx_Generator_USED
+  if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
+  /*--- Library function declarations ---*/
+  /*--- Threads initialization code ---*/
+  #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+  #ifdef WITH_THREAD /* Python build with threading support? */
+  PyEval_InitThreads();
+  #endif
+  #endif
+  /*--- Module creation code ---*/
+  #if PY_MAJOR_VERSION < 3
+  __pyx_m = Py_InitModule4(__Pyx_NAMESTR("__subsample"), __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+  #else
+  __pyx_m = PyModule_Create(&__pyx_moduledef);
+  #endif
+  if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  Py_INCREF(__pyx_d);
+  __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #if CYTHON_COMPILING_IN_PYPY
+  Py_INCREF(__pyx_b);
+  #endif
+  if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  /*--- Initialize various global constants etc. ---*/
+  if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
+  if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  #endif
+  if (__pyx_module_is_main_skbio__stats____subsample) {
+    if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  }
+  #if PY_MAJOR_VERSION >= 3
+  {
+    PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!PyDict_GetItemString(modules, "skbio.stats.__subsample")) {
+      if (unlikely(PyDict_SetItemString(modules, "skbio.stats.__subsample", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+  }
+  #endif
+  /*--- Builtin init code ---*/
+  if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  /*--- Constants init code ---*/
+  if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  /*--- Global init code ---*/
+  /*--- Variable export code ---*/
+  /*--- Function export code ---*/
+  /*--- Type init code ---*/
+  /*--- Type import code ---*/
+  __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", 
+  #if CYTHON_COMPILING_IN_PYPY
+  sizeof(PyTypeObject),
+  #else
+  sizeof(PyHeapTypeObject),
+  #endif
+  0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  /*--- Variable import code ---*/
+  /*--- Function import code ---*/
+  /*--- Execution code ---*/
+
+  /* "skbio/stats/__subsample.pyx":11
+ * from __future__ import absolute_import, division, print_function
+ * 
+ * import numpy as np             # <<<<<<<<<<<<<<
+ * cimport numpy as cnp
+ * 
+ */
+  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/stats/__subsample.pyx":15
+ * 
+ * 
+ * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
+ *     cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
+ *     cdef:
+ */
+  __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement, NULL, __pyx_n_s_skbio_stats___subsample); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_subsample_counts_without_replac, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "skbio/stats/__subsample.pyx":1
+ * # ----------------------------------------------------------------------------             # <<<<<<<<<<<<<<
+ * # Copyright (c) 2013--, scikit-bio development team.
+ * #
+ */
+  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+ *      arr.base = baseptr
+ * 
+ * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
+ *     if arr.base is NULL:
+ *         return None
+ */
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  if (__pyx_m) {
+    __Pyx_AddTraceback("init skbio.stats.__subsample", __pyx_clineno, __pyx_lineno, __pyx_filename);
+    Py_DECREF(__pyx_m); __pyx_m = 0;
+  } else if (!PyErr_Occurred()) {
+    PyErr_SetString(PyExc_ImportError, "init skbio.stats.__subsample");
+  }
+  __pyx_L0:;
+  __Pyx_RefNannyFinishContext();
+  #if PY_MAJOR_VERSION < 3
+  return;
+  #else
+  return __pyx_m;
+  #endif
+}
+
+/* Runtime support code */
+#if CYTHON_REFNANNY
+static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
+    PyObject *m = NULL, *p = NULL;
+    void *r = NULL;
+    m = PyImport_ImportModule((char *)modname);
+    if (!m) goto end;
+    p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
+    if (!p) goto end;
+    r = PyLong_AsVoidPtr(p);
+end:
+    Py_XDECREF(p);
+    Py_XDECREF(m);
+    return (__Pyx_RefNannyAPIStruct *)r;
+}
+#endif /* CYTHON_REFNANNY */
+
+static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
+    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
+    if (unlikely(!result)) {
+        PyErr_Format(PyExc_NameError,
+#if PY_MAJOR_VERSION >= 3
+            "name '%U' is not defined", name);
+#else
+            "name '%.200s' is not defined", PyString_AS_STRING(name));
+#endif
+    }
+    return result;
+}
+
+static void __Pyx_RaiseArgtupleInvalid(
+    const char* func_name,
+    int exact,
+    Py_ssize_t num_min,
+    Py_ssize_t num_max,
+    Py_ssize_t num_found)
+{
+    Py_ssize_t num_expected;
+    const char *more_or_less;
+    if (num_found < num_min) {
+        num_expected = num_min;
+        more_or_less = "at least";
+    } else {
+        num_expected = num_max;
+        more_or_less = "at most";
+    }
+    if (exact) {
+        more_or_less = "exactly";
+    }
+    PyErr_Format(PyExc_TypeError,
+                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
+                 func_name, more_or_less, num_expected,
+                 (num_expected == 1) ? "" : "s", num_found);
+}
+
+static void __Pyx_RaiseDoubleKeywordsError(
+    const char* func_name,
+    PyObject* kw_name)
+{
+    PyErr_Format(PyExc_TypeError,
+        #if PY_MAJOR_VERSION >= 3
+        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+        #else
+        "%s() got multiple values for keyword argument '%s'", func_name,
+        PyString_AsString(kw_name));
+        #endif
+}
+
+static int __Pyx_ParseOptionalKeywords(
+    PyObject *kwds,
+    PyObject **argnames[],
+    PyObject *kwds2,
+    PyObject *values[],
+    Py_ssize_t num_pos_args,
+    const char* function_name)
+{
+    PyObject *key = 0, *value = 0;
+    Py_ssize_t pos = 0;
+    PyObject*** name;
+    PyObject*** first_kw_arg = argnames + num_pos_args;
+    while (PyDict_Next(kwds, &pos, &key, &value)) {
+        name = first_kw_arg;
+        while (*name && (**name != key)) name++;
+        if (*name) {
+            values[name-argnames] = value;
+            continue;
+        }
+        name = first_kw_arg;
+        #if PY_MAJOR_VERSION < 3
+        if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
+            while (*name) {
+                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
+                        && _PyString_Eq(**name, key)) {
+                    values[name-argnames] = value;
+                    break;
+                }
+                name++;
+            }
+            if (*name) continue;
+            else {
+                PyObject*** argname = argnames;
+                while (argname != first_kw_arg) {
+                    if ((**argname == key) || (
+                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
+                             && _PyString_Eq(**argname, key))) {
+                        goto arg_passed_twice;
+                    }
+                    argname++;
+                }
+            }
+        } else
+        #endif
+        if (likely(PyUnicode_Check(key))) {
+            while (*name) {
+                int cmp = (**name == key) ? 0 :
+                #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+                    (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
+                #endif
+                    PyUnicode_Compare(**name, key);
+                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+                if (cmp == 0) {
+                    values[name-argnames] = value;
+                    break;
+                }
+                name++;
+            }
+            if (*name) continue;
+            else {
+                PyObject*** argname = argnames;
+                while (argname != first_kw_arg) {
+                    int cmp = (**argname == key) ? 0 :
+                    #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+                        (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
+                    #endif
+                        PyUnicode_Compare(**argname, key);
+                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+                    if (cmp == 0) goto arg_passed_twice;
+                    argname++;
+                }
+            }
+        } else
+            goto invalid_keyword_type;
+        if (kwds2) {
+            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+        } else {
+            goto invalid_keyword;
+        }
+    }
+    return 0;
+arg_passed_twice:
+    __Pyx_RaiseDoubleKeywordsError(function_name, key);
+    goto bad;
+invalid_keyword_type:
+    PyErr_Format(PyExc_TypeError,
+        "%.200s() keywords must be strings", function_name);
+    goto bad;
+invalid_keyword:
+    PyErr_Format(PyExc_TypeError,
+    #if PY_MAJOR_VERSION < 3
+        "%.200s() got an unexpected keyword argument '%.200s'",
+        function_name, PyString_AsString(key));
+    #else
+        "%s() got an unexpected keyword argument '%U'",
+        function_name, key);
+    #endif
+bad:
+    return -1;
+}
+
+static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
+    PyErr_Format(PyExc_TypeError,
+        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
+        name, type->tp_name, Py_TYPE(obj)->tp_name);
+}
+static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
+    const char *name, int exact)
+{
+    if (unlikely(!type)) {
+        PyErr_SetString(PyExc_SystemError, "Missing type object");
+        return 0;
+    }
+    if (none_allowed && obj == Py_None) return 1;
+    else if (exact) {
+        if (likely(Py_TYPE(obj) == type)) return 1;
+        #if PY_MAJOR_VERSION == 2
+        else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
+        #endif
+    }
+    else {
+        if (likely(PyObject_TypeCheck(obj, type))) return 1;
+    }
+    __Pyx_RaiseArgumentTypeInvalid(name, obj, type);
+    return 0;
+}
+
+static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
+  unsigned int n = 1;
+  return *(unsigned char*)(&n) != 0;
+}
+static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
+                              __Pyx_BufFmt_StackElem* stack,
+                              __Pyx_TypeInfo* type) {
+  stack[0].field = &ctx->root;
+  stack[0].parent_offset = 0;
+  ctx->root.type = type;
+  ctx->root.name = "buffer dtype";
+  ctx->root.offset = 0;
+  ctx->head = stack;
+  ctx->head->field = &ctx->root;
+  ctx->fmt_offset = 0;
+  ctx->head->parent_offset = 0;
+  ctx->new_packmode = '@';
+  ctx->enc_packmode = '@';
+  ctx->new_count = 1;
+  ctx->enc_count = 0;
+  ctx->enc_type = 0;
+  ctx->is_complex = 0;
+  ctx->is_valid_array = 0;
+  ctx->struct_alignment = 0;
+  while (type->typegroup == 'S') {
+    ++ctx->head;
+    ctx->head->field = type->fields;
+    ctx->head->parent_offset = 0;
+    type = type->fields->type;
+  }
+}
+static int __Pyx_BufFmt_ParseNumber(const char** ts) {
+    int count;
+    const char* t = *ts;
+    if (*t < '0' || *t > '9') {
+      return -1;
+    } else {
+        count = *t++ - '0';
+        while (*t >= '0' && *t < '9') {
+            count *= 10;
+            count += *t++ - '0';
+        }
+    }
+    *ts = t;
+    return count;
+}
+static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
+    int number = __Pyx_BufFmt_ParseNumber(ts);
+    if (number == -1) /* First char was not a digit */
+        PyErr_Format(PyExc_ValueError,\
+                     "Does not understand character buffer dtype format string ('%c')", **ts);
+    return number;
+}
+static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
+  PyErr_Format(PyExc_ValueError,
+               "Unexpected format string character: '%c'", ch);
+}
+static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
+  switch (ch) {
+    case 'c': return "'char'";
+    case 'b': return "'signed char'";
+    case 'B': return "'unsigned char'";
+    case 'h': return "'short'";
+    case 'H': return "'unsigned short'";
+    case 'i': return "'int'";
+    case 'I': return "'unsigned int'";
+    case 'l': return "'long'";
+    case 'L': return "'unsigned long'";
+    case 'q': return "'long long'";
+    case 'Q': return "'unsigned long long'";
+    case 'f': return (is_complex ? "'complex float'" : "'float'");
+    case 'd': return (is_complex ? "'complex double'" : "'double'");
+    case 'g': return (is_complex ? "'complex long double'" : "'long double'");
+    case 'T': return "a struct";
+    case 'O': return "Python object";
+    case 'P': return "a pointer";
+    case 's': case 'p': return "a string";
+    case 0: return "end";
+    default: return "unparseable format string";
+  }
+}
+static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
+  switch (ch) {
+    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+    case 'h': case 'H': return 2;
+    case 'i': case 'I': case 'l': case 'L': return 4;
+    case 'q': case 'Q': return 8;
+    case 'f': return (is_complex ? 8 : 4);
+    case 'd': return (is_complex ? 16 : 8);
+    case 'g': {
+      PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
+      return 0;
+    }
+    case 'O': case 'P': return sizeof(void*);
+    default:
+      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+      return 0;
+    }
+}
+static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
+  switch (ch) {
+    case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+    case 'h': case 'H': return sizeof(short);
+    case 'i': case 'I': return sizeof(int);
+    case 'l': case 'L': return sizeof(long);
+    #ifdef HAVE_LONG_LONG
+    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
+    #endif
+    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
+    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
+    case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
+    case 'O': case 'P': return sizeof(void*);
+    default: {
+      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+      return 0;
+    }
+  }
+}
+typedef struct { char c; short x; } __Pyx_st_short;
+typedef struct { char c; int x; } __Pyx_st_int;
+typedef struct { char c; long x; } __Pyx_st_long;
+typedef struct { char c; float x; } __Pyx_st_float;
+typedef struct { char c; double x; } __Pyx_st_double;
+typedef struct { char c; long double x; } __Pyx_st_longdouble;
+typedef struct { char c; void *x; } __Pyx_st_void_p;
+#ifdef HAVE_LONG_LONG
+typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
+#endif
+static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
+  switch (ch) {
+    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
+    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
+    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
+#ifdef HAVE_LONG_LONG
+    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
+#endif
+    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
+    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
+    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
+    case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
+    default:
+      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+      return 0;
+    }
+}
+/* These are for computing the padding at the end of the struct to align
+   on the first member of the struct. This will probably the same as above,
+   but we don't have any guarantees.
+ */
+typedef struct { short x; char c; } __Pyx_pad_short;
+typedef struct { int x; char c; } __Pyx_pad_int;
+typedef struct { long x; char c; } __Pyx_pad_long;
+typedef struct { float x; char c; } __Pyx_pad_float;
+typedef struct { double x; char c; } __Pyx_pad_double;
+typedef struct { long double x; char c; } __Pyx_pad_longdouble;
+typedef struct { void *x; char c; } __Pyx_pad_void_p;
+#ifdef HAVE_LONG_LONG
+typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
+#endif
+static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
+  switch (ch) {
+    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
+    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
+    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
+#ifdef HAVE_LONG_LONG
+    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
+#endif
+    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
+    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
+    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
+    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
+    default:
+      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+      return 0;
+    }
+}
+static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
+  switch (ch) {
+    case 'c':
+        return 'H';
+    case 'b': case 'h': case 'i':
+    case 'l': case 'q': case 's': case 'p':
+        return 'I';
+    case 'B': case 'H': case 'I': case 'L': case 'Q':
+        return 'U';
+    case 'f': case 'd': case 'g':
+        return (is_complex ? 'C' : 'R');
+    case 'O':
+        return 'O';
+    case 'P':
+        return 'P';
+    default: {
+      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+      return 0;
+    }
+  }
+}
+static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
+  if (ctx->head == NULL || ctx->head->field == &ctx->root) {
+    const char* expected;
+    const char* quote;
+    if (ctx->head == NULL) {
+      expected = "end";
+      quote = "";
+    } else {
+      expected = ctx->head->field->type->name;
+      quote = "'";
+    }
+    PyErr_Format(PyExc_ValueError,
+                 "Buffer dtype mismatch, expected %s%s%s but got %s",
+                 quote, expected, quote,
+                 __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
+  } else {
+    __Pyx_StructField* field = ctx->head->field;
+    __Pyx_StructField* parent = (ctx->head - 1)->field;
+    PyErr_Format(PyExc_ValueError,
+                 "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
+                 field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
+                 parent->type->name, field->name);
+  }
+}
+static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
+  char group;
+  size_t size, offset, arraysize = 1;
+  if (ctx->enc_type == 0) return 0;
+  if (ctx->head->field->type->arraysize[0]) {
+    int i, ndim = 0;
+    if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
+        ctx->is_valid_array = ctx->head->field->type->ndim == 1;
+        ndim = 1;
+        if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
+            PyErr_Format(PyExc_ValueError,
+                         "Expected a dimension of size %zu, got %zu",
+                         ctx->head->field->type->arraysize[0], ctx->enc_count);
+            return -1;
+        }
+    }
+    if (!ctx->is_valid_array) {
+      PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
+                   ctx->head->field->type->ndim, ndim);
+      return -1;
+    }
+    for (i = 0; i < ctx->head->field->type->ndim; i++) {
+      arraysize *= ctx->head->field->type->arraysize[i];
+    }
+    ctx->is_valid_array = 0;
+    ctx->enc_count = 1;
+  }
+  group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
+  do {
+    __Pyx_StructField* field = ctx->head->field;
+    __Pyx_TypeInfo* type = field->type;
+    if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
+      size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
+    } else {
+      size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
+    }
+    if (ctx->enc_packmode == '@') {
+      size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
+      size_t align_mod_offset;
+      if (align_at == 0) return -1;
+      align_mod_offset = ctx->fmt_offset % align_at;
+      if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
+      if (ctx->struct_alignment == 0)
+          ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
+                                                                 ctx->is_complex);
+    }
+    if (type->size != size || type->typegroup != group) {
+      if (type->typegroup == 'C' && type->fields != NULL) {
+        size_t parent_offset = ctx->head->parent_offset + field->offset;
+        ++ctx->head;
+        ctx->head->field = type->fields;
+        ctx->head->parent_offset = parent_offset;
+        continue;
+      }
+      if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
+      } else {
+          __Pyx_BufFmt_RaiseExpected(ctx);
+          return -1;
+      }
+    }
+    offset = ctx->head->parent_offset + field->offset;
+    if (ctx->fmt_offset != offset) {
+      PyErr_Format(PyExc_ValueError,
+                   "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
+                   (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
+      return -1;
+    }
+    ctx->fmt_offset += size;
+    if (arraysize)
+      ctx->fmt_offset += (arraysize - 1) * size;
+    --ctx->enc_count; /* Consume from buffer string */
+    while (1) {
+      if (field == &ctx->root) {
+        ctx->head = NULL;
+        if (ctx->enc_count != 0) {
+          __Pyx_BufFmt_RaiseExpected(ctx);
+          return -1;
+        }
+        break; /* breaks both loops as ctx->enc_count == 0 */
+      }
+      ctx->head->field = ++field;
+      if (field->type == NULL) {
+        --ctx->head;
+        field = ctx->head->field;
+        continue;
+      } else if (field->type->typegroup == 'S') {
+        size_t parent_offset = ctx->head->parent_offset + field->offset;
+        if (field->type->fields->type == NULL) continue; /* empty struct */
+        field = field->type->fields;
+        ++ctx->head;
+        ctx->head->field = field;
+        ctx->head->parent_offset = parent_offset;
+        break;
+      } else {
+        break;
+      }
+    }
+  } while (ctx->enc_count);
+  ctx->enc_type = 0;
+  ctx->is_complex = 0;
+  return 0;
+}
+static CYTHON_INLINE PyObject *
+__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
+{
+    const char *ts = *tsp;
+    int i = 0, number;
+    int ndim = ctx->head->field->type->ndim;
+;
+    ++ts;
+    if (ctx->new_count != 1) {
+        PyErr_SetString(PyExc_ValueError,
+                        "Cannot handle repeated arrays in format string");
+        return NULL;
+    }
+    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+    while (*ts && *ts != ')') {
+        switch (*ts) {
+            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':  continue;
+            default:  break;  /* not a 'break' in the loop */
+        }
+        number = __Pyx_BufFmt_ExpectNumber(&ts);
+        if (number == -1) return NULL;
+        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
+            return PyErr_Format(PyExc_ValueError,
+                        "Expected a dimension of size %zu, got %d",
+                        ctx->head->field->type->arraysize[i], number);
+        if (*ts != ',' && *ts != ')')
+            return PyErr_Format(PyExc_ValueError,
+                                "Expected a comma in format string, got '%c'", *ts);
+        if (*ts == ',') ts++;
+        i++;
+    }
+    if (i != ndim)
+        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
+                            ctx->head->field->type->ndim, i);
+    if (!*ts) {
+        PyErr_SetString(PyExc_ValueError,
+                        "Unexpected end of format string, expected ')'");
+        return NULL;
+    }
+    ctx->is_valid_array = 1;
+    ctx->new_count = 1;
+    *tsp = ++ts;
+    return Py_None;
+}
+static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
+  int got_Z = 0;
+  while (1) {
+    switch(*ts) {
+      case 0:
+        if (ctx->enc_type != 0 && ctx->head == NULL) {
+          __Pyx_BufFmt_RaiseExpected(ctx);
+          return NULL;
+        }
+        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+        if (ctx->head != NULL) {
+          __Pyx_BufFmt_RaiseExpected(ctx);
+          return NULL;
+        }
+        return ts;
+      case ' ':
+      case '\r':
+      case '\n':
+        ++ts;
+        break;
+      case '<':
+        if (!__Pyx_IsLittleEndian()) {
+          PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
+          return NULL;
+        }
+        ctx->new_packmode = '=';
+        ++ts;
+        break;
+      case '>':
+      case '!':
+        if (__Pyx_IsLittleEndian()) {
+          PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
+          return NULL;
+        }
+        ctx->new_packmode = '=';
+        ++ts;
+        break;
+      case '=':
+      case '@':
+      case '^':
+        ctx->new_packmode = *ts++;
+        break;
+      case 'T': /* substruct */
+        {
+          const char* ts_after_sub;
+          size_t i, struct_count = ctx->new_count;
+          size_t struct_alignment = ctx->struct_alignment;
+          ctx->new_count = 1;
+          ++ts;
+          if (*ts != '{') {
+            PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
+            return NULL;
+          }
+          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+          ctx->enc_type = 0; /* Erase processed last struct element */
+          ctx->enc_count = 0;
+          ctx->struct_alignment = 0;
+          ++ts;
+          ts_after_sub = ts;
+          for (i = 0; i != struct_count; ++i) {
+            ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
+            if (!ts_after_sub) return NULL;
+          }
+          ts = ts_after_sub;
+          if (struct_alignment) ctx->struct_alignment = struct_alignment;
+        }
+        break;
+      case '}': /* end of substruct; either repeat or move on */
+        {
+          size_t alignment = ctx->struct_alignment;
+          ++ts;
+          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+          ctx->enc_type = 0; /* Erase processed last struct element */
+          if (alignment && ctx->fmt_offset % alignment) {
+            ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
+          }
+        }
+        return ts;
+      case 'x':
+        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+        ctx->fmt_offset += ctx->new_count;
+        ctx->new_count = 1;
+        ctx->enc_count = 0;
+        ctx->enc_type = 0;
+        ctx->enc_packmode = ctx->new_packmode;
+        ++ts;
+        break;
+      case 'Z':
+        got_Z = 1;
+        ++ts;
+        if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
+          __Pyx_BufFmt_RaiseUnexpectedChar('Z');
+          return NULL;
+        }
+      case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
+      case 'l': case 'L': case 'q': case 'Q':
+      case 'f': case 'd': case 'g':
+      case 'O': case 'p':
+        if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
+            ctx->enc_packmode == ctx->new_packmode) {
+          ctx->enc_count += ctx->new_count;
+          ctx->new_count = 1;
+          got_Z = 0;
+          ++ts;
+          break;
+        }
+      case 's':
+        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+        ctx->enc_count = ctx->new_count;
+        ctx->enc_packmode = ctx->new_packmode;
+        ctx->enc_type = *ts;
+        ctx->is_complex = got_Z;
+        ++ts;
+        ctx->new_count = 1;
+        got_Z = 0;
+        break;
+      case ':':
+        ++ts;
+        while(*ts != ':') ++ts;
+        ++ts;
+        break;
+      case '(':
+        if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
+        break;
+      default:
+        {
+          int number = __Pyx_BufFmt_ExpectNumber(&ts);
+          if (number == -1) return NULL;
+          ctx->new_count = (size_t)number;
+        }
+    }
+  }
+}
+static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
+  buf->buf = NULL;
+  buf->obj = NULL;
+  buf->strides = __Pyx_zeros;
+  buf->shape = __Pyx_zeros;
+  buf->suboffsets = __Pyx_minusones;
+}
+static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
+        Py_buffer* buf, PyObject* obj,  __Pyx_TypeInfo* dtype, int flags,
+        int nd, int cast, __Pyx_BufFmt_StackElem* stack)
+{
+  if (obj == Py_None || obj == NULL) {
+    __Pyx_ZeroBuffer(buf);
+    return 0;
+  }
+  buf->buf = NULL;
+  if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
+  if (buf->ndim != nd) {
+    PyErr_Format(PyExc_ValueError,
+                 "Buffer has wrong number of dimensions (expected %d, got %d)",
+                 nd, buf->ndim);
+    goto fail;
+  }
+  if (!cast) {
+    __Pyx_BufFmt_Context ctx;
+    __Pyx_BufFmt_Init(&ctx, stack, dtype);
+    if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
+  }
+  if ((unsigned)buf->itemsize != dtype->size) {
+    PyErr_Format(PyExc_ValueError,
+      "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
+      buf->itemsize, (buf->itemsize > 1) ? "s" : "",
+      dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
+    goto fail;
+  }
+  if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
+  return 0;
+fail:;
+  __Pyx_ZeroBuffer(buf);
+  return -1;
+}
+static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
+  if (info->buf == NULL) return;
+  if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
+  __Pyx_ReleaseBuffer(info);
+}
+
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
+    PyObject *result;
+#if CYTHON_COMPILING_IN_CPYTHON
+    result = PyDict_GetItem(__pyx_d, name);
+    if (result) {
+        Py_INCREF(result);
+    } else {
+#else
+    result = PyObject_GetItem(__pyx_d, name);
+    if (!result) {
+        PyErr_Clear();
+#endif
+        result = __Pyx_GetBuiltinName(name);
+    }
+    return result;
+}
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
+    PyObject *result;
+    ternaryfunc call = func->ob_type->tp_call;
+    if (unlikely(!call))
+        return PyObject_Call(func, arg, kw);
+#if PY_VERSION_HEX >= 0x02060000
+    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+        return NULL;
+#endif
+    result = (*call)(func, arg, kw);
+#if PY_VERSION_HEX >= 0x02060000
+    Py_LeaveRecursiveCall();
+#endif
+    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+        PyErr_SetString(
+            PyExc_SystemError,
+            "NULL result without error in PyObject_Call");
+    }
+    return result;
+}
+#endif
+
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
+    if (unlikely(!type)) {
+        PyErr_SetString(PyExc_SystemError, "Missing type object");
+        return 0;
+    }
+    if (likely(PyObject_TypeCheck(obj, type)))
+        return 1;
+    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
+                 Py_TYPE(obj)->tp_name, type->tp_name);
+    return 0;
+}
+
+static void __Pyx_RaiseBufferFallbackError(void) {
+  PyErr_SetString(PyExc_ValueError,
+     "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
+}
+
+static void __Pyx_RaiseBufferIndexError(int axis) {
+  PyErr_Format(PyExc_IndexError,
+     "Out of bounds on buffer access (axis %d)", axis);
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
+        PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+        PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
+        int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    PyMappingMethods* mp;
+#if PY_MAJOR_VERSION < 3
+    PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
+    if (likely(ms && ms->sq_slice)) {
+        if (!has_cstart) {
+            if (_py_start && (*_py_start != Py_None)) {
+                cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
+                if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+            } else
+                cstart = 0;
+        }
+        if (!has_cstop) {
+            if (_py_stop && (*_py_stop != Py_None)) {
+                cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
+                if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+            } else
+                cstop = PY_SSIZE_T_MAX;
+        }
+        if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
+            Py_ssize_t l = ms->sq_length(obj);
+            if (likely(l >= 0)) {
+                if (cstop < 0) {
+                    cstop += l;
+                    if (cstop < 0) cstop = 0;
+                }
+                if (cstart < 0) {
+                    cstart += l;
+                    if (cstart < 0) cstart = 0;
+                }
+            } else {
+                if (PyErr_ExceptionMatches(PyExc_OverflowError))
+                    PyErr_Clear();
+                else
+                    goto bad;
+            }
+        }
+        return ms->sq_slice(obj, cstart, cstop);
+    }
+#endif
+    mp = Py_TYPE(obj)->tp_as_mapping;
+    if (likely(mp && mp->mp_subscript))
+#endif
+    {
+        PyObject* result;
+        PyObject *py_slice, *py_start, *py_stop;
+        if (_py_slice) {
+            py_slice = *_py_slice;
+        } else {
+            PyObject* owned_start = NULL;
+            PyObject* owned_stop = NULL;
+            if (_py_start) {
+                py_start = *_py_start;
+            } else {
+                if (has_cstart) {
+                    owned_start = py_start = PyInt_FromSsize_t(cstart);
+                    if (unlikely(!py_start)) goto bad;
+                } else
+                    py_start = Py_None;
+            }
+            if (_py_stop) {
+                py_stop = *_py_stop;
+            } else {
+                if (has_cstop) {
+                    owned_stop = py_stop = PyInt_FromSsize_t(cstop);
+                    if (unlikely(!py_stop)) {
+                        Py_XDECREF(owned_start);
+                        goto bad;
+                    }
+                } else
+                    py_stop = Py_None;
+            }
+            py_slice = PySlice_New(py_start, py_stop, Py_None);
+            Py_XDECREF(owned_start);
+            Py_XDECREF(owned_stop);
+            if (unlikely(!py_slice)) goto bad;
+        }
+#if CYTHON_COMPILING_IN_CPYTHON
+        result = mp->mp_subscript(obj, py_slice);
+#else
+        result = PyObject_GetItem(obj, py_slice);
+#endif
+        if (!_py_slice) {
+            Py_DECREF(py_slice);
+        }
+        return result;
+    }
+    PyErr_Format(PyExc_TypeError,
+        "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
+bad:
+    return NULL;
+}
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    PyObject *tmp_type, *tmp_value, *tmp_tb;
+    PyThreadState *tstate = PyThreadState_GET();
+    tmp_type = tstate->curexc_type;
+    tmp_value = tstate->curexc_value;
+    tmp_tb = tstate->curexc_traceback;
+    tstate->curexc_type = type;
+    tstate->curexc_value = value;
+    tstate->curexc_traceback = tb;
+    Py_XDECREF(tmp_type);
+    Py_XDECREF(tmp_value);
+    Py_XDECREF(tmp_tb);
+#else
+    PyErr_Restore(type, value, tb);
+#endif
+}
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
+#if CYTHON_COMPILING_IN_CPYTHON
+    PyThreadState *tstate = PyThreadState_GET();
+    *type = tstate->curexc_type;
+    *value = tstate->curexc_value;
+    *tb = tstate->curexc_traceback;
+    tstate->curexc_type = 0;
+    tstate->curexc_value = 0;
+    tstate->curexc_traceback = 0;
+#else
+    PyErr_Fetch(type, value, tb);
+#endif
+}
+
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
+                        CYTHON_UNUSED PyObject *cause) {
+    Py_XINCREF(type);
+    if (!value || value == Py_None)
+        value = NULL;
+    else
+        Py_INCREF(value);
+    if (!tb || tb == Py_None)
+        tb = NULL;
+    else {
+        Py_INCREF(tb);
+        if (!PyTraceBack_Check(tb)) {
+            PyErr_SetString(PyExc_TypeError,
+                "raise: arg 3 must be a traceback or None");
+            goto raise_error;
+        }
+    }
+    #if PY_VERSION_HEX < 0x02050000
+    if (PyClass_Check(type)) {
+    #else
+    if (PyType_Check(type)) {
+    #endif
+#if CYTHON_COMPILING_IN_PYPY
+        if (!value) {
+            Py_INCREF(Py_None);
+            value = Py_None;
+        }
+#endif
+        PyErr_NormalizeException(&type, &value, &tb);
+    } else {
+        if (value) {
+            PyErr_SetString(PyExc_TypeError,
+                "instance exception may not have a separate value");
+            goto raise_error;
+        }
+        value = type;
+        #if PY_VERSION_HEX < 0x02050000
+        if (PyInstance_Check(type)) {
+            type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+            Py_INCREF(type);
+        } else {
+            type = 0;
+            PyErr_SetString(PyExc_TypeError,
+                "raise: exception must be an old-style class or instance");
+            goto raise_error;
+        }
+        #else
+        type = (PyObject*) Py_TYPE(type);
+        Py_INCREF(type);
+        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+            PyErr_SetString(PyExc_TypeError,
+                "raise: exception class must be a subclass of BaseException");
+            goto raise_error;
+        }
+        #endif
+    }
+    __Pyx_ErrRestore(type, value, tb);
+    return;
+raise_error:
+    Py_XDECREF(value);
+    Py_XDECREF(type);
+    Py_XDECREF(tb);
+    return;
+}
+#else /* Python 3+ */
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+    PyObject* owned_instance = NULL;
+    if (tb == Py_None) {
+        tb = 0;
+    } else if (tb && !PyTraceBack_Check(tb)) {
+        PyErr_SetString(PyExc_TypeError,
+            "raise: arg 3 must be a traceback or None");
+        goto bad;
+    }
+    if (value == Py_None)
+        value = 0;
+    if (PyExceptionInstance_Check(type)) {
+        if (value) {
+            PyErr_SetString(PyExc_TypeError,
+                "instance exception may not have a separate value");
+            goto bad;
+        }
+        value = type;
+        type = (PyObject*) Py_TYPE(value);
+    } else if (PyExceptionClass_Check(type)) {
+        PyObject *instance_class = NULL;
+        if (value && PyExceptionInstance_Check(value)) {
+            instance_class = (PyObject*) Py_TYPE(value);
+            if (instance_class != type) {
+                if (PyObject_IsSubclass(instance_class, type)) {
+                    type = instance_class;
+                } else {
+                    instance_class = NULL;
+                }
+            }
+        }
+        if (!instance_class) {
+            PyObject *args;
+            if (!value)
+                args = PyTuple_New(0);
+            else if (PyTuple_Check(value)) {
+                Py_INCREF(value);
+                args = value;
+            } else
+                args = PyTuple_Pack(1, value);
+            if (!args)
+                goto bad;
+            owned_instance = PyObject_Call(type, args, NULL);
+            Py_DECREF(args);
+            if (!owned_instance)
+                goto bad;
+            value = owned_instance;
+            if (!PyExceptionInstance_Check(value)) {
+                PyErr_Format(PyExc_TypeError,
+                             "calling %R should have returned an instance of "
+                             "BaseException, not %R",
+                             type, Py_TYPE(value));
+                goto bad;
+            }
+        }
+    } else {
+        PyErr_SetString(PyExc_TypeError,
+            "raise: exception class must be a subclass of BaseException");
+        goto bad;
+    }
+#if PY_VERSION_HEX >= 0x03030000
+    if (cause) {
+#else
+    if (cause && cause != Py_None) {
+#endif
+        PyObject *fixed_cause;
+        if (cause == Py_None) {
+            fixed_cause = NULL;
+        } else if (PyExceptionClass_Check(cause)) {
+            fixed_cause = PyObject_CallObject(cause, NULL);
+            if (fixed_cause == NULL)
+                goto bad;
+        } else if (PyExceptionInstance_Check(cause)) {
+            fixed_cause = cause;
+            Py_INCREF(fixed_cause);
+        } else {
+            PyErr_SetString(PyExc_TypeError,
+                            "exception causes must derive from "
+                            "BaseException");
+            goto bad;
+        }
+        PyException_SetCause(value, fixed_cause);
+    }
+    PyErr_SetObject(type, value);
+    if (tb) {
+        PyThreadState *tstate = PyThreadState_GET();
+        PyObject* tmp_tb = tstate->curexc_traceback;
+        if (tb != tmp_tb) {
+            Py_INCREF(tb);
+            tstate->curexc_traceback = tb;
+            Py_XDECREF(tmp_tb);
+        }
+    }
+bad:
+    Py_XDECREF(owned_instance);
+    return;
+}
+#endif
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
+    PyErr_Format(PyExc_ValueError,
+                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
+}
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
+    PyErr_Format(PyExc_ValueError,
+                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
+                 index, (index == 1) ? "" : "s");
+}
+
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
+    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+}
+
+#if PY_MAJOR_VERSION < 3
+static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
+  #if PY_VERSION_HEX >= 0x02060000
+    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
+  #endif
+        if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
+  #if PY_VERSION_HEX < 0x02060000
+    if (obj->ob_type->tp_dict) {
+        PyObject *getbuffer_cobj = PyObject_GetItem(
+            obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer);
+        if (getbuffer_cobj) {
+            getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj);
+            Py_DECREF(getbuffer_cobj);
+            if (!func)
+                goto fail;
+            return func(obj, view, flags);
+        } else {
+            PyErr_Clear();
+        }
+    }
+  #endif
+    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
+#if PY_VERSION_HEX < 0x02060000
+fail:
+#endif
+    return -1;
+}
+static void __Pyx_ReleaseBuffer(Py_buffer *view) {
+    PyObject *obj = view->obj;
+    if (!obj) return;
+  #if PY_VERSION_HEX >= 0x02060000
+    if (PyObject_CheckBuffer(obj)) {
+        PyBuffer_Release(view);
+        return;
+    }
+  #endif
+        if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; }
+  #if PY_VERSION_HEX < 0x02060000
+    if (obj->ob_type->tp_dict) {
+        PyObject *releasebuffer_cobj = PyObject_GetItem(
+            obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer);
+        if (releasebuffer_cobj) {
+            releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj);
+            Py_DECREF(releasebuffer_cobj);
+            if (!func)
+                goto fail;
+            func(obj, view);
+            return;
+        } else {
+            PyErr_Clear();
+        }
+    }
+  #endif
+    goto nofail;
+#if PY_VERSION_HEX < 0x02060000
+fail:
+#endif
+    PyErr_WriteUnraisable(obj);
+nofail:
+    Py_DECREF(obj);
+    view->obj = NULL;
+}
+#endif /*  PY_MAJOR_VERSION < 3 */
+
+
+        static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+    PyObject *empty_list = 0;
+    PyObject *module = 0;
+    PyObject *global_dict = 0;
+    PyObject *empty_dict = 0;
+    PyObject *list;
+    #if PY_VERSION_HEX < 0x03030000
+    PyObject *py_import;
+    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
+    if (!py_import)
+        goto bad;
+    #endif
+    if (from_list)
+        list = from_list;
+    else {
+        empty_list = PyList_New(0);
+        if (!empty_list)
+            goto bad;
+        list = empty_list;
+    }
+    global_dict = PyModule_GetDict(__pyx_m);
+    if (!global_dict)
+        goto bad;
+    empty_dict = PyDict_New();
+    if (!empty_dict)
+        goto bad;
+    #if PY_VERSION_HEX >= 0x02050000
+    {
+        #if PY_MAJOR_VERSION >= 3
+        if (level == -1) {
+            if (strchr(__Pyx_MODULE_NAME, '.')) {
+                #if PY_VERSION_HEX < 0x03030000
+                PyObject *py_level = PyInt_FromLong(1);
+                if (!py_level)
+                    goto bad;
+                module = PyObject_CallFunctionObjArgs(py_import,
+                    name, global_dict, empty_dict, list, py_level, NULL);
+                Py_DECREF(py_level);
+                #else
+                module = PyImport_ImportModuleLevelObject(
+                    name, global_dict, empty_dict, list, 1);
+                #endif
+                if (!module) {
+                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
+                        goto bad;
+                    PyErr_Clear();
+                }
+            }
+            level = 0; /* try absolute import on failure */
+        }
+        #endif
+        if (!module) {
+            #if PY_VERSION_HEX < 0x03030000
+            PyObject *py_level = PyInt_FromLong(level);
+            if (!py_level)
+                goto bad;
+            module = PyObject_CallFunctionObjArgs(py_import,
+                name, global_dict, empty_dict, list, py_level, NULL);
+            Py_DECREF(py_level);
+            #else
+            module = PyImport_ImportModuleLevelObject(
+                name, global_dict, empty_dict, list, level);
+            #endif
+        }
+    }
+    #else
+    if (level>0) {
+        PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
+        goto bad;
+    }
+    module = PyObject_CallFunctionObjArgs(py_import,
+        name, global_dict, empty_dict, list, NULL);
+    #endif
+bad:
+    #if PY_VERSION_HEX < 0x03030000
+    Py_XDECREF(py_import);
+    #endif
+    Py_XDECREF(empty_list);
+    Py_XDECREF(empty_dict);
+    return module;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) {
+    const Py_intptr_t neg_one = (Py_intptr_t) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;
+    if (is_unsigned) {
+        if (sizeof(Py_intptr_t) < sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) {
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long long)) {
+            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        }
+    } else {
+        if (sizeof(Py_intptr_t) <= sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(Py_intptr_t) <= sizeof(long long)) {
+            return PyLong_FromLongLong((long long) value);
+        }
+    }
+    {
+        int one = 1; int little = (int)*(unsigned char *)&one;
+        unsigned char *bytes = (unsigned char *)&value;
+        return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t),
+                                     little, !is_unsigned);
+    }
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value) {
+    const npy_int64 neg_one = (npy_int64) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;
+    if (is_unsigned) {
+        if (sizeof(npy_int64) < sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(npy_int64) <= sizeof(unsigned long)) {
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        } else if (sizeof(npy_int64) <= sizeof(unsigned long long)) {
+            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        }
+    } else {
+        if (sizeof(npy_int64) <= sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(npy_int64) <= sizeof(long long)) {
+            return PyLong_FromLongLong((long long) value);
+        }
+    }
+    {
+        int one = 1; int little = (int)*(unsigned char *)&one;
+        unsigned char *bytes = (unsigned char *)&value;
+        return _PyLong_FromByteArray(bytes, sizeof(npy_int64),
+                                     little, !is_unsigned);
+    }
+}
+
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func)             \
+    {                                                                     \
+        func_type value = func(x);                                        \
+        if (sizeof(target_type) < sizeof(func_type)) {                    \
+            if (unlikely(value != (func_type) (target_type) value)) {     \
+                func_type zero = 0;                                       \
+                PyErr_SetString(PyExc_OverflowError,                      \
+                    (is_unsigned && unlikely(value < zero)) ?             \
+                    "can't convert negative value to " #target_type :     \
+                    "value too large to convert to " #target_type);       \
+                return (target_type) -1;                                  \
+            }                                                             \
+        }                                                                 \
+        return (target_type) value;                                       \
+    }
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) {
+    const Py_intptr_t neg_one = (Py_intptr_t) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_Check(x))) {
+        if (sizeof(Py_intptr_t) < sizeof(long)) {
+            __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyInt_AS_LONG)
+        } else {
+            long val = PyInt_AS_LONG(x);
+            if (is_unsigned && unlikely(val < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to Py_intptr_t");
+                return (Py_intptr_t) -1;
+            }
+            return (Py_intptr_t) val;
+        }
+    } else
+#endif
+    if (likely(PyLong_Check(x))) {
+        if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            if (sizeof(digit) <= sizeof(Py_intptr_t)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return (Py_intptr_t) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (unlikely(Py_SIZE(x) < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to Py_intptr_t");
+                return (Py_intptr_t) -1;
+            }
+            if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) {
+                __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, PyLong_AsUnsignedLong)
+            } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long long)) {
+                __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long long, PyLong_AsUnsignedLongLong)
+            }
+        } else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            if (sizeof(digit) <= sizeof(Py_intptr_t)) {
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return +(Py_intptr_t) ((PyLongObject*)x)->ob_digit[0];
+                    case -1: return -(Py_intptr_t) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (sizeof(Py_intptr_t) <= sizeof(long)) {
+                __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyLong_AsLong)
+            } else if (sizeof(Py_intptr_t) <= sizeof(long long)) {
+                __PYX_VERIFY_RETURN_INT(Py_intptr_t, long long, PyLong_AsLongLong)
+            }
+        }
+        {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+            PyErr_SetString(PyExc_RuntimeError,
+                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+            Py_intptr_t val;
+            PyObject *v = __Pyx_PyNumber_Int(x);
+ #if PY_MAJOR_VERSION < 3
+            if (likely(v) && !PyLong_Check(v)) {
+                PyObject *tmp = v;
+                v = PyNumber_Long(tmp);
+                Py_DECREF(tmp);
+            }
+ #endif
+            if (likely(v)) {
+                int one = 1; int is_little = (int)*(unsigned char *)&one;
+                unsigned char *bytes = (unsigned char *)&val;
+                int ret = _PyLong_AsByteArray((PyLongObject *)v,
+                                              bytes, sizeof(val),
+                                              is_little, !is_unsigned);
+                Py_DECREF(v);
+                if (likely(!ret))
+                    return val;
+            }
+#endif
+            return (Py_intptr_t) -1;
+        }
+    } else {
+        Py_intptr_t val;
+        PyObject *tmp = __Pyx_PyNumber_Int(x);
+        if (!tmp) return (Py_intptr_t) -1;
+        val = __Pyx_PyInt_As_Py_intptr_t(tmp);
+        Py_DECREF(tmp);
+        return val;
+    }
+}
+
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
+      return ::std::complex< float >(x, y);
+    }
+  #else
+    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
+      return x + y*(__pyx_t_float_complex)_Complex_I;
+    }
+  #endif
+#else
+    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
+      __pyx_t_float_complex z;
+      z.real = x;
+      z.imag = y;
+      return z;
+    }
+#endif
+
+#if CYTHON_CCOMPLEX
+#else
+    static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+       return (a.real == b.real) && (a.imag == b.imag);
+    }
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+        __pyx_t_float_complex z;
+        z.real = a.real + b.real;
+        z.imag = a.imag + b.imag;
+        return z;
+    }
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+        __pyx_t_float_complex z;
+        z.real = a.real - b.real;
+        z.imag = a.imag - b.imag;
+        return z;
+    }
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+        __pyx_t_float_complex z;
+        z.real = a.real * b.real - a.imag * b.imag;
+        z.imag = a.real * b.imag + a.imag * b.real;
+        return z;
+    }
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+        __pyx_t_float_complex z;
+        float denom = b.real * b.real + b.imag * b.imag;
+        z.real = (a.real * b.real + a.imag * b.imag) / denom;
+        z.imag = (a.imag * b.real - a.real * b.imag) / denom;
+        return z;
+    }
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
+        __pyx_t_float_complex z;
+        z.real = -a.real;
+        z.imag = -a.imag;
+        return z;
+    }
+    static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
+       return (a.real == 0) && (a.imag == 0);
+    }
+    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
+        __pyx_t_float_complex z;
+        z.real =  a.real;
+        z.imag = -a.imag;
+        return z;
+    }
+    #if 1
+        static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
+          #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
+            return sqrtf(z.real*z.real + z.imag*z.imag);
+          #else
+            return hypotf(z.real, z.imag);
+          #endif
+        }
+        static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
+            __pyx_t_float_complex z;
+            float r, lnr, theta, z_r, z_theta;
+            if (b.imag == 0 && b.real == (int)b.real) {
+                if (b.real < 0) {
+                    float denom = a.real * a.real + a.imag * a.imag;
+                    a.real = a.real / denom;
+                    a.imag = -a.imag / denom;
+                    b.real = -b.real;
+                }
+                switch ((int)b.real) {
+                    case 0:
+                        z.real = 1;
+                        z.imag = 0;
+                        return z;
+                    case 1:
+                        return a;
+                    case 2:
+                        z = __Pyx_c_prodf(a, a);
+                        return __Pyx_c_prodf(a, a);
+                    case 3:
+                        z = __Pyx_c_prodf(a, a);
+                        return __Pyx_c_prodf(z, a);
+                    case 4:
+                        z = __Pyx_c_prodf(a, a);
+                        return __Pyx_c_prodf(z, z);
+                }
+            }
+            if (a.imag == 0) {
+                if (a.real == 0) {
+                    return a;
+                }
+                r = a.real;
+                theta = 0;
+            } else {
+                r = __Pyx_c_absf(a);
+                theta = atan2f(a.imag, a.real);
+            }
+            lnr = logf(r);
+            z_r = expf(lnr * b.real - theta * b.imag);
+            z_theta = theta * b.real + lnr * b.imag;
+            z.real = z_r * cosf(z_theta);
+            z.imag = z_r * sinf(z_theta);
+            return z;
+        }
+    #endif
+#endif
+
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
+      return ::std::complex< double >(x, y);
+    }
+  #else
+    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
+      return x + y*(__pyx_t_double_complex)_Complex_I;
+    }
+  #endif
+#else
+    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
+      __pyx_t_double_complex z;
+      z.real = x;
+      z.imag = y;
+      return z;
+    }
+#endif
+
+#if CYTHON_CCOMPLEX
+#else
+    static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+       return (a.real == b.real) && (a.imag == b.imag);
+    }
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+        __pyx_t_double_complex z;
+        z.real = a.real + b.real;
+        z.imag = a.imag + b.imag;
+        return z;
+    }
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+        __pyx_t_double_complex z;
+        z.real = a.real - b.real;
+        z.imag = a.imag - b.imag;
+        return z;
+    }
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+        __pyx_t_double_complex z;
+        z.real = a.real * b.real - a.imag * b.imag;
+        z.imag = a.real * b.imag + a.imag * b.real;
+        return z;
+    }
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+        __pyx_t_double_complex z;
+        double denom = b.real * b.real + b.imag * b.imag;
+        z.real = (a.real * b.real + a.imag * b.imag) / denom;
+        z.imag = (a.imag * b.real - a.real * b.imag) / denom;
+        return z;
+    }
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
+        __pyx_t_double_complex z;
+        z.real = -a.real;
+        z.imag = -a.imag;
+        return z;
+    }
+    static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
+       return (a.real == 0) && (a.imag == 0);
+    }
+    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
+        __pyx_t_double_complex z;
+        z.real =  a.real;
+        z.imag = -a.imag;
+        return z;
+    }
+    #if 1
+        static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
+          #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
+            return sqrt(z.real*z.real + z.imag*z.imag);
+          #else
+            return hypot(z.real, z.imag);
+          #endif
+        }
+        static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
+            __pyx_t_double_complex z;
+            double r, lnr, theta, z_r, z_theta;
+            if (b.imag == 0 && b.real == (int)b.real) {
+                if (b.real < 0) {
+                    double denom = a.real * a.real + a.imag * a.imag;
+                    a.real = a.real / denom;
+                    a.imag = -a.imag / denom;
+                    b.real = -b.real;
+                }
+                switch ((int)b.real) {
+                    case 0:
+                        z.real = 1;
+                        z.imag = 0;
+                        return z;
+                    case 1:
+                        return a;
+                    case 2:
+                        z = __Pyx_c_prod(a, a);
+                        return __Pyx_c_prod(a, a);
+                    case 3:
+                        z = __Pyx_c_prod(a, a);
+                        return __Pyx_c_prod(z, a);
+                    case 4:
+                        z = __Pyx_c_prod(a, a);
+                        return __Pyx_c_prod(z, z);
+                }
+            }
+            if (a.imag == 0) {
+                if (a.real == 0) {
+                    return a;
+                }
+                r = a.real;
+                theta = 0;
+            } else {
+                r = __Pyx_c_abs(a);
+                theta = atan2(a.imag, a.real);
+            }
+            lnr = log(r);
+            z_r = exp(lnr * b.real - theta * b.imag);
+            z_theta = theta * b.real + lnr * b.imag;
+            z.real = z_r * cos(z_theta);
+            z.imag = z_r * sin(z_theta);
+            return z;
+        }
+    #endif
+#endif
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
+    const int neg_one = (int) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;
+    if (is_unsigned) {
+        if (sizeof(int) < sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(int) <= sizeof(unsigned long)) {
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        } else if (sizeof(int) <= sizeof(unsigned long long)) {
+            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        }
+    } else {
+        if (sizeof(int) <= sizeof(long)) {
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(int) <= sizeof(long long)) {
+            return PyLong_FromLongLong((long long) value);
+        }
+    }
+    {
+        int one = 1; int little = (int)*(unsigned char *)&one;
+        unsigned char *bytes = (unsigned char *)&value;
+        return _PyLong_FromByteArray(bytes, sizeof(int),
+                                     little, !is_unsigned);
+    }
+}
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {  /* Convert a Python number to C `int`; returns (int)-1 with a Python exception set on failure. */
+    const int neg_one = (int) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;  /* compile-time probe: nonzero iff `int` is an unsigned type on this platform */
+#if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_Check(x))) {  /* Python 2 small-int fast path */
+        if (sizeof(int) < sizeof(long)) {
+            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG)  /* widen through long, macro range-checks the downcast */
+        } else {
+            long val = PyInt_AS_LONG(x);
+            if (is_unsigned && unlikely(val < 0)) {  /* negative value cannot fit an unsigned target */
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to int");
+                return (int) -1;
+            }
+            return (int) val;
+        }
+    } else
+#endif
+    if (likely(PyLong_Check(x))) {
+        if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            if (sizeof(digit) <= sizeof(int)) {  /* zero/one-digit fast path reading CPython long internals directly */
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return (int) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (unlikely(Py_SIZE(x) < 0)) {  /* Py_SIZE < 0 means the PyLong is negative */
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to int");
+                return (int) -1;
+            }
+            if (sizeof(int) <= sizeof(unsigned long)) {  /* these sizeof branches fold to one at compile time */
+                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong)
+            } else if (sizeof(int) <= sizeof(unsigned long long)) {
+                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong)
+            }
+        } else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            if (sizeof(digit) <= sizeof(int)) {  /* signed one-digit fast path, sign taken from Py_SIZE */
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return +(int) ((PyLongObject*)x)->ob_digit[0];
+                    case -1: return -(int) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (sizeof(int) <= sizeof(long)) {
+                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong)
+            } else if (sizeof(int) <= sizeof(long long)) {
+                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong)
+            }
+        }
+        {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+            PyErr_SetString(PyExc_RuntimeError,
+                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+            int val;  /* last resort: copy the value byte-by-byte for targets wider than long long */
+            PyObject *v = __Pyx_PyNumber_Int(x);
+ #if PY_MAJOR_VERSION < 3
+            if (likely(v) && !PyLong_Check(v)) {  /* Py2: promote an int result to long for _PyLong_AsByteArray */
+                PyObject *tmp = v;
+                v = PyNumber_Long(tmp);
+                Py_DECREF(tmp);
+            }
+ #endif
+            if (likely(v)) {
+                int one = 1; int is_little = (int)*(unsigned char *)&one;  /* runtime endianness probe */
+                unsigned char *bytes = (unsigned char *)&val;
+                int ret = _PyLong_AsByteArray((PyLongObject *)v,
+                                              bytes, sizeof(val),
+                                              is_little, !is_unsigned);
+                Py_DECREF(v);
+                if (likely(!ret))
+                    return val;
+            }
+#endif
+            return (int) -1;  /* error already set by the failed conversion above */
+        }
+    } else {
+        int val;  /* not an int/long: coerce once via __int__/__long__, then retry on the result */
+        PyObject *tmp = __Pyx_PyNumber_Int(x);
+        if (!tmp) return (int) -1;
+        val = __Pyx_PyInt_As_int(tmp);
+        Py_DECREF(tmp);
+        return val;
+    }
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {  /* Build a Python int from a C long; generated template, so some branches are degenerate. */
+    const long neg_one = (long) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;  /* always 0 for `long`, kept by the shared template */
+    if (is_unsigned) {
+        if (sizeof(long) < sizeof(long)) {  /* never true; artifact of instantiating the template for `long` itself */
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(long) <= sizeof(unsigned long)) {
+            return PyLong_FromUnsignedLong((unsigned long) value);
+        } else if (sizeof(long) <= sizeof(unsigned long long)) {
+            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        }
+    } else {
+        if (sizeof(long) <= sizeof(long)) {  /* always true: the signed path reduces to PyInt_FromLong */
+            return PyInt_FromLong((long) value);
+        } else if (sizeof(long) <= sizeof(long long)) {
+            return PyLong_FromLongLong((long long) value);
+        }
+    }
+    {
+        int one = 1; int little = (int)*(unsigned char *)&one;  /* unreachable fallback: raw byte copy with runtime endianness probe */
+        unsigned char *bytes = (unsigned char *)&value;
+        return _PyLong_FromByteArray(bytes, sizeof(long),
+                                     little, !is_unsigned);
+    }
+}
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {  /* Convert a Python number to C `long`; returns (long)-1 with a Python exception set on failure. */
+    const long neg_one = (long) -1, const_zero = 0;
+    const int is_unsigned = neg_one > const_zero;  /* compile-time signedness probe (always 0 for `long`) */
+#if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_Check(x))) {  /* Python 2 small-int fast path */
+        if (sizeof(long) < sizeof(long)) {  /* never true; template artifact */
+            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG)
+        } else {
+            long val = PyInt_AS_LONG(x);
+            if (is_unsigned && unlikely(val < 0)) {
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to long");
+                return (long) -1;
+            }
+            return (long) val;
+        }
+    } else
+#endif
+    if (likely(PyLong_Check(x))) {
+        if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            if (sizeof(digit) <= sizeof(long)) {  /* zero/one-digit fast path reading CPython long internals */
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return (long) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (unlikely(Py_SIZE(x) < 0)) {  /* negative PyLong cannot fit an unsigned target */
+                PyErr_SetString(PyExc_OverflowError,
+                                "can't convert negative value to long");
+                return (long) -1;
+            }
+            if (sizeof(long) <= sizeof(unsigned long)) {  /* branches fold to one at compile time */
+                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong)
+            } else if (sizeof(long) <= sizeof(unsigned long long)) {
+                __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong)
+            }
+        } else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+            if (sizeof(digit) <= sizeof(long)) {  /* signed one-digit fast path, sign taken from Py_SIZE */
+                switch (Py_SIZE(x)) {
+                    case  0: return 0;
+                    case  1: return +(long) ((PyLongObject*)x)->ob_digit[0];
+                    case -1: return -(long) ((PyLongObject*)x)->ob_digit[0];
+                }
+            }
+ #endif
+#endif
+            if (sizeof(long) <= sizeof(long)) {  /* always true: reduces to PyLong_AsLong */
+                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong)
+            } else if (sizeof(long) <= sizeof(long long)) {
+                __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong)
+            }
+        }
+        {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+            PyErr_SetString(PyExc_RuntimeError,
+                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+            long val;  /* last resort: copy the value byte-by-byte */
+            PyObject *v = __Pyx_PyNumber_Int(x);
+ #if PY_MAJOR_VERSION < 3
+            if (likely(v) && !PyLong_Check(v)) {  /* Py2: promote int to long for _PyLong_AsByteArray */
+                PyObject *tmp = v;
+                v = PyNumber_Long(tmp);
+                Py_DECREF(tmp);
+            }
+ #endif
+            if (likely(v)) {
+                int one = 1; int is_little = (int)*(unsigned char *)&one;  /* runtime endianness probe */
+                unsigned char *bytes = (unsigned char *)&val;
+                int ret = _PyLong_AsByteArray((PyLongObject *)v,
+                                              bytes, sizeof(val),
+                                              is_little, !is_unsigned);
+                Py_DECREF(v);
+                if (likely(!ret))
+                    return val;
+            }
+#endif
+            return (long) -1;  /* error already set by the failed conversion above */
+        }
+    } else {
+        long val;  /* not an int/long: coerce once via __int__/__long__, then retry on the result */
+        PyObject *tmp = __Pyx_PyNumber_Int(x);
+        if (!tmp) return (long) -1;
+        val = __Pyx_PyInt_As_long(tmp);
+        Py_DECREF(tmp);
+        return val;
+    }
+}
+
+static int __Pyx_check_binary_version(void) {  /* Warn (once, at import) if the module was compiled against a different Python X.Y than it runs on. */
+    char ctversion[4], rtversion[4];  /* 4 bytes hold "X.Y\0"; PyOS_snprintf truncates, so a two-digit minor (e.g. "3.10") is cut to "3.1" */
+    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());  /* Py_GetVersion() starts with "X.Y.Z ..." */
+    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {  /* compares major digit and first minor digit only */
+        char message[200];
+        PyOS_snprintf(message, sizeof(message),
+                      "compiletime version %s of module '%.100s' "
+                      "does not match runtime version %s",
+                      ctversion, __Pyx_MODULE_NAME, rtversion);
+        #if PY_VERSION_HEX < 0x02050000
+        return PyErr_Warn(NULL, message);  /* PyErr_WarnEx only exists from 2.5 on */
+        #else
+        return PyErr_WarnEx(NULL, message, 1);
+        #endif
+    }
+    return 0;  /* 0 = versions match or warning not raised as error */
+}
+
+#ifndef __PYX_HAVE_RT_ImportModule
+#define __PYX_HAVE_RT_ImportModule
+static PyObject *__Pyx_ImportModule(const char *name) {  /* Import a module by C-string name; returns a new reference or NULL with an exception set. */
+    PyObject *py_name = 0;
+    PyObject *py_module = 0;
+    py_name = __Pyx_PyIdentifier_FromString(name);  /* str on Py3, bytes-str on Py2 */
+    if (!py_name)
+        goto bad;
+    py_module = PyImport_Import(py_name);
+    Py_DECREF(py_name);
+    return py_module;  /* may be NULL if the import itself failed; exception is already set */
+bad:
+    Py_XDECREF(py_name);
+    return 0;
+}
+#endif
+
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,  /* Fetch an extension type from another module and verify its struct size matches. */
+    size_t size, int strict)
+{
+    PyObject *py_module = 0;
+    PyObject *result = 0;
+    PyObject *py_name = 0;
+    char warning[200];
+    Py_ssize_t basicsize;
+#ifdef Py_LIMITED_API
+    PyObject *py_basicsize;  /* limited API cannot touch tp_basicsize directly */
+#endif
+    py_module = __Pyx_ImportModule(module_name);
+    if (!py_module)
+        goto bad;
+    py_name = __Pyx_PyIdentifier_FromString(class_name);
+    if (!py_name)
+        goto bad;
+    result = PyObject_GetAttr(py_module, py_name);
+    Py_DECREF(py_name);
+    py_name = 0;  /* zeroed so the bad: path does not double-decref */
+    Py_DECREF(py_module);
+    py_module = 0;
+    if (!result)
+        goto bad;
+    if (!PyType_Check(result)) {
+        PyErr_Format(PyExc_TypeError,
+            "%.200s.%.200s is not a type object",
+            module_name, class_name);
+        goto bad;
+    }
+#ifndef Py_LIMITED_API
+    basicsize = ((PyTypeObject *)result)->tp_basicsize;  /* instance struct size as the other module compiled it */
+#else
+    py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
+    if (!py_basicsize)
+        goto bad;
+    basicsize = PyLong_AsSsize_t(py_basicsize);
+    Py_DECREF(py_basicsize);
+    py_basicsize = 0;
+    if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
+        goto bad;
+#endif
+    if (!strict && (size_t)basicsize > size) {  /* non-strict mode tolerates a grown struct with a warning */
+        PyOS_snprintf(warning, sizeof(warning),
+            "%s.%s size changed, may indicate binary incompatibility",
+            module_name, class_name);
+        #if PY_VERSION_HEX < 0x02050000
+        if (PyErr_Warn(NULL, warning) < 0) goto bad;
+        #else
+        if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
+        #endif
+    }
+    else if ((size_t)basicsize != size) {  /* any other mismatch is a hard error: layouts are incompatible */
+        PyErr_Format(PyExc_ValueError,
+            "%.200s.%.200s has the wrong size, try recompiling",
+            module_name, class_name);
+        goto bad;
+    }
+    return (PyTypeObject *)result;  /* new reference */
+bad:
+    Py_XDECREF(py_module);
+    Py_XDECREF(result);
+    return NULL;
+}
+#endif
+
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {  /* Binary search: index of code_line in the sorted cache, or the insertion point if absent. */
+    int start = 0, mid = 0, end = count - 1;
+    if (end >= 0 && code_line > entries[end].code_line) {  /* past the last entry: insertion point is `count` */
+        return count;
+    }
+    while (start < end) {
+        mid = (start + end) / 2;
+        if (code_line < entries[mid].code_line) {
+            end = mid;
+        } else if (code_line > entries[mid].code_line) {
+             start = mid + 1;
+        } else {
+            return mid;  /* exact hit */
+        }
+    }
+    if (code_line <= entries[mid].code_line) {  /* start == end; decide which side of `mid` the line falls on */
+        return mid;
+    } else {
+        return mid + 1;
+    }
+}
+static PyCodeObject *__pyx_find_code_object(int code_line) {  /* Look up a cached code object for a source line; returns a new reference or NULL on miss. */
+    PyCodeObject* code_object;
+    int pos;
+    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {  /* line 0 is "unknown"; cache may be unallocated */
+        return NULL;
+    }
+    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {  /* insertion point, not a hit */
+        return NULL;
+    }
+    code_object = __pyx_code_cache.entries[pos].code_object;
+    Py_INCREF(code_object);  /* caller owns the returned reference */
+    return code_object;
+}
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {  /* Insert/replace a code object in the sorted cache; allocation failures are silently ignored (cache is best-effort). */
+    int pos, i;
+    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+    if (unlikely(!code_line)) {  /* line 0 is never cached */
+        return;
+    }
+    if (unlikely(!entries)) {  /* first insertion: allocate the initial 64-slot table */
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+        if (likely(entries)) {
+            __pyx_code_cache.entries = entries;
+            __pyx_code_cache.max_count = 64;
+            __pyx_code_cache.count = 1;
+            entries[0].code_line = code_line;
+            entries[0].code_object = code_object;
+            Py_INCREF(code_object);  /* cache holds its own reference */
+        }
+        return;
+    }
+    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {  /* line already cached: swap the stored object */
+        PyCodeObject* tmp = entries[pos].code_object;
+        entries[pos].code_object = code_object;
+        Py_DECREF(tmp);
+        return;
+    }
+    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {  /* full: grow linearly by 64 slots */
+        int new_max = __pyx_code_cache.max_count + 64;
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
+        if (unlikely(!entries)) {
+            return;  /* drop the insertion; old table is still valid after failed realloc */
+        }
+        __pyx_code_cache.entries = entries;
+        __pyx_code_cache.max_count = new_max;
+    }
+    for (i=__pyx_code_cache.count; i>pos; i--) {  /* shift tail right to keep entries sorted by code_line */
+        entries[i] = entries[i-1];
+    }
+    entries[pos].code_line = code_line;
+    entries[pos].code_object = code_object;
+    __pyx_code_cache.count++;
+    Py_INCREF(code_object);
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(  /* Build a dummy, empty code object so C-level frames show up in Python tracebacks. */
+            const char *funcname, int c_line,
+            int py_line, const char *filename) {
+    PyCodeObject *py_code = 0;
+    PyObject *py_srcfile = 0;
+    PyObject *py_funcname = 0;
+    #if PY_MAJOR_VERSION < 3
+    py_srcfile = PyString_FromString(filename);
+    #else
+    py_srcfile = PyUnicode_FromString(filename);
+    #endif
+    if (!py_srcfile) goto bad;
+    if (c_line) {  /* if the C line is known, embed "name (file.c:line)" in the displayed function name */
+        #if PY_MAJOR_VERSION < 3
+        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+        #else
+        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+        #endif
+    }
+    else {
+        #if PY_MAJOR_VERSION < 3
+        py_funcname = PyString_FromString(funcname);
+        #else
+        py_funcname = PyUnicode_FromString(funcname);
+        #endif
+    }
+    if (!py_funcname) goto bad;
+    py_code = __Pyx_PyCode_New(  /* all-zero/empty fields: only filename, name and firstlineno matter for traceback display */
+        0,            /*int argcount,*/
+        0,            /*int kwonlyargcount,*/
+        0,            /*int nlocals,*/
+        0,            /*int stacksize,*/
+        0,            /*int flags,*/
+        __pyx_empty_bytes, /*PyObject *code,*/
+        __pyx_empty_tuple, /*PyObject *consts,*/
+        __pyx_empty_tuple, /*PyObject *names,*/
+        __pyx_empty_tuple, /*PyObject *varnames,*/
+        __pyx_empty_tuple, /*PyObject *freevars,*/
+        __pyx_empty_tuple, /*PyObject *cellvars,*/
+        py_srcfile,   /*PyObject *filename,*/
+        py_funcname,  /*PyObject *name,*/
+        py_line,      /*int firstlineno,*/
+        __pyx_empty_bytes  /*PyObject *lnotab*/
+    );
+    Py_DECREF(py_srcfile);
+    Py_DECREF(py_funcname);
+    return py_code;  /* new reference, or NULL with an exception set */
+bad:
+    Py_XDECREF(py_srcfile);
+    Py_XDECREF(py_funcname);
+    return NULL;
+}
+static void __Pyx_AddTraceback(const char *funcname, int c_line,  /* Append a synthetic frame to the current exception's traceback; best-effort, never raises. */
+                               int py_line, const char *filename) {
+    PyCodeObject *py_code = 0;
+    PyObject *py_globals = 0;
+    PyFrameObject *py_frame = 0;
+    py_code = __pyx_find_code_object(c_line ? c_line : py_line);  /* reuse a cached dummy code object keyed by line */
+    if (!py_code) {
+        py_code = __Pyx_CreateCodeObjectForTraceback(
+            funcname, c_line, py_line, filename);
+        if (!py_code) goto bad;
+        __pyx_insert_code_object(c_line ? c_line : py_line, py_code);
+    }
+    py_globals = PyModule_GetDict(__pyx_m);  /* borrowed reference */
+    if (!py_globals) goto bad;
+    py_frame = PyFrame_New(
+        PyThreadState_GET(), /*PyThreadState *tstate,*/
+        py_code,             /*PyCodeObject *code,*/
+        py_globals,          /*PyObject *globals,*/
+        0                    /*PyObject *locals*/
+    );
+    if (!py_frame) goto bad;
+    py_frame->f_lineno = py_line;  /* the line shown in the traceback */
+    PyTraceBack_Here(py_frame);
+bad:
+    Py_XDECREF(py_code);
+    Py_XDECREF(py_frame);
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {  /* Materialize every entry of the module's compiled string table; -1 on allocation failure. */
+    while (t->p) {  /* table is terminated by an entry whose target pointer is NULL */
+        #if PY_MAJOR_VERSION < 3
+        if (t->is_unicode) {
+            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);  /* n includes the trailing NUL, hence n - 1 */
+        } else if (t->intern) {
+            *t->p = PyString_InternFromString(t->s);
+        } else {
+            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+        }
+        #else  /* Python 3+ has unicode identifiers */
+        if (t->is_unicode | t->is_str) {
+            if (t->intern) {
+                *t->p = PyUnicode_InternFromString(t->s);
+            } else if (t->encoding) {
+                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+            } else {
+                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+            }
+        } else {
+            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);  /* non-text entries become bytes on Py3 */
+        }
+        #endif
+        if (!*t->p)
+            return -1;  /* exception already set by the failed constructor */
+        ++t;
+    }
+    return 0;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {  /* Build a unicode object from a NUL-terminated C string. */
+    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {  /* Length-discarding wrapper around __Pyx_PyObject_AsStringAndSize. */
+    Py_ssize_t ignore;
+    return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {  /* Borrowed char* view of a str/bytes/bytearray; NULL with an exception on failure. */
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+    if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+            __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+            PyUnicode_Check(o)) {  /* unicode path: encode with the configured default encoding */
+#if PY_VERSION_HEX < 0x03030000
+        char* defenc_c;
+        PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);  /* cached on the unicode object, hence borrowed buffer */
+        if (!defenc) return NULL;
+        defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+        {
+            char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+            char* c;
+            for (c = defenc_c; c < end; c++) {
+                if ((unsigned char) (*c) >= 128) {  /* non-ASCII byte: raise via the ASCII codec for a proper error */
+                    PyUnicode_AsASCIIString(o);
+                    return NULL;
+                }
+            }
+        }
+#endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/
+        *length = PyBytes_GET_SIZE(defenc);
+        return defenc_c;
+#else /* PY_VERSION_HEX < 0x03030000 */
+        if (PyUnicode_READY(o) == -1) return NULL;  /* force PEP 393 compact representation */
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+        if (PyUnicode_IS_ASCII(o)) {
+            *length = PyUnicode_GET_LENGTH(o);  /* ASCII: char count == byte count */
+            return PyUnicode_AsUTF8(o);
+        } else {
+            PyUnicode_AsASCIIString(o);  /* called only to set a UnicodeEncodeError */
+            return NULL;
+        }
+#else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
+        return PyUnicode_AsUTF8AndSize(o, length);
+#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
+#endif /* PY_VERSION_HEX < 0x03030000 */
+    } else
+#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII  || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */
+#if !CYTHON_COMPILING_IN_PYPY
+#if PY_VERSION_HEX >= 0x02060000
+    if (PyByteArray_Check(o)) {  /* bytearray exposes its buffer directly */
+        *length = PyByteArray_GET_SIZE(o);
+        return PyByteArray_AS_STRING(o);
+    } else
+#endif
+#endif
+    {
+        char* result;
+        int r = PyBytes_AsStringAndSize(o, &result, length);  /* bytes (or anything convertible); sets TypeError otherwise */
+        if (unlikely(r < 0)) {
+            return NULL;
+        } else {
+            return result;
+        }
+    }
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {  /* Truth test with identity fast paths for True/False/None; falls back to PyObject_IsTrue. */
+   int is_true = x == Py_True;
+   if (is_true | (x == Py_False) | (x == Py_None)) return is_true;  /* bitwise | avoids branches for the three singletons */
+   else return PyObject_IsTrue(x);
+}
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {  /* Coerce x to a Python int/long via its number protocol; NULL with TypeError if not possible. */
+  PyNumberMethods *m;
+  const char *name = NULL;  /* "int" or "long", used only for the error message */
+  PyObject *res = NULL;
+#if PY_MAJOR_VERSION < 3
+  if (PyInt_Check(x) || PyLong_Check(x))
+#else
+  if (PyLong_Check(x))
+#endif
+    return Py_INCREF(x), x;  /* already an integer: return a new reference to it unchanged */
+  m = Py_TYPE(x)->tp_as_number;
+#if PY_MAJOR_VERSION < 3
+  if (m && m->nb_int) {  /* prefer __int__, fall back to __long__ on Py2 */
+    name = "int";
+    res = PyNumber_Int(x);
+  }
+  else if (m && m->nb_long) {
+    name = "long";
+    res = PyNumber_Long(x);
+  }
+#else
+  if (m && m->nb_int) {
+    name = "int";
+    res = PyNumber_Long(x);
+  }
+#endif
+  if (res) {
+#if PY_MAJOR_VERSION < 3
+    if (!PyInt_Check(res) && !PyLong_Check(res)) {  /* guard against a misbehaving __int__/__long__ returning a non-integer */
+#else
+    if (!PyLong_Check(res)) {
+#endif
+      PyErr_Format(PyExc_TypeError,
+                   "__%.4s__ returned non-%.4s (type %.200s)",
+                   name, name, Py_TYPE(res)->tp_name);
+      Py_DECREF(res);
+      return NULL;
+    }
+  }
+  else if (!PyErr_Occurred()) {  /* no nb_int/nb_long slot at all */
+    PyErr_SetString(PyExc_TypeError,
+                    "an integer is required");
+  }
+  return res;
+}
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+ #if CYTHON_USE_PYLONG_INTERNALS
+  #include "longintrepr.h"
+ #endif
+#endif
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {  /* Convert an index-like object to Py_ssize_t; -1 (with exception) on failure. */
+  Py_ssize_t ival;
+  PyObject *x;
+#if PY_MAJOR_VERSION < 3
+  if (likely(PyInt_CheckExact(b)))  /* Py2 small-int fast path */
+      return PyInt_AS_LONG(b);
+#endif
+  if (likely(PyLong_CheckExact(b))) {
+    #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+     #if CYTHON_USE_PYLONG_INTERNALS
+       switch (Py_SIZE(b)) {  /* zero/one-digit fast path via CPython long internals */
+       case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0];
+       case  0: return 0;
+       case  1: return ((PyLongObject*)b)->ob_digit[0];
+       }
+     #endif
+    #endif
+  #if PY_VERSION_HEX < 0x02060000
+    return PyInt_AsSsize_t(b);  /* PyLong_AsSsize_t only exists from 2.6 on */
+  #else
+    return PyLong_AsSsize_t(b);
+  #endif
+  }
+  x = PyNumber_Index(b);  /* generic path: honor __index__ */
+  if (!x) return -1;
+  ival = PyInt_AsSsize_t(x);
+  Py_DECREF(x);
+  return ival;
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {  /* Build a Python int from a C size_t, handling values above LONG_MAX on old Pythons. */
+#if PY_VERSION_HEX < 0x02050000
+   if (ival <= LONG_MAX)
+       return PyInt_FromLong((long)ival);
+   else {  /* pre-2.5 lacks PyInt_FromSize_t: copy raw bytes into an unsigned PyLong */
+       unsigned char *bytes = (unsigned char *) &ival;
+       int one = 1; int little = (int)*(unsigned char*)&one;  /* runtime endianness probe */
+       return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
+   }
+#else
+   return PyInt_FromSize_t(ival);
+#endif
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/skbio/stats/__subsample.pyx b/skbio/stats/__subsample.pyx
new file mode 100644
index 0000000..4a04943
--- /dev/null
+++ b/skbio/stats/__subsample.pyx
@@ -0,0 +1,36 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+cimport numpy as cnp
+
+
+def _subsample_counts_without_replacement(  # Subsample `n` observations without replacement from the histogram `counts` (which sums to counts_sum).
+    cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
+    cdef:
+        cnp.ndarray[cnp.int64_t, ndim=1] result, permuted, unpacked
+        cnp.int64_t cnt
+        Py_ssize_t unpacked_idx, i, j
+
+    unpacked = np.empty(counts_sum, dtype=int)  # NOTE(review): dtype=int is the platform C long; assumed 64-bit to match the int64_t buffer view — not true on Windows, confirm
+    unpacked_idx = 0
+    for i in range(counts.shape[0]):  # expand the histogram into one entry per individual observation
+        cnt = counts[i]
+        for j in range(cnt):
+            unpacked[unpacked_idx] = i
+            unpacked_idx += 1
+
+    permuted = np.random.permutation(unpacked)[:n]  # shuffle all observations, keep the first n
+
+    result = np.zeros_like(counts)
+    for idx in range(permuted.shape[0]):  # re-bin the sampled observations; idx is untyped (Python-level loop)
+        result[permuted[idx]] += 1
+
+    return result  # int64 array, same shape as counts, summing to n
diff --git a/skbio/stats/_misc.py b/skbio/stats/_misc.py
new file mode 100644
index 0000000..14d5684
--- /dev/null
+++ b/skbio/stats/_misc.py
@@ -0,0 +1,74 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import warnings
+
+import numpy as np
+
+
+def p_value_to_str(p_value, permutations):  # deprecated helper, slated for removal in 0.3.0 (see note below)
+    """Format p-value as a string with the correct number of decimals.
+
+    .. note:: Deprecated in scikit-bio 0.2.1-dev
+       ``p_value_to_str`` will be removed in scikit-bio 0.3.0.
+       Permutation-based p-values in scikit-bio are calculated as
+       ``(num_extreme + 1) / (num_permutations + 1)``, so it is impossible to
+       obtain a p-value of zero. This function historically existed for
+       correcting the number of digits displayed when obtaining a p-value of
+       zero. Since this is no longer possible, this functionality will be
+       removed.
+
+    Number of decimals is determined by the number of permutations.
+
+    Parameters
+    ----------
+    p_value : float or None
+        p-value to convert to string.
+    permutations : int
+        Number of permutations used to calculate `p_value`.
+
+    Returns
+    -------
+    str
+        `p_value` formatted as a string with the correct number of decimals. If
+        `p_value` is ``None`` or ``np.nan``, ``'N/A'`` is returned. If
+        `permutations` is less than 10, a message stating insufficient number
+        of permutations is returned.
+    """
+    warnings.warn(  # warn on every call so downstream users migrate before 0.3.0
+        "skbio.stats.p_value_to_str is deprecated and will be removed in "
+        "scikit-bio 0.3.0. There are no plans to provide a replacement for "
+        "this functionality.", DeprecationWarning)
+
+    if p_value is None or np.isnan(p_value):  # missing/undefined p-value
+        result = 'N/A'
+    elif permutations < 10:  # < 10 permutations cannot resolve even one decimal place
+        result = ('Too few permutations to compute p-value (permutations '
+                  '= %d)' % permutations)
+    else:
+        decimal_places = int(np.log10(permutations + 1))  # e.g. 999 permutations -> 3 decimal places
+        result = ('%1.' + '%df' % decimal_places) % p_value  # builds a format like '%1.3f', then applies it
+
+    return result
+
+
+def _pprint_strs(strs, max_chars=80, delimiter=', ', suffix='...',):
+    """Pretty-print an iterable of strings, truncating if necessary."""
+    # Adapted from http://stackoverflow.com/a/250373
+    joined_str = delimiter.join(repr(s) for s in strs)  # repr() adds quotes around each element
+
+    if len(joined_str) > max_chars:  # truncate on element boundaries, never mid-element
+        truncated = joined_str[:max_chars + 1].split(delimiter)[0:-1]  # drop the element the slice cut in half
+        joined_str = delimiter.join(truncated)
+        if joined_str:  # empty when even the first element alone exceeds max_chars
+            joined_str += delimiter
+        joined_str += suffix  # e.g. "'a', 'b', ..."
+
+    return joined_str
diff --git a/skbio/stats/_subsample.py b/skbio/stats/_subsample.py
new file mode 100644
index 0000000..e2190a7
--- /dev/null
+++ b/skbio/stats/_subsample.py
@@ -0,0 +1,325 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.utils import viewitems
+
+import sys
+from warnings import warn
+from heapq import heappush, heappop
+from collections import defaultdict
+from copy import copy
+
+import numpy as np
+
+from skbio.util import EfficiencyWarning
+try:
+    from .__subsample import _subsample_counts_without_replacement
+except ImportError:
+    pass
+
+
def isubsample(items, maximum, minimum=1, buf_size=1000, bin_f=None):
    """Randomly subsample items from bins, without replacement.

    Randomly subsample items without replacement from an unknown number of
    input items, that may fall into an unknown number of bins. This method is
    intended for data that either a) cannot fit into memory or b) subsampling
    collections of arbitrary datatypes.

    Parameters
    ----------
    items : Iterable
        The items to evaluate.
    maximum : unsigned int
        The maximum number of items per bin.
    minimum : unsigned int, optional
        The minimum number of items per bin. The default is 1.
    buf_size : unsigned int, optional
        The size of the random value buffer. This buffer holds the random
        values assigned to each item from items. In practice, it is unlikely
        that this value will need to change. Increasing it will require more
        resident memory, but potentially reduce the number of function calls
        made to the PRNG, whereas decreasing it will result in more function
        calls and lower memory overhead. The default is 1000.
    bin_f : function, optional
        Method to determine what bin an item is associated with. If None (the
        default), then all items are considered to be part of the same bin.
        This function will be provided with each entry in items, and must
        return a hashable value indicating the bin that the entry should be
        placed in.

    Returns
    -------
    generator
        (bin, item)

    Raises
    ------
    ValueError
        If ``minimum`` is > ``maximum``.
    ValueError
        If ``minimum`` < 1 or if ``maximum`` < 1.

    See Also
    --------
    subsample_counts

    Notes
    -----
    Randomly get up to ``maximum`` items for each bin. If a bin has fewer
    than ``maximum`` items, only those bins that have >= ``minimum`` items
    are returned.

    This method will at most hold ``maximum`` * N data, where N is the number
    of bins.

    All items associated to a bin have an equal probability of being retained.

    Examples
    --------
    Randomly keep up to 2 sequences per sample from a set of demultiplexed
    sequences:

    >>> from skbio.stats import isubsample
    >>> import numpy as np
    >>> np.random.seed(123)
    >>> seqs = [('sampleA', 'AATTGG'),
    ...         ('sampleB', 'ATATATAT'),
    ...         ('sampleC', 'ATGGCC'),
    ...         ('sampleB', 'ATGGCT'),
    ...         ('sampleB', 'ATGGCG'),
    ...         ('sampleA', 'ATGGCA')]
    >>> bin_f = lambda item: item[0]
    >>> for bin_, item in sorted(isubsample(seqs, 2, bin_f=bin_f)):
    ...     print(bin_, item[1])
    sampleA AATTGG
    sampleA ATGGCA
    sampleB ATATATAT
    sampleB ATGGCG
    sampleC ATGGCC

    Now, let's set the minimum to 2:

    >>> bin_f = lambda item: item[0]
    >>> for bin_, item in sorted(isubsample(seqs, 2, 2, bin_f=bin_f)):
    ...     print(bin_, item[1])
    sampleA AATTGG
    sampleA ATGGCA
    sampleB ATATATAT
    sampleB ATGGCG
    """
    if minimum > maximum:
        raise ValueError("minimum cannot be > maximum.")
    if minimum < 1 or maximum < 1:
        raise ValueError("minimum and maximum must be > 0.")
    if bin_f is None:
        def bin_f(x):
            return True

    # Pre-draw a block of PRNG values; exactly one is consumed per item,
    # keeping PRNG function-call overhead low for large inputs.
    buffered = np.random.randint(0, sys.maxsize, buf_size)
    cursor = 0

    bins = defaultdict(list)
    for entry in items:
        heap = bins[bin_f(entry)]

        tag = buffered[cursor]
        cursor += 1
        # Replenish the buffer as soon as it has been fully consumed.
        if cursor >= buf_size:
            buffered = np.random.randint(0, sys.maxsize, buf_size)
            cursor = 0

        # Reservoir-style selection: each bin keeps the `maximum` entries
        # bearing the largest random tags, which gives every entry in a bin
        # an equal chance of being retained.
        heappush(heap, (tag, copy(entry)))
        if len(heap) > maximum:
            heappop(heap)

    # Emit only the bins that collected enough entries.
    for key, heap in bins.items():
        if len(heap) >= minimum:
            for _, entry in heap:
                yield (key, entry)
+
+
def subsample(counts, n, replace=False):
    """Randomly subsample from a vector of counts, with or without replacement.

    .. note:: Deprecated in scikit-bio 0.2.1-dev
       ``subsample`` will be removed in scikit-bio 0.3.0. It is replaced by
       ``subsample_counts``, which provides an identical interface; only the
       function name has changed.

    Parameters
    ----------
    counts : 1-D array_like
        Vector of counts (integers) to randomly subsample from.
    n : int
        Number of items to subsample from `counts`. Must be less than or equal
        to the sum of `counts`.
    replace : bool, optional
        If ``True``, subsample with replacement. If ``False`` (the default),
        subsample without replacement.

    Returns
    -------
    subsampled : ndarray
        Subsampled vector of counts where the sum of the elements equals `n`
        (i.e., ``subsampled.sum() == n``). Will have the same shape as
        `counts`.

    Raises
    ------
    TypeError
        If `counts` cannot be safely converted to an integer datatype.
    ValueError
        If `n` is less than zero or greater than the sum of `counts`.

    Warns
    -----
    EfficiencyWarning
        If the accelerated code isn't present or hasn't been compiled.

    See Also
    --------
    subsample_counts

    Notes
    -----
    If subsampling is performed without replacement (``replace=False``), a copy
    of `counts` is returned if `n` is equal to the number of items in `counts`,
    as all items will be chosen from the original vector.

    If subsampling is performed with replacement (``replace=True``) and `n` is
    equal to the number of items in `counts`, the subsampled vector that is
    returned may not necessarily be the same vector as `counts`.

    """
    # DeprecationWarning steers callers to the renamed function before the
    # 0.3.0 removal; this wrapper only delegates.
    warn("skbio.stats.subsample is deprecated and will be removed in "
         "scikit-bio 0.3.0. Please update your code to use "
         "skbio.stats.subsample_counts.", DeprecationWarning)
    return subsample_counts(counts, n, replace=replace)
+
+
def subsample_counts(counts, n, replace=False):
    """Randomly subsample from a vector of counts, with or without replacement.

    Parameters
    ----------
    counts : 1-D array_like
        Vector of counts (integers) to randomly subsample from.
    n : int
        Number of items to subsample from `counts`. Must be less than or equal
        to the sum of `counts`.
    replace : bool, optional
        If ``True``, subsample with replacement. If ``False`` (the default),
        subsample without replacement.

    Returns
    -------
    subsampled : ndarray
        Subsampled vector of counts where the sum of the elements equals `n`
        (i.e., ``subsampled.sum() == n``). Will have the same shape as
        `counts`.

    Raises
    ------
    TypeError
        If `counts` cannot be safely converted to an integer datatype.
    ValueError
        If `n` is less than zero or greater than the sum of `counts`.

    Warns
    -----
    EfficiencyWarning
        If the accelerated code isn't present or hasn't been compiled.

    See Also
    --------
    isubsample
    skbio.diversity.alpha

    Notes
    -----
    If subsampling is performed without replacement (``replace=False``), a copy
    of `counts` is returned if `n` is equal to the number of items in `counts`,
    as all items will be chosen from the original vector.

    If subsampling is performed with replacement (``replace=True``) and `n` is
    equal to the number of items in `counts`, the subsampled vector that is
    returned may not necessarily be the same vector as `counts`.

    Examples
    --------
    Subsample 4 items (without replacement) from a vector of counts:

    >>> import numpy as np
    >>> from skbio.stats import subsample_counts
    >>> a = np.array([4, 5, 0, 2, 1])
    >>> sub = subsample_counts(a, 4)
    >>> sub.sum()
    4
    >>> sub.shape
    (5,)

    Trying to subsample an equal number of items (without replacement) results
    in the same vector as our input:

    >>> subsample_counts([0, 3, 0, 1], 4)
    array([0, 3, 0, 1])

    Subsample 5 items (with replacement):

    >>> sub = subsample_counts([1, 0, 1, 2, 2, 3, 0, 1], 5, replace=True)
    >>> sub.sum()
    5
    >>> sub.shape
    (8,)

    """
    if n < 0:
        raise ValueError("n cannot be negative.")

    # 'safe' casting raises TypeError for inputs (e.g. floats) that cannot be
    # represented exactly as integers.
    counts = np.asarray(counts)
    counts = counts.astype(int, casting='safe')

    if counts.ndim != 1:
        raise ValueError("Only 1-D vectors are supported.")

    counts_sum = counts.sum()
    if n > counts_sum:
        raise ValueError("Cannot subsample more items than exist in input "
                         "counts vector.")

    if replace:
        # Each draw picks item i with probability counts[i] / counts_sum.
        probs = counts / counts_sum
        result = np.random.multinomial(n, probs)
    else:
        if counts_sum == n:
            # Every item is chosen; the (already copied by astype) counts
            # vector is the answer.
            result = counts
        else:
            try:
                result = _subsample_counts_without_replacement(counts, n,
                                                               counts_sum)
            except NameError:
                warn("Accelerated subsampling without replacement isn't"
                     " available.", EfficiencyWarning)

                # Pure-Python/NumPy fallback: expand counts into one index
                # per item, shuffle, keep the first n, and tally them back
                # into a counts vector of the original shape.
                unpacked = np.repeat(np.arange(counts.size), counts)
                permuted = np.random.permutation(unpacked)[:n]
                result = np.bincount(
                    permuted, minlength=counts.size).astype(int, copy=False)

    return result
diff --git a/skbio/stats/distance/__init__.py b/skbio/stats/distance/__init__.py
new file mode 100644
index 0000000..6409e6b
--- /dev/null
+++ b/skbio/stats/distance/__init__.py
@@ -0,0 +1,206 @@
+"""
+Distance matrices and distance-based statistics (:mod:`skbio.stats.distance`)
+=============================================================================
+
+.. currentmodule:: skbio.stats.distance
+
+This subpackage provides functionality for serializing, deserializing, and
+manipulating dissimilarity and distance matrices in memory. It also contains
+various statistical methods that operate on distance matrices, often relating
+distances (e.g., community distances) to categorical and/or continuous
+variables of interest (e.g., gender or age). Methods are also provided for
+comparing distance matrices (e.g., computing the correlation between two or
+more distance matrices using the Mantel test).
+
+Data Structures: DissimilarityMatrix and DistanceMatrix
+-------------------------------------------------------
+
+This package provides two matrix classes, `DissimilarityMatrix` and
+`DistanceMatrix`. Both classes can store measures of difference/distinction
+between objects. A dissimilarity/distance matrix includes both a matrix of
+dissimilarities/distances (floats) between objects, as well as unique IDs
+(object labels; strings) identifying each object in the matrix.
+
+`DissimilarityMatrix` can be used to store measures of dissimilarity between
+objects, and does not require that the dissimilarities are symmetric (e.g.,
+dissimilarities obtained using the *Gain in PD* measure [1]_).
+`DissimilarityMatrix` is a more general container to store differences than
+`DistanceMatrix`.
+
+`DistanceMatrix` has the additional requirement that the differences it
+stores are symmetric (e.g., Euclidean or Hamming distances).
+
+.. note:: `DissimilarityMatrix` can be used to store distances, but it is
+   recommended to use `DistanceMatrix` to store this type of data as it
+   provides an additional check for symmetry. A distance matrix *is a*
+   dissimilarity matrix; this is modeled in the class design by having
+   `DistanceMatrix` subclass `DissimilarityMatrix`.
+
+Classes
+^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   DissimilarityMatrix
+   DistanceMatrix
+
+Functions
+^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   randdm
+
+Exceptions
+^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   DissimilarityMatrixError
+   DistanceMatrixError
+   MissingIDError
+
+Examples
+^^^^^^^^
+Assume we have the following delimited text file storing distances between
+three objects with IDs ``a``, ``b``, and ``c``::
+
+    \\ta\\tb\\tc
+    a\\t0.0\\t0.5\\t1.0
+    b\\t0.5\\t0.0\\t0.75
+    c\\t1.0\\t0.75\\t0.0
+
+Load a distance matrix from the file:
+
+>>> from StringIO import StringIO
+>>> from skbio import DistanceMatrix
+>>> dm_fh = StringIO("\\ta\\tb\\tc\\n"
+...                  "a\\t0.0\\t0.5\\t1.0\\n"
+...                  "b\\t0.5\\t0.0\\t0.75\\n"
+...                  "c\\t1.0\\t0.75\\t0.0\\n")
+>>> dm = DistanceMatrix.read(dm_fh)
+>>> print(dm)
+3x3 distance matrix
+IDs:
+'a', 'b', 'c'
+Data:
+[[ 0.    0.5   1.  ]
+ [ 0.5   0.    0.75]
+ [ 1.    0.75  0.  ]]
+
+Access the distance (scalar) between objects ``'a'`` and ``'c'``:
+
+>>> dm['a', 'c']
+1.0
+
+Get a row vector of distances between object ``'b'`` and all other objects:
+
+>>> dm['b']
+array([ 0.5 ,  0.  ,  0.75])
+
+numpy indexing/slicing also works as expected. Extract the third column:
+
+>>> dm[:, 2]
+array([ 1.  ,  0.75,  0.  ])
+
+Serialize the distance matrix to delimited text file:
+
+>>> out_fh = StringIO()
+>>> dm.write(out_fh)
+>>> out_fh.getvalue() == dm_fh.getvalue()
+True
+
+A distance matrix object can also be created from an existing ``numpy.array``
+(or an array-like object, such as a nested Python list):
+
+>>> import numpy as np
+>>> data = np.array([[0.0, 0.5, 1.0],
+...                  [0.5, 0.0, 0.75],
+...                  [1.0, 0.75, 0.0]])
+>>> ids = ["a", "b", "c"]
+>>> dm_from_np = DistanceMatrix(data, ids)
+>>> print(dm_from_np)
+3x3 distance matrix
+IDs:
+'a', 'b', 'c'
+Data:
+[[ 0.    0.5   1.  ]
+ [ 0.5   0.    0.75]
+ [ 1.    0.75  0.  ]]
+>>> dm_from_np == dm
+True
+
+IDs may be omitted when constructing a dissimilarity/distance matrix.
+Monotonically-increasing integers (cast as strings) will be automatically used:
+
+>>> dm = DistanceMatrix(data)
+>>> dm.ids
+('0', '1', '2')
+
+Distance-based statistics
+-------------------------
+
+In addition to the data structures described above, this package provides the
+following distance-based statistical methods.
+
+Categorical Variable Stats
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   anosim
+   permanova
+   CategoricalStatsResults
+
+Continuous Variable Stats
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   bioenv
+
+Distance Matrix Comparisons
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   mantel
+   pwmantel
+
+References
+----------
+.. [1] Faith, D. P. (1992). "Conservation evaluation and phylogenetic
+   diversity".
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from ._base import (DissimilarityMatrixError, DistanceMatrixError,
+                    MissingIDError, DissimilarityMatrix, DistanceMatrix,
+                    CategoricalStatsResults, randdm)
+from ._bioenv import bioenv
+from ._anosim import anosim, ANOSIM
+from ._permanova import permanova, PERMANOVA
+from ._mantel import mantel, pwmantel
+
+__all__ = ['DissimilarityMatrixError', 'DistanceMatrixError', 'MissingIDError',
+           'DissimilarityMatrix', 'DistanceMatrix', 'randdm', 'anosim',
+           'ANOSIM', 'permanova', 'PERMANOVA', 'CategoricalStatsResults',
+           'bioenv', 'mantel', 'pwmantel']
+
+test = Tester().test
diff --git a/skbio/stats/distance/_anosim.py b/skbio/stats/distance/_anosim.py
new file mode 100644
index 0000000..51b7b49
--- /dev/null
+++ b/skbio/stats/distance/_anosim.py
@@ -0,0 +1,275 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import warnings
+from functools import partial
+
+import numpy as np
+from scipy.stats import rankdata
+
+from ._base import (_preprocess_input, _run_monte_carlo_stats, _build_results,
+                    CategoricalStats)
+
+
def anosim(distance_matrix, grouping, column=None, permutations=999):
    """Test for significant differences between groups using ANOSIM.

    Analysis of Similarities (ANOSIM) is a non-parametric method that tests
    whether two or more groups of objects (e.g., samples) are significantly
    different based on a categorical factor. The ranks of the distances in the
    distance matrix are used to calculate an R statistic, which ranges between
    -1 (anti-grouping) to +1 (strong grouping), with an R value of 0 indicating
    random grouping.

    Statistical significance is assessed via a permutation test. The assignment
    of objects to groups (`grouping`) is randomly permuted a number of times
    (controlled via `permutations`). An R statistic is computed for each
    permutation and the p-value is the proportion of permuted R statistics that
    are equal to or greater than the original (unpermuted) R statistic.

    Parameters
    ----------
    distance_matrix : DistanceMatrix
        Distance matrix containing distances between objects (e.g., distances
        between samples of microbial communities).
    grouping : 1-D array_like or pandas.DataFrame
        Vector indicating the assignment of objects to groups. For example,
        these could be strings or integers denoting which group an object
        belongs to. If `grouping` is 1-D ``array_like``, it must be the same
        length and in the same order as the objects in `distance_matrix`. If
        `grouping` is a ``DataFrame``, the column specified by `column` will be
        used as the grouping vector. The ``DataFrame`` must be indexed by the
        IDs in `distance_matrix` (i.e., the row labels must be distance matrix
        IDs), but the order of IDs between `distance_matrix` and the
        ``DataFrame`` need not be the same. All IDs in the distance matrix must
        be present in the ``DataFrame``. Extra IDs in the ``DataFrame`` are
        allowed (they are ignored in the calculations).
    column : str, optional
        Column name to use as the grouping vector if `grouping` is a
        ``DataFrame``. Must be provided if `grouping` is a ``DataFrame``.
        Cannot be provided if `grouping` is 1-D ``array_like``.
    permutations : int, optional
        Number of permutations to use when assessing statistical
        significance. Must be greater than or equal to zero. If zero,
        statistical significance calculations will be skipped and the p-value
        will be ``np.nan``.

    Returns
    -------
    pandas.Series
        Results of the statistical test, including ``test statistic`` and
        ``p-value``.

    See Also
    --------
    permanova

    Notes
    -----
    See [1]_ for the original method reference. The general algorithm and
    interface are similar to ``vegan::anosim``, available in R's vegan package
    [2]_.

    The p-value will be ``np.nan`` if `permutations` is zero.

    References
    ----------
    .. [1] Clarke, KR. "Non-parametric multivariate analyses of changes in
       community structure." Australian journal of ecology 18.1 (1993):
       117-143.

    .. [2] http://cran.r-project.org/web/packages/vegan/index.html

    Examples
    --------
    Load a 4x4 distance matrix and grouping vector denoting 2 groups of
    objects:

    >>> from skbio import DistanceMatrix
    >>> dm = DistanceMatrix([[0, 1, 1, 4],
    ...                      [1, 0, 3, 2],
    ...                      [1, 3, 0, 3],
    ...                      [4, 2, 3, 0]],
    ...                     ['s1', 's2', 's3', 's4'])
    >>> grouping = ['Group1', 'Group1', 'Group2', 'Group2']

    Run ANOSIM using 99 permutations to calculate the p-value:

    >>> import numpy as np
    >>> # make output deterministic; not necessary for normal use
    >>> np.random.seed(0)
    >>> from skbio.stats.distance import anosim
    >>> anosim(dm, grouping, permutations=99)
    method name               ANOSIM
    test statistic name            R
    sample size                    4
    number of groups               2
    test statistic              0.25
    p-value                     0.67
    number of permutations        99
    Name: ANOSIM results, dtype: object

    The return value is a ``pandas.Series`` object containing the results of
    the statistical test.

    To suppress calculation of the p-value and only obtain the R statistic,
    specify zero permutations:

    >>> anosim(dm, grouping, permutations=0)
    method name               ANOSIM
    test statistic name            R
    sample size                    4
    number of groups               2
    test statistic              0.25
    p-value                      NaN
    number of permutations         0
    Name: ANOSIM results, dtype: object

    You can also provide a ``pandas.DataFrame`` and a column denoting the
    grouping instead of a grouping vector. The following ``DataFrame``'s
    ``Group`` column specifies the same grouping as the vector we used in the
    previous examples:

    >>> # make output deterministic; not necessary for normal use
    >>> np.random.seed(0)
    >>> import pandas as pd
    >>> df = pd.DataFrame.from_dict(
    ...     {'Group': {'s2': 'Group1', 's3': 'Group2', 's4': 'Group2',
    ...                's5': 'Group3', 's1': 'Group1'}})
    >>> anosim(dm, df, column='Group', permutations=99)
    method name               ANOSIM
    test statistic name            R
    sample size                    4
    number of groups               2
    test statistic              0.25
    p-value                     0.67
    number of permutations        99
    Name: ANOSIM results, dtype: object

    The results match the first example above.

    Note that when providing a ``DataFrame``, the ordering of rows and/or
    columns does not affect the grouping vector that is extracted. The
    ``DataFrame`` must be indexed by the distance matrix IDs (i.e., the row
    labels must be distance matrix IDs).

    If IDs (rows) are present in the ``DataFrame`` but not in the distance
    matrix, they are ignored. The previous example's ``s5`` ID illustrates this
    behavior: note that even though the ``DataFrame`` had 5 objects, only 4
    were used in the test (see the "Sample size" row in the results above to
    confirm this). Thus, the ``DataFrame`` can be a superset of the distance
    matrix IDs. Note that the reverse is not true: IDs in the distance matrix
    *must* be present in the ``DataFrame`` or an error will be raised.

    """
    sample_size, num_groups, grouping, tri_idxs, distances = _preprocess_input(
        distance_matrix, grouping, column)

    # Ranks and the R-statistic denominator depend only on the distance
    # matrix, so compute them once and reuse them for every permutation.
    ranks = rankdata(distances, method='average')
    denominator = sample_size * ((sample_size - 1) / 4)

    r_statistic = partial(_compute_r_stat, tri_idxs, ranks, denominator)
    stat, p_value = _run_monte_carlo_stats(r_statistic, grouping,
                                           permutations)

    return _build_results('ANOSIM', 'R', sample_size, num_groups, stat,
                          p_value, permutations)
+
+
+def _compute_r_stat(tri_idxs, ranked_dists, divisor, grouping):
+    """Compute ANOSIM R statistic (between -1 and +1)."""
+    # Create a matrix where True means that the two objects are in the same
+    # group. This ufunc requires that grouping is a numeric vector (e.g., it
+    # won't work with a grouping vector of strings).
+    grouping_matrix = np.equal.outer(grouping, grouping)
+
+    # Extract upper triangle from the grouping matrix. It is important to
+    # extract the values in the same order that the distances are extracted
+    # from the distance matrix (see ranked_dists). Extracting the upper
+    # triangle (excluding the diagonal) preserves this order.
+    grouping_tri = grouping_matrix[tri_idxs]
+
+    # within
+    r_W = np.mean(ranked_dists[grouping_tri])
+
+    # between
+    r_B = np.mean(ranked_dists[np.invert(grouping_tri)])
+
+    return (r_B - r_W) / divisor
+
+
class ANOSIM(CategoricalStats):
    """ANOSIM statistical method executor.

    .. note:: Deprecated in scikit-bio 0.2.1-dev
       ``ANOSIM`` will be removed in scikit-bio 0.3.0. It is replaced by
       ``anosim``, which provides a simpler procedural interface to running
       this statistical method.

    Analysis of Similarities (ANOSIM) is a non-parametric method that tests
    whether two or more groups of objects are significantly different based on
    a categorical factor. The ranks of the distances in the distance matrix are
    used to calculate an R statistic, which ranges between -1 (anti-grouping)
    to +1 (strong grouping), with an R value of 0 indicating random grouping.

    Notes
    -----
    See [1]_ for the original ANOSIM reference. The general algorithm and
    interface are similar to ``vegan::anosim``, available in R's vegan package
    [2]_.

    References
    ----------
    .. [1] Clarke, KR. "Non-parametric multivariate analyses of changes in
       community structure." Australian journal of ecology 18.1 (1993):
       117-143.

    .. [2] http://cran.r-project.org/web/packages/vegan/index.html

    """

    short_method_name = 'ANOSIM'
    long_method_name = 'Analysis of Similarities'
    test_statistic_name = 'R statistic'

    def __init__(self, distance_matrix, grouping, column=None):
        warnings.warn(
            "skbio.stats.distance.ANOSIM is deprecated and will be removed in "
            "scikit-bio 0.3.0. Please update your code to use "
            "skbio.stats.distance.anosim.", DeprecationWarning)

        super(ANOSIM, self).__init__(distance_matrix, grouping, column=column)

        # The divisor and the distance ranks depend only on the distance
        # matrix, so precompute them once here.
        num_objs = self._dm.shape[0]
        self._divisor = num_objs * ((num_objs - 1) / 4)
        self._ranked_dists = rankdata(self._dm.condensed_form(),
                                      method='average')

    def _run(self, grouping):
        """Compute ANOSIM R statistic (between -1 and +1)."""
        # Boolean matrix marking pairs of objects in the same group. The
        # outer equality ufunc requires `grouping` to be a numeric vector
        # (it won't work with a grouping vector of strings).
        same_group = np.equal.outer(grouping, grouping)

        # Take the upper triangle (excluding the diagonal) so the entries
        # line up with the order in which distances were extracted from the
        # distance matrix (see self._ranked_dists).
        return self._compute_r_stat(same_group[self._tri_idxs])

    def _compute_r_stat(self, grouping_tri):
        # Mean rank of within-group vs. between-group distances.
        mean_within = np.mean(self._ranked_dists[grouping_tri])
        mean_between = np.mean(self._ranked_dists[np.invert(grouping_tri)])
        return (mean_between - mean_within) / self._divisor
diff --git a/skbio/stats/distance/_base.py b/skbio/stats/distance/_base.py
new file mode 100644
index 0000000..1579431
--- /dev/null
+++ b/skbio/stats/distance/_base.py
@@ -0,0 +1,1232 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from six import StringIO, string_types
+
+import csv
+import warnings
+from copy import deepcopy
+
+import matplotlib.pyplot as plt
+from IPython.core.pylabtools import print_figure
+from IPython.core.display import Image, SVG
+import numpy as np
+import pandas as pd
+from scipy.spatial.distance import squareform
+
+from skbio._base import SkbioObject
+from skbio.stats import p_value_to_str
+from skbio.stats._misc import _pprint_strs
+from skbio.util import find_duplicates
+
+
class DissimilarityMatrixError(Exception):
    """Raised when dissimilarity matrix validation fails."""
+
+
class DistanceMatrixError(DissimilarityMatrixError):
    """Raised when distance matrix validation fails."""
+
+
class MissingIDError(DissimilarityMatrixError):
    """Raised when an ID lookup fails in the dissimilarity matrix."""

    def __init__(self, missing_id):
        # Passing the message to the base constructor populates ``args``
        # exactly as assigning ``self.args`` directly would.
        message = ("The ID '%s' is not in the dissimilarity matrix." %
                   missing_id)
        super(MissingIDError, self).__init__(message)
+
+
class DissimilarityMatrix(SkbioObject):
    """Store dissimilarities between objects.

    A `DissimilarityMatrix` instance stores a square, hollow, two-dimensional
    matrix of dissimilarities between objects. Objects could be, for example,
    samples or DNA sequences. A sequence of IDs accompanies the
    dissimilarities.

    Methods are provided to load and save dissimilarity matrices from/to disk,
    as well as perform common operations such as extracting dissimilarities
    based on object ID.

    Parameters
    ----------
    data : array_like or DissimilarityMatrix
        Square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
        (floats), or a structure that can be converted to a ``numpy.ndarray``
        using ``numpy.asarray``. Can instead be a `DissimilarityMatrix` (or
        subclass) instance, in which case the instance's data will be used.
        Data will be converted to a float ``dtype`` if necessary. A copy will
        *not* be made if already a ``numpy.ndarray`` with a float ``dtype``.
    ids : sequence of str, optional
        Sequence of strings to be used as object IDs. Must match the number of
        rows/cols in `data`. If ``None`` (the default), IDs will be
        monotonically-increasing integers cast as strings, with numbering
        starting from zero, e.g., ``('0', '1', '2', '3', ...)``.

    Attributes
    ----------
    data
    ids
    dtype
    shape
    size
    T
    png
    svg

    See Also
    --------
    DistanceMatrix

    Notes
    -----
    The dissimilarities are stored in redundant (square-form) format [1]_.

    The data are not checked for symmetry, nor guaranteed/assumed to be
    symmetric.

    References
    ----------
    .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html

    """
    default_write_format = 'lsmat'
    # Used in __str__
    _matrix_element_name = 'dissimilarity'

    @classmethod
    def from_file(cls, lsmat_f, delimiter='\t'):
        """Load dissimilarity matrix from delimited text file.

        .. note:: Deprecated in scikit-bio 0.2.0-dev
           ``from_file`` will be removed in scikit-bio 0.3.0. It is replaced by
           ``read``, which is a more general method for deserializing
           dissimilarity/distance matrices. ``read`` supports multiple file
           formats, automatic file format detection, etc. by taking advantage
           of scikit-bio's I/O registry system. See :mod:`skbio.io` for more
           details.

        Creates a ``DissimilarityMatrix`` (or subclass) instance from a
        ``lsmat`` formatted file. See :mod:`skbio.io.lsmat` for the format
        specification.

        Parameters
        ----------
        lsmat_f: filepath or filehandle
            File to read from.
        delimiter : str, optional
            String delimiting elements in `lsmat_f`.

        Returns
        -------
        DissimilarityMatrix
            Instance of type `cls` containing the parsed contents of `lsmat_f`.

        See Also
        --------
        read

        """
        warnings.warn(
            "DissimilarityMatrix.from_file and DistanceMatrix.from_file are "
            "deprecated and will be removed in scikit-bio 0.3.0. Please "
            "update your code to use DissimilarityMatrix.read and "
            "DistanceMatrix.read.", DeprecationWarning)
        # Delegate to the I/O registry; 'lsmat' is this class's legacy format.
        return cls.read(lsmat_f, format='lsmat', delimiter=delimiter)

    def to_file(self, out_f, delimiter='\t'):
        """Save dissimilarity matrix to file as delimited text.

        .. note:: Deprecated in scikit-bio 0.2.0-dev
           ``to_file`` will be removed in scikit-bio 0.3.0. It is replaced by
           ``write``, which is a more general method for serializing
           dissimilarity/distance matrices. ``write`` supports multiple file
           formats by taking advantage of scikit-bio's I/O registry system.
           See :mod:`skbio.io` for more details.

        Serializes dissimilarity matrix as a ``lsmat`` formatted file. See
        :mod:`skbio.io.lsmat` for the format specification.

        Parameters
        ----------
        out_f : filepath or filehandle
            File to write to.
        delimiter : str, optional
            Delimiter used to separate elements in output format.

        See Also
        --------
        write

        """
        warnings.warn(
            "DissimilarityMatrix.to_file and DistanceMatrix.to_file are "
            "deprecated and will be removed in scikit-bio 0.3.0. Please "
            "update your code to use DissimilarityMatrix.write and "
            "DistanceMatrix.write.", DeprecationWarning)
        self.write(out_f, format='lsmat', delimiter=delimiter)

    def __init__(self, data, ids=None):
        """Validate `data`/`ids` and store them (see class docstring)."""
        if isinstance(data, DissimilarityMatrix):
            data = data.data
        # asarray avoids a copy when data is already a float ndarray.
        data = np.asarray(data, dtype='float')

        if ids is None:
            # Default IDs: '0', '1', ... matching the matrix dimension.
            ids = (str(i) for i in range(data.shape[0]))
        ids = tuple(ids)

        self._validate(data, ids)

        self._data = data
        self._ids = ids
        self._id_index = self._index_list(self._ids)

    @property
    def data(self):
        """Array of dissimilarities.

        A square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
        (floats). A copy is *not* returned.

        Notes
        -----
        This property is not writeable.

        """
        return self._data

    @property
    def ids(self):
        """Tuple of object IDs.

        A tuple of strings, one for each object in the dissimilarity matrix.

        Notes
        -----
        This property is writeable, but the number of new IDs must match the
        number of objects in `data`.

        """
        return self._ids

    @ids.setter
    def ids(self, ids_):
        ids_ = tuple(ids_)
        # Validate against the existing data before committing the new IDs,
        # so a failed assignment leaves the instance unchanged.
        self._validate(self.data, ids_)
        self._ids = ids_
        self._id_index = self._index_list(self._ids)

    @property
    def dtype(self):
        """Data type of the dissimilarities."""
        return self.data.dtype

    @property
    def shape(self):
        """Two-element tuple containing the dissimilarity matrix dimensions.

        Notes
        -----
        As the dissimilarity matrix is guaranteed to be square, both tuple
        entries will always be equal.

        """
        return self.data.shape

    @property
    def size(self):
        """Total number of elements in the dissimilarity matrix.

        Notes
        -----
        Equivalent to ``self.shape[0] * self.shape[1]``.

        """
        return self.data.size

    @property
    def T(self):
        """Transpose of the dissimilarity matrix.

        See Also
        --------
        transpose

        """
        return self.transpose()

    def transpose(self):
        """Return the transpose of the dissimilarity matrix.

        Notes
        -----
        A deep copy is returned.

        Returns
        -------
        DissimilarityMatrix
            Transpose of the dissimilarity matrix. Will be the same type as
            `self`.

        """
        return self.__class__(self.data.T.copy(), deepcopy(self.ids))

    def index(self, lookup_id):
        """Return the index of the specified ID.

        Parameters
        ----------
        lookup_id : str
            ID whose index will be returned.

        Returns
        -------
        int
            Row/column index of `lookup_id`.

        Raises
        ------
        MissingIDError
            If `lookup_id` is not in the dissimilarity matrix.

        """
        if lookup_id in self:
            return self._id_index[lookup_id]
        else:
            raise MissingIDError(lookup_id)

    def redundant_form(self):
        """Return an array of dissimilarities in redundant format.

        As this is the native format that the dissimilarities are stored in,
        this is simply an alias for `data`.

        Returns
        -------
        ndarray
            Two-dimensional ``numpy.ndarray`` of dissimilarities in redundant
            format.

        Notes
        -----
        Redundant format is described in [1]_.

        Does *not* return a copy of the data.

        References
        ----------
        .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html

        """
        return self.data

    def copy(self):
        """Return a deep copy of the dissimilarity matrix.

        Returns
        -------
        DissimilarityMatrix
            Deep copy of the dissimilarity matrix. Will be the same type as
            `self`.

        """
        # We deepcopy IDs in case the tuple contains mutable objects at some
        # point in the future.
        return self.__class__(self.data.copy(), deepcopy(self.ids))

    def filter(self, ids, strict=True):
        """Filter the dissimilarity matrix by IDs.

        Parameters
        ----------
        ids : iterable of str
            IDs to retain. May not contain duplicates or be empty. Each ID must
            be present in the dissimilarity matrix.
        strict : bool, optional
            If `strict` is ``True`` and an ID that is not found in the distance
            matrix is found in `ids`, a ``MissingIDError`` exception will be
            raised, otherwise the ID will be ignored.

        Returns
        -------
        DissimilarityMatrix
            Filtered dissimilarity matrix containing only the IDs specified in
            `ids`. IDs will be in the same order as they appear in `ids`.

        Raises
        ------
        MissingIDError
            If an ID in `ids` is not in the object's list of IDs.
        """
        if strict:
            idxs = [self.index(id_) for id_ in ids]
        else:
            # get the indices to slice the inner numpy array
            idxs = []
            # save the IDs that were found in the distance matrix
            found_ids = []
            for id_ in ids:
                try:
                    idxs.append(self.index(id_))
                    found_ids.append(id_)
                except MissingIDError:
                    pass
            ids = found_ids

        # Fancy-index rows then columns so both axes are subset/reordered to
        # match the requested ID order.
        filtered_data = self._data[idxs][:, idxs]
        return self.__class__(filtered_data, ids)

    def plot(self, cmap=None, title=""):
        """Creates a heatmap of the dissimilarity matrix

        Parameters
        ----------
        cmap: str or matplotlib.colors.Colormap, optional
            Sets the color scheme of the heatmap
            If ``None``, defaults to the colormap specified in the matplotlib
            rc file.

        title: str, optional
            Sets the title label of the heatmap
            (Default is blank)

        Returns
        -------
        matplotlib.figure.Figure
            Figure containing the heatmap and colorbar of the plotted
            dissimilarity matrix.

        Examples
        --------
        .. plot::

           Define a dissimilarity matrix with five objects labeled A-E:

           >>> from skbio.stats.distance import DissimilarityMatrix
           >>> dm = DissimilarityMatrix([[0, 1, 2, 3, 4], [1, 0, 1, 2, 3],
           ...                           [2, 1, 0, 1, 2], [3, 2, 1, 0, 1],
           ...                           [4, 3, 2, 1, 0]],
           ...                          ['A', 'B', 'C', 'D', 'E'])

           Plot the dissimilarity matrix as a heatmap:

           >>> fig = dm.plot(cmap='Reds', title='Example heatmap')

        """
        # based on http://stackoverflow.com/q/14391959/3776794
        fig, ax = plt.subplots()

        # use pcolormesh instead of pcolor for performance
        heatmap = ax.pcolormesh(self.data, cmap=cmap)
        fig.colorbar(heatmap)

        # center labels within each cell
        ticks = np.arange(0.5, self.shape[0])
        ax.set_xticks(ticks, minor=False)
        ax.set_yticks(ticks, minor=False)

        # display data as it is stored in the dissimilarity matrix
        # (default is to have y-axis inverted)
        ax.invert_yaxis()

        ax.set_xticklabels(self.ids, rotation=90, minor=False)
        ax.set_yticklabels(self.ids, minor=False)
        ax.set_title(title)

        return fig

    def _repr_png_(self):
        # IPython rich-display hook (PNG).
        return self._figure_data('png')

    def _repr_svg_(self):
        # IPython rich-display hook (SVG).
        return self._figure_data('svg')

    @property
    def png(self):
        """Display heatmap in IPython Notebook as PNG.

        """
        return Image(self._repr_png_(), embed=True)

    @property
    def svg(self):
        """Display heatmap in IPython Notebook as SVG.

        """
        return SVG(self._repr_svg_())

    def _figure_data(self, format):
        # Render the default heatmap and serialize it in the given format.
        fig = self.plot()
        data = print_figure(fig, format)
        # We MUST close the figure, otherwise IPython's display machinery
        # will pick it up and send it as output, resulting in a double display
        plt.close(fig)
        return data

    def __str__(self):
        """Return a string representation of the dissimilarity matrix.

        Summary includes matrix dimensions, a (truncated) list of IDs, and
        (truncated) array of dissimilarities.

        Returns
        -------
        str
            String representation of the dissimilarity matrix.

        .. shownumpydoc

        """
        return '%dx%d %s matrix\nIDs:\n%s\nData:\n' % (
            self.shape[0], self.shape[1], self._matrix_element_name,
            _pprint_strs(self.ids)) + str(self.data)

    def __eq__(self, other):
        """Compare this dissimilarity matrix to another for equality.

        Two dissimilarity matrices are equal if they have the same shape, IDs
        (in the same order!), and have data arrays that are equal.

        Checks are *not* performed to ensure that `other` is a
        `DissimilarityMatrix` instance.

        Parameters
        ----------
        other : DissimilarityMatrix
            Dissimilarity matrix to compare to for equality.

        Returns
        -------
        bool
            ``True`` if `self` is equal to `other`, ``False`` otherwise.

        .. shownumpydoc

        """
        equal = True

        # The order these checks are performed in is important to be as
        # efficient as possible. The check for shape equality is not strictly
        # necessary as it should be taken care of in np.array_equal, but I'd
        # rather explicitly bail before comparing IDs or data. Use array_equal
        # instead of (a == b).all() because of this issue:
        #     http://stackoverflow.com/a/10582030
        try:
            if self.shape != other.shape:
                equal = False
            elif self.ids != other.ids:
                equal = False
            elif not np.array_equal(self.data, other.data):
                equal = False
        except AttributeError:
            equal = False

        return equal

    def __ne__(self, other):
        """Determine whether two dissimilarity matrices are not equal.

        Parameters
        ----------
        other : DissimilarityMatrix
            Dissimilarity matrix to compare to.

        Returns
        -------
        bool
            ``True`` if `self` is not equal to `other`, ``False`` otherwise.

        See Also
        --------
        __eq__

        .. shownumpydoc

        """
        return not self == other

    def __contains__(self, lookup_id):
        """Check if the specified ID is in the dissimilarity matrix.

        Parameters
        ----------
        lookup_id : str
            ID to search for.

        Returns
        -------
        bool
            ``True`` if `lookup_id` is in the dissimilarity matrix, ``False``
            otherwise.

        See Also
        --------
        index

        .. shownumpydoc

        """
        return lookup_id in self._id_index

    def __getitem__(self, index):
        """Slice into dissimilarity data by object ID or numpy indexing.

        Extracts data from the dissimilarity matrix by object ID, a pair of
        IDs, or numpy indexing/slicing.

        Parameters
        ----------
        index : str, two-tuple of str, or numpy index
            `index` can be one of the following forms: an ID, a pair of IDs, or
            a numpy index.

            If `index` is a string, it is assumed to be an ID and a
            ``numpy.ndarray`` row vector is returned for the corresponding ID.
            Note that the ID's row of dissimilarities is returned, *not* its
            column. If the matrix is symmetric, the two will be identical, but
            this makes a difference if the matrix is asymmetric.

            If `index` is a two-tuple of strings, each string is assumed to be
            an ID and the corresponding matrix element is returned that
            represents the dissimilarity between the two IDs. Note that the
            order of lookup by ID pair matters if the matrix is asymmetric: the
            first ID will be used to look up the row, and the second ID will be
            used to look up the column. Thus, ``dm['a', 'b']`` may not be the
            same as ``dm['b', 'a']`` if the matrix is asymmetric.

            Otherwise, `index` will be passed through to
            ``DissimilarityMatrix.data.__getitem__``, allowing for standard
            indexing of a ``numpy.ndarray`` (e.g., slicing).

        Returns
        -------
        ndarray or scalar
            Indexed data, where return type depends on the form of `index` (see
            description of `index` for more details).

        Raises
        ------
        MissingIDError
            If the ID(s) specified in `index` are not in the dissimilarity
            matrix.

        Notes
        -----
        The lookup based on ID(s) is quick.

        .. shownumpydoc

        """
        if isinstance(index, string_types):
            return self.data[self.index(index)]
        elif self._is_id_pair(index):
            return self.data[self.index(index[0]), self.index(index[1])]
        else:
            return self.data.__getitem__(index)

    def _validate(self, data, ids):
        """Validate the data array and IDs.

        Checks that the data is at least 1x1 in size, 2D, square, hollow, and
        contains only floats. Also checks that IDs are unique and that the
        number of IDs matches the number of rows/cols in the data array.

        Subclasses can override this method to perform different/more specific
        validation (e.g., see `DistanceMatrix`).

        Notes
        -----
        Accepts arguments instead of inspecting instance attributes to avoid
        creating an invalid dissimilarity matrix before raising an error.
        Otherwise, the invalid dissimilarity matrix could be used after the
        exception is caught and handled.

        """
        if 0 in data.shape:
            raise DissimilarityMatrixError("Data must be at least 1x1 in "
                                           "size.")
        if len(data.shape) != 2:
            raise DissimilarityMatrixError("Data must have exactly two "
                                           "dimensions.")
        if data.shape[0] != data.shape[1]:
            raise DissimilarityMatrixError("Data must be square (i.e., have "
                                           "the same number of rows and "
                                           "columns).")
        if data.dtype != np.double:
            raise DissimilarityMatrixError("Data must contain only floating "
                                           "point values.")
        # NOTE(review): a zero trace only guarantees hollowness when the data
        # are non-negative; mixed-sign diagonals could cancel — presumably
        # acceptable for dissimilarities, which are expected non-negative.
        if np.trace(data) != 0:
            raise DissimilarityMatrixError("Data must be hollow (i.e., the "
                                           "diagonal can only contain zeros).")
        duplicates = find_duplicates(ids)
        if duplicates:
            formatted_duplicates = ', '.join(repr(e) for e in duplicates)
            raise DissimilarityMatrixError("IDs must be unique. Found the "
                                           "following duplicate IDs: %s" %
                                           formatted_duplicates)
        if len(ids) != data.shape[0]:
            raise DissimilarityMatrixError("The number of IDs (%d) must match "
                                           "the number of rows/columns in the "
                                           "data (%d)." %
                                           (len(ids), data.shape[0]))

    def _index_list(self, list_):
        # Map ID -> row/column position for O(1) lookups in index() and
        # __contains__.
        return {id_: idx for idx, id_ in enumerate(list_)}

    def _is_id_pair(self, index):
        # A 2-tuple of strings is treated as a (row ID, column ID) pair by
        # __getitem__.
        return (isinstance(index, tuple) and
                len(index) == 2 and
                all(map(lambda e: isinstance(e, string_types), index)))
+
+
class DistanceMatrix(DissimilarityMatrix):
    """Store symmetric distances between objects.

    A `DistanceMatrix` is a `DissimilarityMatrix` whose data are additionally
    required to be symmetric. Extra methods are provided that exploit this
    symmetry.

    See Also
    --------
    DissimilarityMatrix

    Notes
    -----
    Distances are kept in redundant (square-form) format [1]_; for
    interoperability with other scientific Python code (e.g., scipy) they can
    also be obtained in condensed (vector-form) format via `condensed_form`.

    Only symmetry is enforced. The remaining metric axioms (non-negativity,
    identity of indiscernibles, triangle inequality) [2]_ are *not* checked,
    so non-metric distances can be stored.

    References
    ----------
    .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
    .. [2] http://planetmath.org/metricspace

    """

    # Overrides the superclass value; consumed by the inherited __str__.
    _matrix_element_name = 'distance'

    def condensed_form(self):
        """Return an array of distances in condensed format.

        Returns
        -------
        ndarray
            One-dimensional ``numpy.ndarray`` of distances in condensed
            (vector) format, as described in [1]_.

        Notes
        -----
        The conversion copies data, so it is not constant-time, though it is
        fast in practice.

        References
        ----------
        .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html

        """
        return squareform(self._data, force='tovector', checks=False)

    def permute(self, condensed=False):
        """Randomly permute both rows and columns in the matrix.

        The identical permutation is applied to rows and columns, so the
        result remains symmetric and hollow. Only the distances are shuffled;
        the IDs are *not* permuted. This instance is left unmodified.

        Parameters
        ----------
        condensed : bool, optional
            When ``True``, return the permuted distances in condensed format;
            otherwise return them as a new ``DistanceMatrix`` instance.

        Returns
        -------
        DistanceMatrix or ndarray
            Permuted distances, as a new ``DistanceMatrix`` or as an
            ``ndarray`` in condensed format.

        See Also
        --------
        condensed_form

        Notes
        -----
        Passing ``condensed=True`` is cheaper than permuting and then calling
        `condensed_form`, since no intermediate ``DistanceMatrix`` is built.

        """
        shuffle = np.random.permutation(self.shape[0])
        shuffled = self._data[shuffle][:, shuffle]

        if condensed:
            return squareform(shuffled, force='tovector', checks=False)
        return self.__class__(shuffled, self.ids)

    def _validate(self, data, ids):
        """Validate the data array and IDs.

        Extends the superclass `_validate` with a symmetry check.

        """
        super(DistanceMatrix, self)._validate(data, ids)

        if np.any(data != data.T):
            raise DistanceMatrixError("Data must be symmetric.")
+
+
def randdm(num_objects, ids=None, constructor=None, random_fn=None):
    """Generate a distance matrix populated with random distances.

    With the default `random_fn`, distances are drawn uniformly from
    ``[0, 1)``. Whatever `random_fn` is used, the result is guaranteed to be
    symmetric and hollow.

    Parameters
    ----------
    num_objects : int
        Number of objects in the resulting distance matrix (e.g., 3 yields a
        3x3 matrix).
    ids : sequence of str or None, optional
        Strings to use as IDs; ``len(ids)`` must equal `num_objects`. When not
        provided, IDs are monotonically-increasing integers cast as strings,
        starting at 1 (e.g., ``('1', '2', '3')``).
    constructor : type, optional
        `DissimilarityMatrix` (or subclass) constructor used to build the
        result, which will be of this type. Defaults to `DistanceMatrix` when
        ``None``.
    random_fn : function, optional
        Callable taking (rows, columns) and returning a 2D float-castable
        ``numpy.ndarray``. Defaults to ``numpy.random.rand`` when ``None``.

    Returns
    -------
    DissimilarityMatrix
        `DissimilarityMatrix` (or subclass) instance of random distances; the
        exact type depends on `constructor`.

    See Also
    --------
    numpy.random.rand

    """
    constructor = DistanceMatrix if constructor is None else constructor
    random_fn = np.random.rand if random_fn is None else random_fn

    # Keep only the strict lower triangle of the random draw, then mirror it
    # upward: the result is symmetric with a zero diagonal (hollow).
    lower = np.tril(random_fn(num_objects, num_objects), -1)
    data = lower + lower.T

    if not ids:
        ids = map(str, range(1, num_objects + 1))

    return constructor(data, ids)
+
+
+# helper functions for anosim and permanova
+
def _preprocess_input(distance_matrix, grouping, column):
    """Compute permutation-invariant intermediates for ANOSIM/PERMANOVA.

    These quantities only need to be computed once, no matter how many
    grouping-vector permutations are evaluated for the p-value. Input is also
    validated and normalized here (e.g., a ``DataFrame`` column is converted
    into a grouping vector).

    """
    if not isinstance(distance_matrix, DistanceMatrix):
        raise TypeError("Input must be a DistanceMatrix.")

    if isinstance(grouping, pd.DataFrame):
        if column is None:
            raise ValueError(
                "Must provide a column name if supplying a DataFrame.")
        grouping = _df_to_vector(distance_matrix, grouping, column)
    elif column is not None:
        raise ValueError(
            "Must provide a DataFrame if supplying a column name.")

    sample_size = distance_matrix.shape[0]
    if len(grouping) != sample_size:
        raise ValueError(
            "Grouping vector size must match the number of IDs in the "
            "distance matrix.")

    # Collapse the labels into an integer factor; `grouping` becomes a vector
    # of indices into `unique_labels`.
    unique_labels, grouping = np.unique(grouping, return_inverse=True)
    num_groups = len(unique_labels)

    # Degenerate groupings make the statistics meaningless, so reject them.
    if num_groups == len(grouping):
        raise ValueError(
            "All values in the grouping vector are unique. This method cannot "
            "operate on a grouping vector with only unique values (e.g., "
            "there are no 'within' distances because each group of objects "
            "contains only a single object).")
    if num_groups == 1:
        raise ValueError(
            "All values in the grouping vector are the same. This method "
            "cannot operate on a grouping vector with only a single group of "
            "objects (e.g., there are no 'between' distances because there is "
            "only a single group).")

    # Upper-triangle indices line up with the condensed-form distance order.
    tri_idxs = np.triu_indices(sample_size, k=1)
    distances = distance_matrix.condensed_form()

    return sample_size, num_groups, grouping, tri_idxs, distances
+
+
def _df_to_vector(distance_matrix, df, column):
    """Return a grouping vector from a ``DataFrame`` column.

    Parameters
    ----------
    distance_matrix : DistanceMatrix
        Distance matrix whose IDs will be mapped to group labels.
    df : pandas.DataFrame
        ``DataFrame`` (indexed by distance matrix ID).
    column : str
        Column name in `df` containing group labels.

    Returns
    -------
    list
        Grouping vector (vector of labels) based on the IDs in
        `distance_matrix`. Each ID's label is looked up in the ``DataFrame``
        under the column specified by `column`.

    Raises
    ------
    ValueError
        If `column` is not in the ``DataFrame``, or a distance matrix ID is
        not in the ``DataFrame``.

    """
    if column not in df:
        raise ValueError("Column '%s' not in DataFrame." % column)

    # Use reindex rather than .loc: on pandas >= 1.0, .loc with a list
    # containing missing labels raises KeyError, which would bypass the
    # friendlier ValueError below. reindex yields NaN rows for missing IDs on
    # all pandas versions, preserving the originally documented behavior.
    grouping = df.reindex(list(distance_matrix.ids))[column]
    if grouping.isnull().any():
        raise ValueError(
            "One or more IDs in the distance matrix are not in the data "
            "frame.")
    return grouping.tolist()
+
+
+def _run_monte_carlo_stats(test_stat_function, grouping, permutations):
+    """Run stat test and compute significance with Monte Carlo permutations."""
+    if permutations < 0:
+        raise ValueError(
+            "Number of permutations must be greater than or equal to zero.")
+
+    stat = test_stat_function(grouping)
+
+    p_value = np.nan
+    if permutations > 0:
+        perm_stats = np.empty(permutations, dtype=np.float64)
+
+        for i in range(permutations):
+            perm_grouping = np.random.permutation(grouping)
+            perm_stats[i] = test_stat_function(perm_grouping)
+
+        p_value = ((perm_stats >= stat).sum() + 1) / (permutations + 1)
+
+    return stat, p_value
+
+
+def _build_results(method_name, test_stat_name, sample_size, num_groups, stat,
+                   p_value, permutations):
+    """Return ``pandas.Series`` containing results of statistical test."""
+    return pd.Series(
+        data=[method_name, test_stat_name, sample_size, num_groups, stat,
+              p_value, permutations],
+        index=['method name', 'test statistic name', 'sample size',
+               'number of groups', 'test statistic', 'p-value',
+               'number of permutations'],
+        name='%s results' % method_name)
+
+
class CategoricalStats(object):
    """Base class for categorical statistical methods.

    Categorical statistical methods generally test for significant differences
    between discrete groups of objects, as determined by a categorical variable
    (grouping vector).

    Subclasses must implement ``_run`` and should override the three class
    attributes below so results are labeled correctly.

    See Also
    --------
    ANOSIM
    PERMANOVA

    """

    # Labels used when formatting results; overridden by subclasses
    # (e.g. 'ANOSIM' / 'analysis of similarities' / 'R').
    short_method_name = ''
    long_method_name = ''
    test_statistic_name = ''

    def __init__(self, distance_matrix, grouping, column=None):
        """Validate inputs and precompute state shared by all permutations.

        Parameters
        ----------
        distance_matrix : DistanceMatrix
            Distance matrix containing distances between objects.
        grouping : 1-D array_like or pandas.DataFrame
            Vector of group labels, one per ID in `distance_matrix` (in the
            same order), or a ``DataFrame`` indexed by distance matrix ID
            from which labels are extracted (requires `column`).
        column : str, optional
            Column of `grouping` containing the labels. Required if and only
            if `grouping` is a ``DataFrame``.

        Raises
        ------
        TypeError
            If `distance_matrix` is not a ``DistanceMatrix`` instance.
        ValueError
            If `grouping`/`column` are supplied inconsistently, the grouping
            size does not match the distance matrix, or the grouping contains
            only unique labels or only a single label.

        """
        if not isinstance(distance_matrix, DistanceMatrix):
            raise TypeError("Input must be a DistanceMatrix.")

        if isinstance(grouping, pd.DataFrame):
            if column is None:
                raise ValueError("Must provide a column name if supplying a "
                                 "data frame.")
            else:
                grouping = self._df_to_vector(distance_matrix, grouping,
                                              column)
        elif column is not None:
            raise ValueError("Must provide a data frame if supplying a column "
                             "name.")

        if len(grouping) != distance_matrix.shape[0]:
            raise ValueError("Grouping vector size must match the number of "
                             "IDs in the distance matrix.")

        # Find the group labels and convert grouping to an integer vector
        # (factor).
        groups, grouping = np.unique(grouping, return_inverse=True)

        # Every object in its own group: no 'within' distances exist.
        if len(groups) == len(grouping):
            raise ValueError("All values in the grouping vector are unique. "
                             "This method cannot operate on a grouping vector "
                             "with only unique values (e.g., there are no "
                             "'within' distances because each group of "
                             "objects contains only a single object).")
        # Only one group: no 'between' distances exist.
        if len(groups) == 1:
            raise ValueError("All values in the grouping vector are the same. "
                             "This method cannot operate on a grouping vector "
                             "with only a single group of objects (e.g., "
                             "there are no 'between' distances because there "
                             "is only a single group).")

        self._dm = distance_matrix
        self._grouping = grouping
        self._groups = groups
        # Indices of the strictly-upper triangle; precomputed once since they
        # are reused by _run for every permutation.
        self._tri_idxs = np.triu_indices(self._dm.shape[0], k=1)

    def _df_to_vector(self, distance_matrix, df, column):
        """Return a grouping vector from a data frame column.

        Parameters
        ----------
        distance_matrix : DistanceMatrix
            Distance matrix whose IDs will be mapped to group labels.
        df : pandas.DataFrame
            ``DataFrame`` (indexed by distance matrix ID).
        column : str
            Column name in `df` containing group labels.

        Returns
        -------
        list
            Grouping vector (vector of labels) based on the IDs in
            `distance_matrix`. Each ID's label is looked up in the data frame
            under the column specified by `column`.

        Raises
        ------
        ValueError
            If `column` is not in the data frame, or a distance matrix ID is
            not in the data frame.

        """
        if column not in df:
            raise ValueError("Column '%s' not in data frame." % column)

        # NOTE(review): with pandas >= 1.0, ``.loc`` with missing list labels
        # raises KeyError before the isnull() check below can fire — confirm
        # pandas version assumptions if this path is kept.
        grouping = df.loc[distance_matrix.ids, column]
        if grouping.isnull().any():
            raise ValueError("One or more IDs in the distance matrix are not "
                             "in the data frame.")
        return grouping.tolist()

    def __call__(self, permutations=999):
        """Execute the statistical method.

        Parameters
        ----------
        permutations : int, optional
            Number of permutations to use when calculating statistical
            significance. Must be >= 0. If 0, the resulting p-value will be
            ``None``.

        Returns
        -------
        CategoricalStatsResults
            Results of the method, including test statistic and p-value.

        .. shownumpydoc

        """
        if permutations < 0:
            raise ValueError("Number of permutations must be greater than or "
                             "equal to zero.")

        # Observed test statistic on the original (unpermuted) grouping.
        stat = self._run(self._grouping)

        p_value = None
        if permutations > 0:
            perm_stats = np.empty(permutations, dtype=np.float64)

            # Null distribution: recompute the statistic on shuffled groupings.
            for i in range(permutations):
                perm_grouping = np.random.permutation(self._grouping)
                perm_stats[i] = self._run(perm_grouping)

            # +1 correction counts the observed statistic as part of the null
            # distribution, so the p-value can never be exactly zero.
            p_value = ((perm_stats >= stat).sum() + 1) / (permutations + 1)

        return CategoricalStatsResults(self.short_method_name,
                                       self.long_method_name,
                                       self.test_statistic_name,
                                       self._dm.shape[0], self._groups, stat,
                                       p_value, permutations)

    def _run(self, grouping):
        # Subclasses compute the test statistic for a given grouping here.
        raise NotImplementedError("Subclasses must implement _run().")
+
+
class CategoricalStatsResults(object):
    """Statistical method results container.

    .. note:: Deprecated in scikit-bio 0.2.1-dev
       ``CategoricalStatsResults`` will be removed in scikit-bio 0.3.0. It is
       replaced by ``pandas.Series`` for storing statistical method results.
       Please update your code to use ``skbio.stats.distance.anosim`` or
       ``skbio.stats.distance.permanova``, which will return a
       ``pandas.Series``.

    Stores the results of running a `CategoricalStats` method a single time,
    and provides a way to format the results.

    Attributes
    ----------
    short_method_name
    long_method_name
    test_statistic_name
    sample_size
    groups
    statistic
    p_value
    permutations

    Notes
    -----
    Users will generally not directly instantiate objects of this class. The
    various categorical statistical methods will return an object of this type
    when they are run.

    """

    def __init__(self, short_method_name, long_method_name,
                 test_statistic_name, sample_size, groups, statistic, p_value,
                 permutations):
        # Warn on every instantiation, whether direct or via a
        # CategoricalStats subclass.
        warnings.warn(
            "skbio.stats.distance.CategoricalStatsResults is deprecated and "
            "will be removed in scikit-bio 0.3.0. Please update your code to "
            "use either skbio.stats.distance.anosim or "
            "skbio.stats.distance.permanova, which will return a "
            "pandas.Series object.", DeprecationWarning)

        self.short_method_name = short_method_name
        self.long_method_name = long_method_name
        self.test_statistic_name = test_statistic_name
        self.sample_size = sample_size
        self.groups = groups
        self.statistic = statistic
        self.p_value = p_value
        self.permutations = permutations

    def __str__(self):
        """Return pretty-print (fixed width) string."""
        header = self._format_header()
        data = self._format_data()

        # Right-justify each column to the wider of its two cells.
        widths = [max(len(h), len(d)) for h, d in zip(header, data)]

        formatted = []
        for row in (header, data):
            cells = [cell.rjust(width) for cell, width in zip(row, widths)]
            formatted.append('  '.join(cells))
        return '\n'.join(formatted) + '\n'

    def _repr_html_(self):
        """Return a string containing an HTML table of results.

        This method will be called within the IPython Notebook instead of
        __repr__ to display results.

        """
        header = self._format_header()
        data = self._format_data()
        # The first cell (method name) serves as the row label; the remaining
        # cells become the single table row.
        table = pd.DataFrame([data[1:]], columns=header[1:], index=[data[0]])
        return table._repr_html_()

    def summary(self, delimiter='\t'):
        """Return a formatted summary of results as a string.

        The string is formatted as delimited text.

        Parameters
        ----------
        delimiter : str, optional
            String to delimit fields by in formatted output. Default is tab
            (TSV).

        Returns
        -------
        str
            Delimited-text summary of results.

        """
        buf = StringIO()
        writer = csv.writer(buf, delimiter=delimiter, lineterminator='\n')
        for row in (self._format_header(), self._format_data()):
            writer.writerow(row)
        return buf.getvalue()

    def _format_header(self):
        # Column labels; aligned positionally with _format_data's cells.
        return ('Method name', 'Sample size', 'Number of groups',
                self.test_statistic_name, 'p-value', 'Number of permutations')

    def _format_data(self):
        # Every cell is a string so __str__/summary can measure widths.
        p_value_str = p_value_to_str(self.p_value, self.permutations)

        return (self.short_method_name, '%d' % self.sample_size,
                '%d' % len(self.groups), str(self.statistic), p_value_str,
                '%d' % self.permutations)
diff --git a/skbio/stats/distance/_bioenv.py b/skbio/stats/distance/_bioenv.py
new file mode 100644
index 0000000..3ce7ae6
--- /dev/null
+++ b/skbio/stats/distance/_bioenv.py
@@ -0,0 +1,243 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from itertools import combinations
+
+import numpy as np
+import pandas as pd
+from scipy.spatial.distance import pdist
+from scipy.stats import spearmanr
+
+from skbio.stats.distance import DistanceMatrix
+
+
def bioenv(distance_matrix, data_frame, columns=None):
    """Find subset of variables maximally correlated with distances.

    Finds subsets of variables whose Euclidean distances (after scaling the
    variables; see Notes section below for details) are maximally
    rank-correlated with the distance matrix. For example, the distance matrix
    might contain distances between communities, and the variables might be
    numeric environmental variables (e.g., pH). Correlation between the
    community distance matrix and Euclidean environmental distance matrix is
    computed using Spearman's rank correlation coefficient (:math:`\\rho`).

    Subsets of environmental variables range in size from 1 to the total number
    of variables (inclusive). For example, if there are 3 variables, the "best"
    variable subsets will be computed for subset sizes 1, 2, and 3.

    The "best" subset is chosen by computing the correlation between the
    community distance matrix and all possible Euclidean environmental distance
    matrices at the given subset size. The combination of environmental
    variables with maximum correlation is chosen as the "best" subset.

    Parameters
    ----------
    distance_matrix : DistanceMatrix
        Distance matrix containing distances between objects (e.g., distances
        between samples of microbial communities).
    data_frame : pandas.DataFrame
        Contains columns of variables (e.g., numeric environmental variables
        such as pH) associated with the objects in `distance_matrix`. Must be
        indexed by the IDs in `distance_matrix` (i.e., the row labels must be
        distance matrix IDs), but the order of IDs between `distance_matrix`
        and `data_frame` need not be the same. All IDs in the distance matrix
        must be present in `data_frame`. Extra IDs in `data_frame` are allowed
        (they are ignored in the calculations).
    columns : iterable of strs, optional
        Column names in `data_frame` to include as variables in the
        calculations. If not provided, defaults to all columns in `data_frame`.
        The values in each column must be numeric or convertible to a numeric
        type.

    Returns
    -------
    pandas.DataFrame
        Data frame containing the "best" subset of variables at each subset
        size, as well as the correlation coefficient of each.

    Raises
    ------
    TypeError
        If invalid input types are provided, or if one or more specified
        columns in `data_frame` are not numeric.
    ValueError
        If column name(s) or `distance_matrix` IDs cannot be found in
        `data_frame`, if there is missing data (``NaN``) in the environmental
        variables, or if the environmental variables cannot be scaled (e.g.,
        due to zero variance).

    See Also
    --------
    scipy.stats.spearmanr

    Notes
    -----
    See [1]_ for the original method reference (originally called BIO-ENV).
    The general algorithm and interface are similar to ``vegan::bioenv``,
    available in R's vegan package [2]_. This method can also be found in
    PRIMER-E [3]_ (originally called BIO-ENV, but is now called BEST).

    .. warning:: This method can take a *long* time to run if a large number of
       variables are specified, as all possible subsets are evaluated at each
       subset size.

    The variables are scaled before computing the Euclidean distance: each
    column is centered and then scaled by its standard deviation.

    References
    ----------
    .. [1] Clarke, K. R & Ainsworth, M. 1993. "A method of linking multivariate
       community structure to environmental variables". Marine Ecology Progress
       Series, 92, 205-219.

    .. [2] http://cran.r-project.org/web/packages/vegan/index.html

    .. [3] http://www.primer-e.com/primer.htm

    Examples
    --------
    Import the functionality we'll use in the following examples. The call to
    ``pd.set_option`` ensures consistent data frame formatting across
    different versions of pandas. This call is not necessary for normal
    use; it is only included here so that the doctests will pass.

    >>> import pandas as pd
    >>> from skbio import DistanceMatrix
    >>> from skbio.stats.distance import bioenv
    >>> try:
    ...     # not necessary for normal use
    ...     pd.set_option('show_dimensions', True)
    ... except KeyError:
    ...     pass

    Load a 4x4 community distance matrix:

    >>> dm = DistanceMatrix([[0.0, 0.5, 0.25, 0.75],
    ...                      [0.5, 0.0, 0.1, 0.42],
    ...                      [0.25, 0.1, 0.0, 0.33],
    ...                      [0.75, 0.42, 0.33, 0.0]],
    ...                     ['A', 'B', 'C', 'D'])

    Load a ``pandas.DataFrame`` with two environmental variables, pH and
    elevation:

    >>> df = pd.DataFrame([[7.0, 400],
    ...                    [8.0, 530],
    ...                    [7.5, 450],
    ...                    [8.5, 810]],
    ...                   index=['A','B','C','D'],
    ...                   columns=['pH', 'Elevation'])

    Note that the data frame is indexed with the same IDs (``'A'``, ``'B'``,
    ``'C'``, and ``'D'``) that are in the distance matrix. This is necessary in
    order to link the environmental variables (metadata) to each of the objects
    in the distance matrix. In this example, the IDs appear in the same order
    in both the distance matrix and data frame, but this is not necessary.

    Find the best subsets of environmental variables that are correlated with
    community distances:

    >>> bioenv(dm, df) # doctest: +NORMALIZE_WHITESPACE
                   size  correlation
    vars
    pH                1     0.771517
    pH, Elevation     2     0.714286
    <BLANKLINE>
    [2 rows x 2 columns]

    We see that in this simple example, pH alone is maximally rank-correlated
    with the community distances (:math:`\\rho=0.771517`).

    """
    if not isinstance(distance_matrix, DistanceMatrix):
        raise TypeError("Must provide a DistanceMatrix as input.")
    if not isinstance(data_frame, pd.DataFrame):
        raise TypeError("Must provide a pandas.DataFrame as input.")

    if columns is None:
        columns = data_frame.columns.values.tolist()

    if len(set(columns)) != len(columns):
        raise ValueError("Duplicate column names are not supported.")

    if len(columns) < 1:
        raise ValueError("Must provide at least one column.")

    for column in columns:
        if column not in data_frame:
            raise ValueError("Column '%s' not in data frame." % column)

    # Subset and order the vars data frame to match the IDs in the distance
    # matrix, only keeping the specified columns. ``reindex`` (rather than
    # ``.loc``) yields NaN rows for IDs missing from the data frame so that
    # the isnull() check below can raise the documented ValueError (``.loc``
    # with missing list labels raises KeyError in pandas >= 1.0).
    vars_df = data_frame.reindex(distance_matrix.ids)[columns]

    if vars_df.isnull().any().any():
        raise ValueError("One or more IDs in the distance matrix are not "
                         "in the data frame, or there is missing data in the "
                         "data frame.")

    try:
        vars_df = vars_df.astype(float)
    except ValueError:
        raise TypeError("All specified columns in the data frame must be "
                        "numeric.")

    # Scale the vars and extract the underlying numpy array from the data
    # frame. We mainly do this for performance as we'll be taking subsets of
    # columns within a tight loop and using a numpy array ends up being ~2x
    # faster.
    vars_array = _scale(vars_df).values
    dm_flat = distance_matrix.condensed_form()

    num_vars = len(columns)
    var_idxs = np.arange(num_vars)

    # For each subset size, store the best combination of variables:
    #     (string identifying best vars, subset size, rho)
    max_rhos = np.empty(num_vars, dtype=[('vars', object),
                                         ('size', int),
                                         ('correlation', float)])
    for subset_size in range(1, num_vars + 1):
        max_rho = None
        for subset_idxs in combinations(var_idxs, subset_size):
            # Compute Euclidean distances using the current subset of
            # variables. pdist returns the distances in condensed form.
            vars_dm_flat = pdist(vars_array[:, subset_idxs],
                                 metric='euclidean')
            rho = spearmanr(dm_flat, vars_dm_flat)[0]

            # If there are ties for the best rho at a given subset size, choose
            # the first one in order to match vegan::bioenv's behavior.
            if max_rho is None or rho > max_rho[0]:
                max_rho = (rho, subset_idxs)

        vars_label = ', '.join([columns[i] for i in max_rho[1]])
        max_rhos[subset_size - 1] = (vars_label, subset_size, max_rho[0])

    return pd.DataFrame.from_records(max_rhos, index='vars')
+
+
+def _scale(df):
+    """Center and scale each column in a data frame.
+
+    Each column is centered (by subtracting the mean) and then scaled by its
+    standard deviation.
+
+    """
+    # Modified from http://stackoverflow.com/a/18005745
+    df = df.copy()
+    df -= df.mean()
+    df /= df.std()
+
+    if df.isnull().any().any():
+        raise ValueError("Column(s) in the data frame could not be scaled, "
+                         "likely because the column(s) had no variance.")
+    return df
diff --git a/skbio/stats/distance/_mantel.py b/skbio/stats/distance/_mantel.py
new file mode 100644
index 0000000..e7376a4
--- /dev/null
+++ b/skbio/stats/distance/_mantel.py
@@ -0,0 +1,490 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import zip
+
+from itertools import combinations
+
+import six
+import numpy as np
+import pandas as pd
+import scipy.misc
+from scipy.stats import pearsonr, spearmanr
+
+from skbio.stats.distance import DistanceMatrix
+
+
def mantel(x, y, method='pearson', permutations=999, alternative='two-sided',
           strict=True, lookup=None):
    """Compute correlation between distance matrices using the Mantel test.

    The Mantel test compares two distance matrices by computing the correlation
    between the distances in the lower (or upper) triangular portions of the
    symmetric distance matrices. Correlation can be computed using Pearson's
    product-moment correlation coefficient or Spearman's rank correlation
    coefficient.

    As defined in [1]_, the Mantel test computes a test statistic :math:`r_M`
    given two symmetric distance matrices :math:`D_X` and :math:`D_Y`.
    :math:`r_M` is defined as

    .. math::

       r_M=\\frac{1}{d-1}\\sum_{i=1}^{n-1}\\sum_{j=i+1}^{n}
       stand(D_X)_{ij}stand(D_Y)_{ij}

    where

    .. math::

       d=\\frac{n(n-1)}{2}

    and :math:`n` is the number of rows/columns in each of the distance
    matrices. :math:`stand(D_X)` and :math:`stand(D_Y)` are distance matrices
    with their upper triangles containing standardized distances. Note that
    since :math:`D_X` and :math:`D_Y` are symmetric, the lower triangular
    portions of the matrices could equivalently have been used instead of the
    upper triangular portions (the current function behaves in this manner).

    If ``method='spearman'``, the above equation operates on ranked distances
    instead of the original distances.

    Statistical significance is assessed via a permutation test. The rows and
    columns of the first distance matrix (`x`) are randomly permuted a
    number of times (controlled via `permutations`). A correlation coefficient
    is computed for each permutation and the p-value is the proportion of
    permuted correlation coefficients that are equal to or more extreme
    than the original (unpermuted) correlation coefficient. Whether a permuted
    correlation coefficient is "more extreme" than the original correlation
    coefficient depends on the alternative hypothesis (controlled via
    `alternative`).

    Parameters
    ----------
    x, y : DistanceMatrix or array_like
        Input distance matrices to compare. If `x` and `y` are both
        ``DistanceMatrix`` instances, they will be reordered based on matching
        IDs (see `strict` and `lookup` below for handling matching/mismatching
        IDs); thus they are not required to be in the same ID order. If `x` and
        `y` are ``array_like``, no reordering is applied and both matrices must
        have the same shape. In either case, `x` and `y` must be at least 3x3
        in size *after* reordering and matching of IDs.
    method : {'pearson', 'spearman'}
        Method used to compute the correlation between distance matrices.
    permutations : int, optional
        Number of times to randomly permute `x` when assessing statistical
        significance. Must be greater than or equal to zero. If zero,
        statistical significance calculations will be skipped and the p-value
        will be ``np.nan``.
    alternative : {'two-sided', 'greater', 'less'}
        Alternative hypothesis to use when calculating statistical
        significance. The default ``'two-sided'`` alternative hypothesis
        calculates the proportion of permuted correlation coefficients whose
        magnitude (i.e. after taking the absolute value) is greater than or
        equal to the absolute value of the original correlation coefficient.
        ``'greater'`` calculates the proportion of permuted coefficients that
        are greater than or equal to the original coefficient. ``'less'``
        calculates the proportion of permuted coefficients that are less than
        or equal to the original coefficient.
    strict : bool, optional
        If ``True``, raises a ``ValueError`` if IDs are found that do not exist
        in both distance matrices. If ``False``, any nonmatching IDs are
        discarded before running the test. See `n` (in Returns section below)
        for the number of matching IDs that were used in the test. This
        parameter is ignored if `x` and `y` are ``array_like``.
    lookup : dict, optional
        Maps each ID in the distance matrices to a new ID. Used to match up IDs
        across distance matrices prior to running the Mantel test. If the IDs
        already match between the distance matrices, this parameter is not
        necessary. This parameter is disallowed if `x` and `y` are
        ``array_like``.

    Returns
    -------
    corr_coeff : float
        Correlation coefficient of the test (depends on `method`).
    p_value : float
        p-value of the test.
    n : int
        Number of rows/columns in each of the distance matrices, after any
        reordering/matching of IDs. If ``strict=False``, nonmatching IDs may
        have been discarded from one or both of the distance matrices prior to
        running the Mantel test, so this value may be important as it indicates
        the *actual* size of the matrices that were compared.

    Raises
    ------
    ValueError
        If `x` and `y` are not at least 3x3 in size after reordering/matching
        of IDs, or an invalid `method`, number of `permutations`, or
        `alternative` are provided.
    TypeError
        If `x` and `y` are not both ``DistanceMatrix`` instances or
        ``array_like``.

    See Also
    --------
    DistanceMatrix
    scipy.stats.pearsonr
    scipy.stats.spearmanr
    pwmantel

    Notes
    -----
    The Mantel test was first described in [2]_. The general algorithm and
    interface are similar to ``vegan::mantel``, available in R's vegan
    package [3]_.

    ``np.nan`` will be returned for the p-value if `permutations` is zero or if
    the correlation coefficient is ``np.nan``. The correlation coefficient will
    be ``np.nan`` if one or both of the inputs does not have any variation
    (i.e. the distances are all constant) and ``method='spearman'``.

    References
    ----------
    .. [1] Legendre, P. and Legendre, L. (2012) Numerical Ecology. 3rd English
       Edition. Elsevier.

    .. [2] Mantel, N. (1967). "The detection of disease clustering and a
       generalized regression approach". Cancer Research 27 (2): 209-220. PMID
       6018555.

    .. [3] http://cran.r-project.org/web/packages/vegan/index.html

    Examples
    --------
    Import the functionality we'll use in the following examples:

    >>> from skbio import DistanceMatrix
    >>> from skbio.stats.distance import mantel

    Define two 3x3 distance matrices:

    >>> x = DistanceMatrix([[0, 1, 2],
    ...                     [1, 0, 3],
    ...                     [2, 3, 0]])
    >>> y = DistanceMatrix([[0, 2, 7],
    ...                     [2, 0, 6],
    ...                     [7, 6, 0]])

    Compute the Pearson correlation between them and assess significance using
    a two-sided test with 999 permutations:

    >>> coeff, p_value, n = mantel(x, y)
    >>> round(coeff, 4)
    0.7559

    Thus, we see a moderate-to-strong positive correlation (:math:`r_M=0.7559`)
    between the two matrices.

    In the previous example, the distance matrices (``x`` and ``y``) have the
    same IDs, in the same order:

    >>> x.ids
    ('0', '1', '2')
    >>> y.ids
    ('0', '1', '2')

    If necessary, ``mantel`` will reorder the distance matrices prior to
    running the test. The function also supports a ``lookup`` dictionary that
    maps distance matrix IDs to new IDs, providing a way to match IDs between
    distance matrices prior to running the Mantel test.

    For example, let's reassign the distance matrices' IDs so that there are no
    matching IDs between them:

    >>> x.ids = ('a', 'b', 'c')
    >>> y.ids = ('d', 'e', 'f')

    If we rerun ``mantel``, we get the following error notifying us that there
    are nonmatching IDs (this is the default behavior with ``strict=True``):

    >>> mantel(x, y)
    Traceback (most recent call last):
        ...
    ValueError: IDs exist that are not in both distance matrices.

    If we pass ``strict=False`` to ignore/discard nonmatching IDs, we see that
    no matches exist between `x` and `y`, so the Mantel test still cannot be
    run:

    >>> mantel(x, y, strict=False)
    Traceback (most recent call last):
        ...
    ValueError: No matching IDs exist between the distance matrices.

    To work around this, we can define a ``lookup`` dictionary to specify how
    the IDs should be matched between distance matrices:

    >>> lookup = {'a': 'A', 'b': 'B', 'c': 'C',
    ...           'd': 'A', 'e': 'B', 'f': 'C'}

    ``lookup`` maps each ID to ``'A'``, ``'B'``, or ``'C'``. If we rerun
    ``mantel`` with ``lookup``, we get the same results as the original
    example where all distance matrix IDs matched:

    >>> coeff, p_value, n = mantel(x, y, lookup=lookup)
    >>> round(coeff, 4)
    0.7559

    ``mantel`` also accepts input that is ``array_like``. For example, if we
    redefine `x` and `y` as nested Python lists instead of ``DistanceMatrix``
    instances, we obtain the same result:

    >>> x = [[0, 1, 2],
    ...      [1, 0, 3],
    ...      [2, 3, 0]]
    >>> y = [[0, 2, 7],
    ...      [2, 0, 6],
    ...      [7, 6, 0]]
    >>> coeff, p_value, n = mantel(x, y)
    >>> round(coeff, 4)
    0.7559

    It is important to note that reordering/matching of IDs (and hence the
    ``strict`` and ``lookup`` parameters) do not apply when input is
    ``array_like`` because there is no notion of IDs.

    """
    if method == 'pearson':
        corr_func = pearsonr
    elif method == 'spearman':
        corr_func = spearmanr
    else:
        raise ValueError("Invalid correlation method '%s'." % method)

    if permutations < 0:
        raise ValueError("Number of permutations must be greater than or "
                         "equal to zero.")
    if alternative not in ('two-sided', 'greater', 'less'):
        raise ValueError("Invalid alternative hypothesis '%s'." % alternative)

    # Reorder/match the matrices by ID (DistanceMatrix input) or validate
    # shapes (array_like input).
    x, y = _order_dms(x, y, strict=strict, lookup=lookup)

    n = x.shape[0]
    if n < 3:
        raise ValueError("Distance matrices must have at least 3 matching IDs "
                         "between them (i.e., minimum 3x3 in size).")

    x_flat = x.condensed_form()
    y_flat = y.condensed_form()

    orig_stat = corr_func(x_flat, y_flat)[0]

    if permutations == 0 or np.isnan(orig_stat):
        p_value = np.nan
    else:
        perm_gen = (corr_func(x.permute(condensed=True), y_flat)[0]
                    for _ in range(permutations))
        # Use np.float64 explicitly: the ``np.float`` alias was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24, which made this line raise
        # AttributeError on modern NumPy.
        permuted_stats = np.fromiter(perm_gen, np.float64, count=permutations)

        if alternative == 'two-sided':
            count_better = (np.absolute(permuted_stats) >=
                            np.absolute(orig_stat)).sum()
        elif alternative == 'greater':
            count_better = (permuted_stats >= orig_stat).sum()
        else:
            count_better = (permuted_stats <= orig_stat).sum()

        # +1 correction: the observed statistic counts as a member of the
        # permutation distribution, so the p-value is never exactly zero.
        p_value = (count_better + 1) / (permutations + 1)

    return orig_stat, p_value, n
+
+
def pwmantel(dms, labels=None, method='pearson', permutations=999,
             alternative='two-sided', strict=True, lookup=None):
    """Run Mantel tests for every pair of given distance matrices.

    Runs a Mantel test for each pair of distance matrices and collates the
    results in a ``DataFrame``. Distance matrices do not need to be in the same
    ID order if they are ``DistanceMatrix`` instances. Distance matrices will
    be re-ordered prior to running each pairwise test, and if ``strict=False``,
    IDs that don't match between a pair of distance matrices will be dropped
    prior to running the test (otherwise a ``ValueError`` will be raised if
    there are nonmatching IDs between any pair of distance matrices).

    Parameters
    ----------
    dms : iterable of DistanceMatrix objects, array_like objects, or filepaths
        to distance matrices. If they are ``array_like``, no reordering or
        matching of IDs will be performed.
    labels : iterable of str or int, optional
        Labels for each distance matrix in `dms`. These are used in the results
        ``DataFrame`` to identify the pair of distance matrices used in a
        pairwise Mantel test. If ``None``, defaults to monotonically-increasing
        integers starting at zero.
    method : {'pearson', 'spearman'}
        Correlation method. See ``mantel`` function for more details.
    permutations : int, optional
        Number of permutations. See ``mantel`` function for more details.
    alternative : {'two-sided', 'greater', 'less'}
        Alternative hypothesis. See ``mantel`` function for more details.
    strict : bool, optional
        Handling of nonmatching IDs. See ``mantel`` function for more details.
    lookup : dict, optional
        Map existing IDs to new IDs. See ``mantel`` function for more details.

    Returns
    -------
    pandas.DataFrame
        ``DataFrame`` containing the results of each pairwise test (one per
        row). Includes the number of objects considered in each test as column
        ``n`` (after applying `lookup` and filtering nonmatching IDs if
        ``strict=False``). Column ``p-value`` will display p-values as ``NaN``
        if p-values could not be computed (they are stored as ``np.nan`` within
        the ``DataFrame``; see ``mantel`` for more details).

    Raises
    ------
    ValueError
        If fewer than two distance matrices are provided, if the number of
        labels does not match the number of distance matrices, or if labels
        are not unique.

    See Also
    --------
    mantel
    DistanceMatrix.read

    Notes
    -----
    Passing a list of filepaths can be useful as it allows for a smaller amount
    of memory consumption as it only loads two matrices at a time as opposed to
    loading all distance matrices into memory.

    Examples
    --------
    Import the functionality we'll use in the following examples. The call to
    ``pd.set_option`` ensures consistent ``DataFrame`` formatting across
    different versions of pandas. This call is not necessary for normal
    use; it is only included here so that the doctests will pass.

    >>> import pandas as pd
    >>> from skbio import DistanceMatrix
    >>> from skbio.stats.distance import pwmantel
    >>> try:
    ...     # not necessary for normal use
    ...     pd.set_option('show_dimensions', True)
    ... except KeyError:
    ...     pass

    Define three 3x3 distance matrices:

    >>> x = DistanceMatrix([[0, 1, 2],
    ...                     [1, 0, 3],
    ...                     [2, 3, 0]])
    >>> y = DistanceMatrix([[0, 2, 7],
    ...                     [2, 0, 6],
    ...                     [7, 6, 0]])
    >>> z = DistanceMatrix([[0, 5, 6],
    ...                     [5, 0, 1],
    ...                     [6, 1, 0]])

    Run Mantel tests for each pair of distance matrices (there are 3 possible
    pairs):

    >>> pwmantel((x, y, z), labels=('x', 'y', 'z'),
    ...          permutations=0) # doctest: +NORMALIZE_WHITESPACE
                 statistic p-value  n   method  permutations alternative
    dm1 dm2
    x   y     0.755929     NaN  3  pearson             0   two-sided
        z    -0.755929     NaN  3  pearson             0   two-sided
    y   z    -0.142857     NaN  3  pearson             0   two-sided
    <BLANKLINE>
    [3 rows x 6 columns]

    Note that we passed ``permutations=0`` to suppress significance tests; the
    p-values in the output are labelled ``NaN``.

    """
    num_dms = len(dms)

    if num_dms < 2:
        raise ValueError("Must provide at least two distance matrices.")

    if labels is None:
        labels = range(num_dms)
    else:
        if num_dms != len(labels):
            raise ValueError("Number of labels must match the number of "
                             "distance matrices.")
        if len(set(labels)) != len(labels):
            raise ValueError("Labels must be unique.")

    # Number of unordered pairs of distance matrices, i.e. C(num_dms, 2).
    # Computed directly rather than via scipy.misc.comb, which was deprecated
    # and removed in SciPy 1.0.0 (use scipy.special.comb there instead).
    num_combs = num_dms * (num_dms - 1) // 2
    results_dtype = [('dm1', object), ('dm2', object), ('statistic', float),
                     ('p-value', float), ('n', int), ('method', object),
                     ('permutations', int), ('alternative', object)]
    results = np.empty(num_combs, dtype=results_dtype)

    for i, pair in enumerate(combinations(zip(labels, dms), 2)):
        (xlabel, x), (ylabel, y) = pair
        # Filepaths are loaded lazily, two at a time, to limit memory use.
        if isinstance(x, six.string_types):
            x = DistanceMatrix.read(x)
        if isinstance(y, six.string_types):
            y = DistanceMatrix.read(y)

        stat, p_val, n = mantel(x, y, method=method, permutations=permutations,
                                alternative=alternative, strict=strict,
                                lookup=lookup)

        results[i] = (xlabel, ylabel, stat, p_val, n, method, permutations,
                      alternative)

    return pd.DataFrame.from_records(results, index=('dm1', 'dm2'))
+
+
def _order_dms(x, y, strict=True, lookup=None):
    """Intersect distance matrices and put them in the same order."""
    x_is_dm = isinstance(x, DistanceMatrix)
    y_is_dm = isinstance(y, DistanceMatrix)

    # Mixing input types is ambiguous: one input would have IDs while the
    # other would not.
    if x_is_dm != y_is_dm:
        raise TypeError(
            "Mixing DistanceMatrix and array_like input types is not "
            "supported. Both x and y must either be DistanceMatrix instances "
            "or array_like, but not mixed.")

    if not x_is_dm:
        # Neither input is a DistanceMatrix: coerce both and require matching
        # shapes. There is no notion of IDs for array_like input, so a lookup
        # cannot apply.
        if lookup is not None:
            raise ValueError("ID lookup can only be provided if inputs are "
                             "DistanceMatrix instances.")

        x, y = DistanceMatrix(x), DistanceMatrix(y)

        if x.shape != y.shape:
            raise ValueError("Distance matrices must have the same shape.")

        return x, y

    # Both inputs are DistanceMatrix instances: optionally remap IDs, then
    # align on the shared IDs (in x's order).
    if lookup is not None:
        x = _remap_ids(x, lookup, 'x', 'first')
        y = _remap_ids(y, lookup, 'y', 'second')

    shared_ids = [id_ for id_ in x.ids if id_ in y]
    num_shared = len(shared_ids)

    if strict and (num_shared != len(x.ids) or num_shared != len(y.ids)):
        raise ValueError("IDs exist that are not in both distance "
                         "matrices.")

    if num_shared < 1:
        raise ValueError("No matching IDs exist between the distance "
                         "matrices.")

    return x.filter(shared_ids), y.filter(shared_ids)
+
+
+def _remap_ids(dm, lookup, label, order):
+    "Return a copy of `dm` with its IDs remapped based on `lookup`."""
+    try:
+        remapped_ids = [lookup[id_] for id_ in dm.ids]
+    except KeyError as e:
+        raise KeyError("All IDs in the %s distance matrix (%s) must be in "
+                       "the lookup. Missing ID: %s" % (order, label, str(e)))
+
+    # Create a copy as we'll be modifying the IDs in place.
+    dm_copy = dm.copy()
+    dm_copy.ids = remapped_ids
+    return dm_copy
diff --git a/skbio/stats/distance/_permanova.py b/skbio/stats/distance/_permanova.py
new file mode 100644
index 0000000..1ec31f6
--- /dev/null
+++ b/skbio/stats/distance/_permanova.py
@@ -0,0 +1,221 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import range
+
+import warnings
+from functools import partial
+
+import numpy as np
+
+from ._base import (_preprocess_input, _run_monte_carlo_stats, _build_results,
+                    CategoricalStats)
+
+
def permanova(distance_matrix, grouping, column=None, permutations=999):
    """Test for significant differences between groups using PERMANOVA.

    Permutational Multivariate Analysis of Variance (PERMANOVA) is a
    non-parametric method that tests whether two or more groups of objects
    (e.g., samples) are significantly different based on a categorical factor.
    It is conceptually similar to ANOVA except that it operates on a distance
    matrix, which allows for multivariate analysis. PERMANOVA computes a
    pseudo-F statistic.

    Statistical significance is assessed via a permutation test. The assignment
    of objects to groups (`grouping`) is randomly permuted a number of times
    (controlled via `permutations`). A pseudo-F statistic is computed for each
    permutation and the p-value is the proportion of permuted pseudo-F
    statistics that are equal to or greater than the original (unpermuted)
    pseudo-F statistic.

    Parameters
    ----------
    distance_matrix : DistanceMatrix
        Distance matrix containing distances between objects (e.g., distances
        between samples of microbial communities).
    grouping : 1-D array_like or pandas.DataFrame
        Vector indicating the assignment of objects to groups. For example,
        these could be strings or integers denoting which group an object
        belongs to. If `grouping` is 1-D ``array_like``, it must be the same
        length and in the same order as the objects in `distance_matrix`. If
        `grouping` is a ``DataFrame``, the column specified by `column` will be
        used as the grouping vector. The ``DataFrame`` must be indexed by the
        IDs in `distance_matrix` (i.e., the row labels must be distance matrix
        IDs), but the order of IDs between `distance_matrix` and the
        ``DataFrame`` need not be the same. All IDs in the distance matrix must
        be present in the ``DataFrame``. Extra IDs in the ``DataFrame`` are
        allowed (they are ignored in the calculations).
    column : str, optional
        Column name to use as the grouping vector if `grouping` is a
        ``DataFrame``. Must be provided if `grouping` is a ``DataFrame``.
        Cannot be provided if `grouping` is 1-D ``array_like``.
    permutations : int, optional
        Number of permutations to use when assessing statistical
        significance. Must be greater than or equal to zero. If zero,
        statistical significance calculations will be skipped and the p-value
        will be ``np.nan``.

    Returns
    -------
    pandas.Series
        Results of the statistical test, including ``test statistic`` and
        ``p-value``.

    See Also
    --------
    anosim

    Notes
    -----
    See [1]_ for the original method reference, as well as ``vegan::adonis``,
    available in R's vegan package [2]_.

    The p-value will be ``np.nan`` if `permutations` is zero.

    References
    ----------
    .. [1] Anderson, Marti J. "A new method for non-parametric multivariate
       analysis of variance." Austral Ecology 26.1 (2001): 32-46.

    .. [2] http://cran.r-project.org/web/packages/vegan/index.html

    Examples
    --------
    See :mod:`skbio.stats.distance.anosim` for usage examples (both functions
    provide similar interfaces).

    """
    sample_size, num_groups, grouping, tri_idxs, distances = _preprocess_input(
        distance_matrix, grouping, column)

    # Quantities that are invariant across permutations: the size of each
    # group and the total sum of squared distances.
    group_sizes = np.bincount(grouping)
    s_T = (distances ** 2).sum() / sample_size

    def stat_for(perm_grouping):
        # Pseudo-F for one (possibly permuted) grouping vector.
        return _compute_f_stat(sample_size, num_groups, tri_idxs, distances,
                               group_sizes, s_T, perm_grouping)

    stat, p_value = _run_monte_carlo_stats(stat_for, grouping, permutations)

    return _build_results('PERMANOVA', 'pseudo-F', sample_size, num_groups,
                          stat, p_value, permutations)
+
+
+def _compute_f_stat(sample_size, num_groups, tri_idxs, distances, group_sizes,
+                    s_T, grouping):
+    """Compute PERMANOVA pseudo-F statistic."""
+    # Create a matrix where objects in the same group are marked with the group
+    # index (e.g. 0, 1, 2, etc.). objects that are not in the same group are
+    # marked with -1.
+    grouping_matrix = -1 * np.ones((sample_size, sample_size), dtype=int)
+    for group_idx in range(num_groups):
+        within_indices = _index_combinations(
+            np.where(grouping == group_idx)[0])
+        grouping_matrix[within_indices] = group_idx
+
+    # Extract upper triangle (in same order as distances were extracted
+    # from full distance matrix).
+    grouping_tri = grouping_matrix[tri_idxs]
+
+    # Calculate s_W for each group, accounting for different group sizes.
+    s_W = 0
+    for i in range(num_groups):
+        s_W += (distances[grouping_tri == i] ** 2).sum() / group_sizes[i]
+
+    s_A = s_T - s_W
+    return (s_A / (num_groups - 1)) / (s_W / (sample_size - num_groups))
+
+
+def _index_combinations(indices):
+    # Modified from http://stackoverflow.com/a/11144716
+    return np.tile(indices, len(indices)), np.repeat(indices, len(indices))
+
+
class PERMANOVA(CategoricalStats):
    """PERMANOVA statistical method executor.

    .. note:: Deprecated in scikit-bio 0.2.1-dev
       ``PERMANOVA`` will be removed in scikit-bio 0.3.0. It is replaced by
       ``permanova``, which provides a simpler procedural interface to running
       this statistical method.

    Permutational Multivariate Analysis of Variance (PERMANOVA) is a
    non-parametric method that tests whether two or more groups of objects are
    significantly different based on a categorical factor. It is conceptually
    similar to ANOVA except that it operates on a distance matrix, which allows
    for multivariate analysis. PERMANOVA computes a pseudo-F statistic and
    tests the significance through permutations.

    Notes
    -----
    See [1]_ for the original PERMANOVA reference, as well as
    ``vegan::adonis``, available in R's vegan package [2]_.

    References
    ----------
    .. [1] Anderson, Marti J. "A new method for non-parametric multivariate
       analysis of variance." Austral Ecology 26.1 (2001): 32-46.

    .. [2] http://cran.r-project.org/web/packages/vegan/index.html

    """

    short_method_name = 'PERMANOVA'
    long_method_name = 'Permutational Multivariate Analysis of Variance'
    test_statistic_name = 'pseudo-F statistic'

    def __init__(self, distance_matrix, grouping, column=None):
        warnings.warn(
            "skbio.stats.distance.PERMANOVA is deprecated and will be removed "
            "in scikit-bio 0.3.0. Please update your code to use "
            "skbio.stats.distance.permanova.", DeprecationWarning)

        super(PERMANOVA, self).__init__(distance_matrix, grouping,
                                        column=column)

        # Cache the quantities that stay fixed across permutations: group
        # sizes, group count, condensed distances, and total sum of squares.
        self._group_sizes = np.bincount(self._grouping)
        self._num_groups = len(self._groups)
        self._distances = self._dm.condensed_form()
        self._s_T = (self._distances ** 2).sum() / self._dm.shape[0]

    def _run(self, grouping):
        """Compute PERMANOVA pseudo-F statistic."""
        # Label every ordered pair of objects in the same group with that
        # group's index; pairs spanning two different groups stay -1.
        pair_groups = -1 * np.ones(self._dm.shape, dtype=int)
        for g in range(len(self._groups)):
            members = np.where(grouping == g)[0]
            pair_groups[self._index_combinations(members)] = g

        # Keep only the upper triangle, matching the order in which the
        # distances were condensed from the full distance matrix.
        return self._compute_f_stat(pair_groups[self._tri_idxs])

    def _index_combinations(self, indices):
        # All ordered pairs (i, j) over `indices`, as a fancy-indexing tuple.
        # Modified from http://stackoverflow.com/a/11144716
        n = len(indices)
        return np.tile(indices, n), np.repeat(indices, n)

    def _compute_f_stat(self, grouping_tri):
        num_groups = self._num_groups
        sample_size = self._dm.shape[0]

        # Within-group sum of squared distances, each group normalized by its
        # size.
        s_W = sum((self._distances[grouping_tri == i] ** 2).sum() /
                  self._group_sizes[i] for i in range(num_groups))

        s_A = self._s_T - s_W
        return ((s_A / (num_groups - 1)) /
                (s_W / (sample_size - num_groups)))
diff --git a/skbio/stats/distance/tests/__init__.py b/skbio/stats/distance/tests/__init__.py
new file mode 100644
index 0000000..c99682c
--- /dev/null
+++ b/skbio/stats/distance/tests/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/stats/distance/tests/data/bioenv_df_vegan.txt b/skbio/stats/distance/tests/data/bioenv_df_vegan.txt
new file mode 100644
index 0000000..39e9926
--- /dev/null
+++ b/skbio/stats/distance/tests/data/bioenv_df_vegan.txt
@@ -0,0 +1,25 @@
+#SampleID	log(N)	P	K	Ca	pH	Al
+18	2.98568193770049	42.1	139.9	519.4	2.7	39
+15	2.59525470695687	39.1	167.3	356.7	2.8	88.1
+24	3.00568260440716	67.7	207.1	973.3	3	138
+27	3.02529107579554	60.8	233.7	834	2.8	15.4
+23	3.16968558067743	54.5	180.6	777	2.7	24.2
+19	3.1267605359604	40.9	171.4	691.8	2.7	104.8
+22	3.28091121578765	36.7	171.4	738.6	2.8	20.7
+16	3.18635263316264	31	138.2	394.6	2.8	74.2
+28	3.39450839351136	73.5	260	748.6	2.8	17.9
+13	3.3357695763397	40.5	313.8	540.7	2.8	329.7
+14	3.08190996979504	38.1	146.8	512.2	2.7	92.3
+20	3.26575941076705	61.9	202.2	741.2	2.9	124.3
+25	3.1267605359604	50.6	151.7	648	2.9	12.1
+7	3.41772668361337	24.6	78.7	188.5	3.1	294.9
+5	3.49953328238302	22.7	43.6	240.3	3.1	39
+6	2.94968833505258	26.4	61.1	259.1	3	155.1
+3	3.43720781918519	32.3	73.7	219	3.3	304.6
+4	2.89037175789616	64.9	224.5	517.6	2.9	435.1
+2	3.10458667846607	47.4	165.9	436.1	2.9	316.5
+9	2.70805020110221	48.4	127.4	499.6	3	227.1
+12	2.77258872223978	32.7	126.4	471.4	2.9	108.8
+10	2.66025953726586	62.8	215.2	709.7	3.2	168.2
+11	2.81540871942271	55.8	205.3	1169.7	3.6	253.6
+21	3.04452243772342	26.5	104.4	484.8	3	35.8
diff --git a/skbio/stats/distance/tests/data/bioenv_dm_vegan.txt b/skbio/stats/distance/tests/data/bioenv_dm_vegan.txt
new file mode 100644
index 0000000..21bca7f
--- /dev/null
+++ b/skbio/stats/distance/tests/data/bioenv_dm_vegan.txt
@@ -0,0 +1,25 @@
+	18	15	24	27	23	19	22	16	28	13	14	20	25	7	5	6	3	4	2	9	12	10	11	21
+18	0	0.452897212693772	0.715360803826621	0.624766305873718	0.493154219155204	0.43865006075341	0.432340187193151	0.440822999771334	0.698420319937299	0.46351050707088	0.523211654445006	0.408294556891784	0.604911324328014	0.434178076197442	0.545291972003212	0.393272103797605	0.330797959207167	0.563648430837013	0.66523173799306	0.511183841155759	0.46804467412999	0.515485401604058	0.580143332456965	0.668700755517761
+15	0.452897212693772	0	0.611610336420561	0.655733483106344	0.410315012072556	0.453712867932398	0.424523404131089	0.377720269664141	0.654941298927269	0.417480326012996	0.35798188425419	0.360483800701559	0.589951055270385	0.532758773890324	0.584210632971972	0.37822819560664	0.53024185913761	0.584066987502168	0.778451630131581	0.543847849995263	0.527747948281903	0.582043972050997	0.591017841508146	0.753206717859593
+24	0.715360803826621	0.611610336420561	0	0.749908842875888	0.624415191219002	0.598837193048421	0.660837272556231	0.638184679569268	0.724777756165372	0.708236862111778	0.645430446977294	0.535631415741837	0.693070808707219	0.71138410637102	0.810870811602888	0.638724301482429	0.721802013418365	0.751900340516049	0.812623821498287	0.596958779537393	0.58038306362995	0.631041437421227	0.634765906326142	0.634430266921848
+27	0.624766305873718	0.655733483106344	0.749908842875888	0	0.595583136283997	0.480812012211359	0.509183154263298	0.68272679039539	0.503151848497109	0.745346748197303	0.742254576600809	0.581672470591416	0.574612957968774	0.720436148000664	0.836899929519594	0.712346970098655	0.722493161303423	0.81059478353512	0.67179686908506	0.741190787334003	0.632934861831476	0.743347684665419	0.787761710107256	0.545259963579148
+23	0.493154219155204	0.410315012072556	0.624415191219002	0.595583136283997	0	0.39798190453075	0.551027253590122	0.530245080842824	0.688639670633179	0.558293400969046	0.499126157677784	0.374655747006781	0.560198031134205	0.575636505169288	0.709011461350035	0.555007697254655	0.569632137079713	0.700292123849093	0.686164552961924	0.610923773488132	0.522084519099983	0.614601499571679	0.573249035174906	0.619889410480321
+19	0.43865006075341	0.453712867932398	0.598837193048421	0.480812012211359	0.39798190453075	0	0.447090470008913	0.462780164587808	0.62210941999066	0.517037833858162	0.547198044950618	0.436336586468505	0.466295105039809	0.533153195372235	0.649768050102464	0.468877587536534	0.489021376289978	0.59989451354897	0.600155810641532	0.482373873247893	0.455971043613941	0.485136325934296	0.561030342274039	0.514596020968001
+22	0.432340187193151	0.424523404131089	0.660837272556231	0.509183154263298	0.551027253590122	0.447090470008913	0	0.318721801577202	0.560878982326328	0.493770323127832	0.484678838434732	0.466044129553462	0.540556449240843	0.537716889466216	0.700184066308371	0.472868628510347	0.525506721244604	0.632959267911546	0.74368261302233	0.514539804165303	0.555492307675902	0.545820329626384	0.605832461377114	0.55345791449267
+16	0.440822999771334	0.377720269664141	0.638184679569268	0.68272679039539	0.530245080842824	0.462780164587808	0.318721801577202	0	0.664347219281937	0.418561808219052	0.408452070724878	0.358924933851484	0.5815245364665	0.515594594372192	0.532206785163483	0.4397020901552	0.529224758564709	0.566927472518065	0.753903949731256	0.515921987762518	0.494683547387077	0.495896853216952	0.583525904687767	0.658433720786117
+28	0.698420319937299	0.654941298927269	0.724777756165372	0.503151848497109	0.688639670633179	0.62210941999066	0.560878982326328	0.664347219281937	0	0.733169240464629	0.745486578838407	0.618846839169478	0.713792541454381	0.74890906664669	0.853976950339078	0.756087305765939	0.762966538206308	0.791917677364091	0.81258384801323	0.69092466328736	0.711493718852431	0.72961696443748	0.777088600232844	0.628275585712577
+13	0.46351050707088	0.417480326012996	0.708236862111778	0.745346748197303	0.558293400969046	0.517037833858162	0.493770323127832	0.418561808219052	0.733169240464629	0	0.434107073238778	0.495251428454508	0.659888526193112	0.482068398983308	0.561095050557647	0.335593085096115	0.482301178660398	0.475008544079799	0.738041440415793	0.50014768619111	0.58859107425281	0.560373485926146	0.58991583345561	0.703705376166457
+14	0.523211654445006	0.35798188425419	0.645430446977294	0.742254576600809	0.499126157677784	0.547198044950618	0.484678838434732	0.408452070724878	0.745486578838407	0.434107073238778	0	0.460778501195483	0.598177860722774	0.566437502573166	0.60997028881234	0.437494609608184	0.560278994397196	0.564565884853771	0.814568858546278	0.563078236117464	0.634425207383389	0.539163427744901	0.64719734947765	0.746247657025891
+20	0.408294556891784	0.360483800701559	0.535631415741837	0.581672470591416	0.374655747006781	0.436336586468505	0.466044129553462	0.358924933851484	0.618846839169478	0.495251428454508	0.460778501195483	0	0.614834763166026	0.527001028768971	0.636029763289345	0.402589693145724	0.520623812716001	0.635315798842479	0.669112826680887	0.536517399269416	0.410136150776988	0.536831397119922	0.57987764447421	0.650019383574573
+25	0.604911324328014	0.589951055270385	0.693070808707219	0.574612957968774	0.560198031134205	0.466295105039809	0.540556449240843	0.5815245364665	0.713792541454381	0.659888526193112	0.598177860722774	0.614834763166026	0	0.635642388269005	0.743838694259872	0.621437815990208	0.63532741224439	0.661420102186089	0.725910634940022	0.669519591989177	0.689034754247501	0.693127422615701	0.709588799419957	0.678749793333
+7	0.434178076197442	0.532758773890324	0.71138410637102	0.720436148000664	0.575636505169288	0.533153195372235	0.537716889466216	0.515594594372192	0.74890906664669	0.482068398983308	0.566437502573166	0.527001028768971	0.635642388269005	0	0.510261674576661	0.323081424973504	0.332861861510004	0.588465187037358	0.640307330403348	0.582254042755471	0.59221097693992	0.57611902810367	0.682931566080637	0.694784212730897
+5	0.545291972003212	0.584210632971972	0.810870811602888	0.836899929519594	0.709011461350035	0.649768050102464	0.700184066308371	0.532206785163483	0.853976950339078	0.561095050557647	0.60997028881234	0.636029763289345	0.743838694259872	0.510261674576661	0	0.476300258746816	0.526916663583772	0.593846530768998	0.758314271175518	0.691571027310973	0.761795894022086	0.702775696818748	0.581204902584864	0.85891984357451
+6	0.393272103797605	0.37822819560664	0.638724301482429	0.712346970098655	0.555007697254655	0.468877587536534	0.472868628510347	0.4397020901552	0.756087305765939	0.335593085096115	0.437494609608184	0.402589693145724	0.621437815990208	0.323081424973504	0.476300258746816	0	0.382921704843115	0.495001919452646	0.623361805111518	0.50370863979105	0.494400467755648	0.488742161380673	0.573395486170862	0.701044642267538
+3	0.330797959207167	0.53024185913761	0.721802013418365	0.722493161303423	0.569632137079713	0.489021376289978	0.525506721244604	0.529224758564709	0.762966538206308	0.482301178660398	0.560278994397196	0.520623812716001	0.63532741224439	0.332861861510004	0.526916663583772	0.382921704843115	0	0.541116521910622	0.538929257690873	0.50363315957546	0.477667500339432	0.485484212285837	0.642536439557206	0.731616786185991
+4	0.563648430837013	0.584066987502168	0.751900340516049	0.81059478353512	0.700292123849093	0.59989451354897	0.632959267911546	0.566927472518065	0.791917677364091	0.475008544079799	0.564565884853771	0.635315798842479	0.661420102186089	0.588465187037358	0.593846530768998	0.495001919452646	0.541116521910622	0	0.569041488360324	0.555333827555126	0.650999789659056	0.618391387774669	0.620122895019617	0.79004091039811
+2	0.66523173799306	0.778451630131581	0.812623821498287	0.67179686908506	0.686164552961924	0.600155810641532	0.74368261302233	0.753903949731256	0.81258384801323	0.738041440415793	0.814568858546278	0.669112826680887	0.725910634940022	0.640307330403348	0.758314271175518	0.623361805111518	0.538929257690873	0.569041488360324	0	0.6473786039708	0.537771707661098	0.605751675531862	0.787300504670568	0.71419000634693
+9	0.511183841155759	0.543847849995263	0.596958779537393	0.741190787334003	0.610923773488132	0.482373873247893	0.514539804165303	0.515921987762518	0.69092466328736	0.50014768619111	0.563078236117464	0.536517399269416	0.669519591989177	0.582254042755471	0.691571027310973	0.50370863979105	0.50363315957546	0.555333827555126	0.6473786039708	0	0.458910772686871	0.197543573969677	0.453468643433417	0.589071475907458
+12	0.46804467412999	0.527747948281903	0.58038306362995	0.632934861831476	0.522084519099983	0.455971043613941	0.555492307675902	0.494683547387077	0.711493718852431	0.58859107425281	0.634425207383389	0.410136150776988	0.689034754247501	0.59221097693992	0.761795894022086	0.494400467755648	0.477667500339432	0.650999789659056	0.537771707661098	0.458910772686871	0	0.405413621307249	0.521219574609625	0.6041813253885
+10	0.515485401604058	0.582043972050997	0.631041437421227	0.743347684665419	0.614601499571679	0.485136325934296	0.545820329626384	0.495896853216952	0.72961696443748	0.560373485926146	0.539163427744901	0.536831397119922	0.693127422615701	0.57611902810367	0.702775696818748	0.488742161380673	0.485484212285837	0.618391387774669	0.605751675531862	0.197543573969677	0.405413621307249	0	0.438888571031113	0.584480238627684
+11	0.580143332456965	0.591017841508146	0.634765906326142	0.787761710107256	0.573249035174906	0.561030342274039	0.605832461377114	0.583525904687767	0.777088600232844	0.58991583345561	0.64719734947765	0.57987764447421	0.709588799419957	0.682931566080637	0.581204902584864	0.573395486170862	0.642536439557206	0.620122895019617	0.787300504670568	0.453468643433417	0.521219574609625	0.438888571031113	0	0.747120312482333
+21	0.668700755517761	0.753206717859593	0.634430266921848	0.545259963579148	0.619889410480321	0.514596020968001	0.55345791449267	0.658433720786117	0.628275585712577	0.703705376166457	0.746247657025891	0.650019383574573	0.678749793333	0.694784212730897	0.85891984357451	0.701044642267538	0.731616786185991	0.79004091039811	0.71419000634693	0.589071475907458	0.6041813253885	0.584480238627684	0.747120312482333	0
diff --git a/skbio/stats/distance/tests/data/bioenv_exp_results_vegan.txt b/skbio/stats/distance/tests/data/bioenv_exp_results_vegan.txt
new file mode 100644
index 0000000..bbd7804
--- /dev/null
+++ b/skbio/stats/distance/tests/data/bioenv_exp_results_vegan.txt
@@ -0,0 +1,7 @@
+vars	size	correlation
+P	1	0.25163022609618835
+P, Al	2	0.4003778484896049
+P, Ca, Al	3	0.4004805867496183
+P, Ca, pH, Al	4	0.3618749732452448
+log(N), P, Ca, pH, Al	5	0.3215524892624249
+log(N), P, K, Ca, pH, Al	6	0.2821814757209515
diff --git a/skbio/stats/distance/tests/data/df.txt b/skbio/stats/distance/tests/data/df.txt
new file mode 100644
index 0000000..fd3161a
--- /dev/null
+++ b/skbio/stats/distance/tests/data/df.txt
@@ -0,0 +1,8 @@
+#SampleID	TOT_ORG_CARB	SILT_CLAY	ELEVATION	SOIL_MOISTURE_DEFICIT	CARB_NITRO_RATIO	ANNUAL_SEASON_TEMP	ANNUAL_SEASON_PRECPT	PH	CMIN_RATE	LONGITUDE	LATITUDE
+CA1.141704	16.7	73	2003	198	13	10.3	400	7.27	2.276	-111.7666667	36.05
+MT2.141698	39.1	35	1000	70	23.087	7	450	6.66	19.7	-114	46.8
+CO2.141657	18.1	24	2400	104	31.8	6.1	350	5.68	9.223	-105.3333333	40.58333333
+TL3.141709	53.9	52	894	-212	24.6	-9.3	400	4.23	16.456	-149.5833333	68.63333333
+SN3.141650	16.6	20	3000	-252	13.9	3.6	600	5.74	6.289	-118.1666667	36.45
+ExtraSample	42.0	42		-42	breh	3.6	602	1.74	0.289	-122.7	36.45
+BB2.141659	52.2	44	400	-680	21.4	6.1	1200	4.6	2.223	-68.1	44.86666667
diff --git a/skbio/stats/distance/tests/data/df_extra_column.txt b/skbio/stats/distance/tests/data/df_extra_column.txt
new file mode 100644
index 0000000..d2dd6e0
--- /dev/null
+++ b/skbio/stats/distance/tests/data/df_extra_column.txt
@@ -0,0 +1,8 @@
+#SampleID	SILT_CLAY	ELEVATION	ExtraColumn	SOIL_MOISTURE_DEFICIT	ANNUAL_SEASON_TEMP	CARB_NITRO_RATIO	ANNUAL_SEASON_PRECPT	PH	CMIN_RATE	LONGITUDE	LATITUDE	TOT_ORG_CARB
+TL3.141709	52	894	42	-212	-9.3	24.6	400	4.23	16.456	-149.5833333	68.63333333	53.9
+MT2.141698	35	1000	column	70	7	23.087	450	6.66	19.7	-114	46.8	39.1
+CO2.141657	24	2400	with	104	6.1	31.8	350	5.68	9.223	-105.3333333	40.58333333	18.1
+ExtraSample	42		some	-42	3.6	breh	602	1.74	0.289	-122.7	36.45	42.0
+SN3.141650	20	3000	really	-252	3.6	13.9	600	5.74	6.289	-118.1666667	36.45	16.6
+BB2.141659	44	400	invalid	-680	6.1	21.4	1200	4.6	2.223	-68.1	44.86666667	52.2
+CA1.141704	73	2003	data	198	10.3	13	400	7.27	2.276	-111.7666667	36.05	16.7
diff --git a/skbio/stats/distance/tests/data/dm.txt b/skbio/stats/distance/tests/data/dm.txt
new file mode 100644
index 0000000..76251ec
--- /dev/null
+++ b/skbio/stats/distance/tests/data/dm.txt
@@ -0,0 +1,7 @@
+	MT2.141698	CA1.141704	BB2.141659	CO2.141657	TL3.141709	SN3.141650
+MT2.141698	0.0	0.623818643706	0.750015427505	0.585201193913	0.729023583672	0.622135587669
+CA1.141704	0.623818643706	0.0	0.774881224555	0.649822398416	0.777203137034	0.629507320436
+BB2.141659	0.750015427505	0.774881224555	0.0	0.688845424001	0.567470311282	0.721707516043
+CO2.141657	0.585201193913	0.649822398416	0.688845424001	0.0	0.658853575764	0.661223617505
+TL3.141709	0.729023583672	0.777203137034	0.567470311282	0.658853575764	0.0	0.711173405838
+SN3.141650	0.622135587669	0.629507320436	0.721707516043	0.661223617505	0.711173405838	0.0
diff --git a/skbio/stats/distance/tests/data/dm2.txt b/skbio/stats/distance/tests/data/dm2.txt
new file mode 100644
index 0000000..0de75ac
--- /dev/null
+++ b/skbio/stats/distance/tests/data/dm2.txt
@@ -0,0 +1,7 @@
+	MT2.141698	CA1.141704	BB2.141659	CO2.141657	TL3.141709	SN3.141650
+MT2.141698	0.0	0.623818643706	0.750015427505	0.585201193913	0.729023583672	0.822135587669
+CA1.141704	0.623818643706	0.0	0.974881224555	0.649822398416	0.737203137034	0.629507320436
+BB2.141659	0.750015427505	0.974881224555	0.0	0.688845424001	0.567470311282	0.721707516043
+CO2.141657	0.585201193913	0.649822398416	0.688845424001	0.0	0.658853575764	0.661223617505
+TL3.141709	0.729023583672	0.737203137034	0.567470311282	0.658853575764	0.0	0.711173405838
+SN3.141650	0.822135587669	0.629507320436	0.721707516043	0.661223617505	0.711173405838	0.0
diff --git a/skbio/stats/distance/tests/data/dm3.txt b/skbio/stats/distance/tests/data/dm3.txt
new file mode 100644
index 0000000..ee64026
--- /dev/null
+++ b/skbio/stats/distance/tests/data/dm3.txt
@@ -0,0 +1,7 @@
+	MT2.141698	CA1.141704	BB2.141659	CO2.141657	TL3.141709	SN3.141650
+MT2.141698	0.0	0.523818643706	0.750015427505	0.585201193913	0.729023583672	0.622135587669
+CA1.141704	0.523818643706	0.0	0.774881224555	0.649822398416	0.777203137034	0.629507320436
+BB2.141659	0.750015427505	0.774881224555	0.0	0.687745424001	0.567470311282	0.721707516043
+CO2.141657	0.585201193913	0.649822398416	0.687745424001	0.0	0.658853575764	0.661223617505
+TL3.141709	0.729023583672	0.777203137034	0.567470311282	0.658853575764	0.0	0.781173405838
+SN3.141650	0.622135587669	0.629507320436	0.721707516043	0.661223617505	0.781173405838	0.0
diff --git a/skbio/stats/distance/tests/data/dm4.txt b/skbio/stats/distance/tests/data/dm4.txt
new file mode 100644
index 0000000..c434329
--- /dev/null
+++ b/skbio/stats/distance/tests/data/dm4.txt
@@ -0,0 +1,7 @@
+	MT2.141698	CA1.141704	BB2.141659	CO2.141657	TL3.141709	SN3.141650
+MT2.141698	0.0	0.623818643706	0.750015427505	0.585201193913	0.729023583672	0.652135587669
+CA1.141704	0.623818643706	0.0	0.974881224555	0.649822398416	0.737203137034	0.629507320436
+BB2.141659	0.750015427505	0.974881224555	0.0	0.688845424001	0.657470311282	0.721707516043
+CO2.141657	0.585201193913	0.649822398416	0.688845424001	0.0	0.658853575764	0.661223617505
+TL3.141709	0.729023583672	0.737203137034	0.657470311282	0.658853575764	0.0	0.711173405838
+SN3.141650	0.652135587669	0.629507320436	0.721707516043	0.661223617505	0.711173405838	0.0
diff --git a/skbio/stats/distance/tests/data/dm_reordered.txt b/skbio/stats/distance/tests/data/dm_reordered.txt
new file mode 100644
index 0000000..610b1f5
--- /dev/null
+++ b/skbio/stats/distance/tests/data/dm_reordered.txt
@@ -0,0 +1,7 @@
+	MT2.141698	CO2.141657	BB2.141659	CA1.141704	TL3.141709	SN3.141650
+MT2.141698	0.0	0.585201193913	0.750015427505	0.623818643706	0.729023583672	0.622135587669
+CO2.141657	0.585201193913	0.0	0.688845424001	0.649822398416	0.658853575764	0.661223617505
+BB2.141659	0.750015427505	0.688845424001	0.0	0.774881224555	0.567470311282	0.721707516043
+CA1.141704	0.623818643706	0.649822398416	0.774881224555	0.0	0.777203137034	0.629507320436
+TL3.141709	0.729023583672	0.658853575764	0.567470311282	0.777203137034	0.0	0.711173405838
+SN3.141650	0.622135587669	0.661223617505	0.721707516043	0.629507320436	0.711173405838	0.0
diff --git a/skbio/stats/distance/tests/data/exp_results.txt b/skbio/stats/distance/tests/data/exp_results.txt
new file mode 100644
index 0000000..8756468
--- /dev/null
+++ b/skbio/stats/distance/tests/data/exp_results.txt
@@ -0,0 +1,12 @@
+vars	size	correlation
+PH	1	0.75
+SOIL_MOISTURE_DEFICIT, PH	2	0.7464285714285714
+SOIL_MOISTURE_DEFICIT, CARB_NITRO_RATIO, PH	3	0.8107142857142857
+TOT_ORG_CARB, SOIL_MOISTURE_DEFICIT, CARB_NITRO_RATIO, PH	4	0.8107142857142857
+ELEVATION, SOIL_MOISTURE_DEFICIT, CARB_NITRO_RATIO, ANNUAL_SEASON_TEMP, PH	5	0.7892857142857144
+TOT_ORG_CARB, ELEVATION, SOIL_MOISTURE_DEFICIT, CARB_NITRO_RATIO, ANNUAL_SEASON_TEMP, PH	6	0.775
+TOT_ORG_CARB, ELEVATION, SOIL_MOISTURE_DEFICIT, CARB_NITRO_RATIO, ANNUAL_SEASON_TEMP, ANNUAL_SEASON_PRECPT, PH	7	0.7285714285714285
+TOT_ORG_CARB, ELEVATION, SOIL_MOISTURE_DEFICIT, CARB_NITRO_RATIO, ANNUAL_SEASON_TEMP, ANNUAL_SEASON_PRECPT, PH, CMIN_RATE	8	0.675
+TOT_ORG_CARB, SILT_CLAY, ELEVATION, SOIL_MOISTURE_DEFICIT, CARB_NITRO_RATIO, ANNUAL_SEASON_TEMP, ANNUAL_SEASON_PRECPT, PH, CMIN_RATE	9	0.6392857142857143
+TOT_ORG_CARB, SILT_CLAY, ELEVATION, SOIL_MOISTURE_DEFICIT, CARB_NITRO_RATIO, ANNUAL_SEASON_TEMP, ANNUAL_SEASON_PRECPT, PH, CMIN_RATE, LATITUDE	10	0.6071428571428571
+TOT_ORG_CARB, SILT_CLAY, ELEVATION, SOIL_MOISTURE_DEFICIT, CARB_NITRO_RATIO, ANNUAL_SEASON_TEMP, ANNUAL_SEASON_PRECPT, PH, CMIN_RATE, LONGITUDE, LATITUDE	11	0.5392857142857144
diff --git a/skbio/stats/distance/tests/data/exp_results_different_column_order.txt b/skbio/stats/distance/tests/data/exp_results_different_column_order.txt
new file mode 100644
index 0000000..5ced44b
--- /dev/null
+++ b/skbio/stats/distance/tests/data/exp_results_different_column_order.txt
@@ -0,0 +1,12 @@
+vars	size	correlation
+PH	1	0.75
+PH, SOIL_MOISTURE_DEFICIT	2	0.7464285714285714
+PH, CARB_NITRO_RATIO, SOIL_MOISTURE_DEFICIT	3	0.8107142857142857
+PH, CARB_NITRO_RATIO, SOIL_MOISTURE_DEFICIT, TOT_ORG_CARB	4	0.8107142857142857
+PH, ANNUAL_SEASON_TEMP, CARB_NITRO_RATIO, SOIL_MOISTURE_DEFICIT, ELEVATION	5	0.7892857142857144
+PH, ANNUAL_SEASON_TEMP, CARB_NITRO_RATIO, SOIL_MOISTURE_DEFICIT, ELEVATION, TOT_ORG_CARB	6	0.775
+PH, ANNUAL_SEASON_PRECPT, ANNUAL_SEASON_TEMP, CARB_NITRO_RATIO, SOIL_MOISTURE_DEFICIT, ELEVATION, TOT_ORG_CARB	7	0.7285714285714285
+CMIN_RATE, PH, ANNUAL_SEASON_PRECPT, ANNUAL_SEASON_TEMP, CARB_NITRO_RATIO, SOIL_MOISTURE_DEFICIT, ELEVATION, TOT_ORG_CARB	8	0.675
+CMIN_RATE, PH, ANNUAL_SEASON_PRECPT, ANNUAL_SEASON_TEMP, CARB_NITRO_RATIO, SOIL_MOISTURE_DEFICIT, ELEVATION, SILT_CLAY, TOT_ORG_CARB	9	0.6392857142857143
+LATITUDE, CMIN_RATE, PH, ANNUAL_SEASON_PRECPT, ANNUAL_SEASON_TEMP, CARB_NITRO_RATIO, SOIL_MOISTURE_DEFICIT, ELEVATION, SILT_CLAY, TOT_ORG_CARB	10	0.6071428571428571
+LATITUDE, LONGITUDE, CMIN_RATE, PH, ANNUAL_SEASON_PRECPT, ANNUAL_SEASON_TEMP, CARB_NITRO_RATIO, SOIL_MOISTURE_DEFICIT, ELEVATION, SILT_CLAY, TOT_ORG_CARB	11	0.5392857142857144
diff --git a/skbio/stats/distance/tests/data/exp_results_single_column.txt b/skbio/stats/distance/tests/data/exp_results_single_column.txt
new file mode 100644
index 0000000..69633cd
--- /dev/null
+++ b/skbio/stats/distance/tests/data/exp_results_single_column.txt
@@ -0,0 +1,2 @@
+vars	size	correlation
+PH	1	0.75
diff --git a/skbio/stats/distance/tests/data/mantel_env_dm_vegan.txt b/skbio/stats/distance/tests/data/mantel_env_dm_vegan.txt
new file mode 100644
index 0000000..fa11ebd
--- /dev/null
+++ b/skbio/stats/distance/tests/data/mantel_env_dm_vegan.txt
@@ -0,0 +1,24 @@
+0	2.07679533239178	6.88145502389971	4.47292990349355	2.50815747366843	3.58036212296744	2.6591292750976	2.29553760953804	4.71171667874897	5.16305059040855	2.21426134242764	3.92961213866286	3.13399015473536	4.90270725079791	5.49476199174931	4.24083600731234	6.11441501785041	6.05615282291975	4.95222984315722	4.21090492925648	2.85939902504818	4.97193013540636	6.55517500441812	2.54101803759533
+2.07679533239178	0	6.90291144667953	4.64003084174476	3.72544116897342	4.23057909059422	3.32236571813861	2.52327233791657	5.13562550988481	5.1437583829781	2.50862666200143	4.72257599677909	3.43881324942989	4.85150831682718	5.50342943536056	3.52222099128756	5.87491943777315	5.49066317297654	4.27208092800677	3.15270231033824	1.74819033890028	4.12374288054719	6.15931692104792	3.35050234347791
+6.88145502389971	6.90291144667953	0	5.48014130249836	5.71422767011491	5.44458516207639	6.21128694887343	7.57897769844682	5.72787031540342	5.50347777475312	6.05215034685817	5.13756253132332	6.81750519634229	8.76568212120474	9.04444618731187	8.33992506606364	9.11412838063313	5.67503738892289	6.94693258686523	5.30131540387761	6.85740763239976	5.5325902108287	6.30781548936649	7.84284307315349
+4.47292990349355	4.64003084174476	5.48014130249836	0	3.48549610587919	3.96397362332123	3.14459815680532	5.59791469171996	2.25198909783448	5.18311849821211	4.54741248463763	3.71058624754798	2.69797588499755	7.82504504220559	7.61406607135237	7.13381545095109	8.44384430044265	6.71088081591996	6.35910146136893	5.18001442223706	5.2381960545819	4.7094220651569	6.46329094926891	5.88708881996361
+2.50815747366843	3.72544116897342	5.71422767011491	3.48549610587919	0	2.09064557523382	2.35007842477788	3.84437623072582	3.37982049066074	4.82948860655148	2.61344414995761	3.08212085298007	3.39826001248126	6.34083256759322	6.78044024394629	5.9882682085243	7.368182657586	6.29229240618139	5.93365009305124	4.74001147278235	4.16875045563175	5.19193126631916	6.60549692940372	3.75170839493609
+3.58036212296744	4.23057909059422	5.44458516207639	3.96397362332123	2.09064557523382	0	2.91550590841545	4.53693024483879	4.16796766816088	4.70318440048524	2.79541837375649	3.79457424274492	4.23335312544269	6.52562476479155	7.28216604836819	6.15919058556992	7.48332275942444	6.29601002300864	6.13547796850531	4.6792323698443	4.45343001677936	5.90808732531762	7.12563061433389	4.29741620103145
+2.6591292750976	3.32236571813861	6.21128694887343	3.14459815680532	2.35007842477788	2.91550590841545	0	3.03537232143609	3.19717141237262	4.78031309163852	2.41753325811849	3.59607995374352	2.12854912039549	5.46704031619679	5.33701156479808	4.96043060818422	6.44826304723108	6.36381765874823	5.33050747387333	4.28625407689455	3.30334570818273	4.94667231796753	6.31277712868333	3.58610710419189
+2.29553760953804	2.52327233791657	7.57897769844682	5.59791469171996	3.84437623072582	4.53693024483879	3.03537232143609	0	5.42581905199784	5.5382425777602	2.21628461900339	4.91569734557852	3.76898373844346	3.66958270784948	3.7596809535214	3.09087676998529	5.16423793264611	6.19348191138961	4.67007617153741	4.11204634372634	2.12921147954829	5.20716627366845	6.64769970583513	2.40407490833653
+4.71171667874897	5.13562550988481	5.72787031540342	2.25198909783448	3.37982049066074	4.16796766816088	3.19717141237262	5.42581905199784	0	4.78731905692567	4.50479586376459	3.38670838656427	3.20693742829686	7.42683056394166	7.34570046678601	7.17015799953149	7.98002716787638	6.41561622183692	6.2124182195955	5.39657010741707	5.54110800674231	5.15776751659351	6.8516874226519	6.10574081766257
+5.16305059040855	5.1437583829781	5.50347777475312	5.18311849821211	4.82948860655148	4.70318440048524	4.78031309163852	5.5382425777602	4.78731905692567	0	4.67746569480341	4.08051856974203	5.63341628143736	6.13993687315028	7.7127998868603	6.72017341542958	6.71492819699427	4.33113554849634	4.44684649984502	4.82408553908334	5.61768442520245	5.26230208160398	6.54992709860496	6.42845550946653
+2.21426134242764	2.50862666200143	6.05215034685817	4.54741248463763	2.61344414995761	2.79541837375649	2.41753325811849	2.21628461900339	4.50479586376459	4.67746569480341	0	3.67536647198733	3.50853972062287	4.8908007524505	5.29902826407876	4.05541661524461	6.11473384050956	5.23487714375643	4.87089053925741	3.21806378630338	2.37451207574614	5.0909474774308	6.73970215554685	3.03452592937357
+3.92961213866286	4.72257599677909	5.13756253132332	3.71058624754798	3.08212085298007	3.79457424274492	3.59607995374352	4.91569734557852	3.38670838656427	4.08051856974203	3.67536647198733	0	3.67506523963379	6.73163254348659	7.18535849292866	6.60015854488732	7.50539564845393	4.98323825896227	5.96071394777962	4.63371782472599	5.21021128432828	5.3351470840398	6.70375146848393	5.02891238126826
+3.13399015473536	3.43881324942989	6.81750519634229	2.69797588499755	3.39826001248126	4.23335312544269	2.12854912039549	3.76898373844346	3.20693742829686	5.63341628143736	3.50853972062287	3.67506523963379	0	6.07359964931452	5.55282592144639	5.28753056505203	6.86459352948227	6.60873730627615	5.6314567316324	4.5625504604801	3.80862359775727	4.74234966729958	6.39367296383052	4.14114063107224
+4.90270725079791	4.85150831682718	8.76568212120474	7.82504504220559	6.34083256759322	6.52562476479155	5.46704031619679	3.66958270784948	7.42683056394166	6.13993687315028	4.8908007524505	6.73163254348659	6.07359964931452	0	3.95581421039689	2.89070054815662	2.00584491640009	6.4169416509523	3.98054063154574	5.12176871169235	4.25495681827209	6.42605641292207	6.84353264931	4.64487610678987
+5.49476199174931	5.50342943536056	9.04444618731187	7.61406607135237	6.78044024394629	7.28216604836819	5.33701156479808	3.7596809535214	7.34570046678601	7.7127998868603	5.29902826407876	7.18535849292866	5.55282592144639	3.95581421039689	0	4.04435991796256	5.01228422881506	8.11947221764963	5.86295047339197	5.99126094814824	4.58960572590818	6.61131730349409	7.40381675001318	4.84021195384958
+4.24083600731234	3.52222099128756	8.33992506606364	7.13381545095109	5.9882682085243	6.15919058556992	4.96043060818422	3.09087676998529	7.17015799953149	6.72017341542958	4.05541661524461	6.60015854488732	5.28753056505203	2.89070054815662	4.04435991796256	0	3.64256798987379	6.01870458325898	4.26211882754801	3.82653767294834	2.5630508080946	5.89444500689165	6.82255468001791	4.23311465341886
+6.11441501785041	5.87491943777315	9.11412838063313	8.44384430044265	7.368182657586	7.48332275942444	6.44826304723108	5.16423793264611	7.98002716787638	6.71492819699427	6.11473384050956	7.50539564845393	6.86459352948227	2.00584491640009	5.01228422881506	3.64256798987379	0	6.60501226761683	3.94055918447939	5.63216883653706	5.30106624617202	6.83112869949915	6.80321310613232	6.05396442753773
+6.05615282291975	5.49066317297654	5.67503738892289	6.71088081591996	6.29229240618139	6.29601002300864	6.36381765874823	6.19348191138961	6.41561622183692	4.33113554849634	5.23487714375643	4.98323825896227	6.60873730627615	6.4169416509523	8.11947221764963	6.01870458325898	6.60501226761683	0	4.58594627180039	3.55328319122345	5.54504004833869	5.61499654775589	6.828621895957	7.2430575511398
+4.95222984315722	4.27208092800677	6.94693258686523	6.35910146136893	5.93365009305124	6.13547796850531	5.33050747387333	4.67007617153741	6.2124182195955	4.44684649984502	4.87089053925741	5.96071394777962	5.6314567316324	3.98054063154574	5.86295047339197	4.26211882754801	3.94055918447939	4.58594627180039	0	3.90515299381786	4.28027849695426	4.38635292373633	5.39036567068314	6.01887662312089
+4.21090492925648	3.15270231033824	5.30131540387761	5.18001442223706	4.74001147278235	4.6792323698443	4.28625407689455	4.11204634372634	5.39657010741707	4.82408553908334	3.21806378630338	4.63371782472599	4.5625504604801	5.12176871169235	5.99126094814824	3.82653767294834	5.63216883653706	3.55328319122345	3.90515299381786	0	2.65742337348713	3.85104238688071	5.57317463010724	5.08393769559955
+2.85939902504818	1.74819033890028	6.85740763239976	5.2381960545819	4.16875045563175	4.45343001677936	3.30334570818273	2.12921147954829	5.54110800674231	5.61768442520245	2.37451207574614	5.21021128432828	3.80862359775727	4.25495681827209	4.58960572590818	2.5630508080946	5.30106624617202	5.54504004833869	4.28027849695426	2.65742337348713	0	4.28103353313767	5.86614162833534	3.27887996130286
+4.97193013540636	4.12374288054719	5.5325902108287	4.7094220651569	5.19193126631916	5.90808732531762	4.94667231796753	5.20716627366845	5.15776751659351	5.26230208160398	5.0909474774308	5.3351470840398	4.74234966729958	6.42605641292207	6.61131730349409	5.89444500689165	6.83112869949915	5.61499654775589	4.38635292373633	3.85104238688071	4.28103353313767	0	3.33721452854301	5.88678552682047
+6.55517500441812	6.15931692104792	6.30781548936649	6.46329094926891	6.60549692940372	7.12563061433389	6.31277712868333	6.64769970583513	6.8516874226519	6.54992709860496	6.73970215554685	6.70375146848393	6.39367296383052	6.84353264931	7.40381675001318	6.82255468001791	6.80321310613232	6.828621895957	5.39036567068314	5.57317463010724	5.86614162833534	3.33721452854301	0	6.7434003020233
+2.54101803759533	3.35050234347791	7.84284307315349	5.88708881996361	3.75170839493609	4.29741620103145	3.58610710419189	2.40407490833653	6.10574081766257	6.42845550946653	3.03452592937357	5.02891238126826	4.14114063107224	4.64487610678987	4.84021195384958	4.23311465341886	6.05396442753773	7.2430575511398	6.01887662312089	5.08393769559955	3.27887996130286	5.88678552682047	6.7434003020233	0
diff --git a/skbio/stats/distance/tests/data/mantel_veg_dm_vegan.txt b/skbio/stats/distance/tests/data/mantel_veg_dm_vegan.txt
new file mode 100644
index 0000000..59ca6d9
--- /dev/null
+++ b/skbio/stats/distance/tests/data/mantel_veg_dm_vegan.txt
@@ -0,0 +1,24 @@
+0	0.531002122667858	0.668066081456845	0.562124668311531	0.3747077813648	0.509473807708101	0.623441897868918	0.533760972316003	0.841820910455228	0.345334659061175	0.544980977465613	0.387906855391038	0.631889119284077	0.360369716481462	0.495569929879871	0.338230884557721	0.527747996119875	0.46940182727116	0.572409158546537	0.658356940509915	0.468803827751196	0.624899560429172	0.445852306473965	0.556086425651643
+0.531002122667858	0	0.359778297016791	0.405560971081094	0.365209673840692	0.456075749605471	0.35795169856699	0.397667376920489	0.522541392379812	0.60638464347191	0.48037557590249	0.378418761774833	0.337611485751577	0.671739130434783	0.717861205915813	0.635512156237545	0.757850272278487	0.684397366160893	0.820626944245034	0.776103945014594	0.679419902681042	0.764456383429945	0.471627356163032	0.760728080850839
+0.668066081456845	0.359778297016791	0	0.493494677463379	0.502030649266258	0.509231821290175	0.50100504976222	0.590762326930444	0.573666487726319	0.757674655787863	0.653360627772091	0.434689236988378	0.336909757263505	0.793106938610254	0.856175256640914	0.744137292184681	0.83821186778176	0.830987488829312	0.837255085778569	0.759051704257184	0.689453763842811	0.78428294394681	0.567737261531031	0.727272727272727
+0.562124668311531	0.405560971081094	0.493494677463379	0	0.428611098255195	0.487819025522041	0.465522369035986	0.568393032270116	0.302780246286657	0.754373626373627	0.746791469651645	0.495783259540375	0.500159329903947	0.77929167212542	0.873218985388874	0.749693459050357	0.809023592216291	0.841379965785972	0.758192367410313	0.741589782845171	0.625361639704983	0.70965400435519	0.632291885091214	0.54560014236142
+0.3747077813648	0.365209673840692	0.502030649266258	0.428611098255195	0	0.360624235718861	0.481270602337428	0.409431221365516	0.697951879101213	0.622147127631921	0.564580791354366	0.287701365077311	0.425861695209191	0.639083780437474	0.729525454839929	0.625248311481923	0.712879825495866	0.711791887728906	0.724986879144997	0.669388904533934	0.538476172358033	0.662547573180473	0.471028037383178	0.495122079839688
+0.509473807708101	0.456075749605471	0.509231821290175	0.487819025522041	0.360624235718861	0	0.47264827514003	0.449673125625773	0.643173412512372	0.573924437650142	0.633194205745151	0.395377631035906	0.431129869389152	0.695857026807473	0.789820495341968	0.568402976531196	0.530275619878684	0.517760385310054	0.538922155688623	0.539314268896846	0.428855622420053	0.505990964447063	0.329349338004224	0.531589448015961
+0.623441897868918	0.35795169856699	0.50100504976222	0.465522369035986	0.481270602337428	0.47264827514003	0	0.267803106717773	0.598566633402885	0.694873612095441	0.535760932030719	0.46270201874964	0.382298106172113	0.745988552125364	0.861145064293746	0.724916160609081	0.802615164256341	0.801531425047536	0.832146445891039	0.772508176966776	0.70517511761631	0.787532847973119	0.581221922731357	0.677116737663332
+0.533760972316003	0.397667376920489	0.590762326930444	0.568393032270116	0.409431221365516	0.449673125625773	0.267803106717773	0	0.701535990362414	0.551494144215115	0.482635012046777	0.373779728755815	0.430605761857816	0.659614382490881	0.718478913197665	0.650987864807943	0.683795276397356	0.646264782232478	0.735420178184445	0.818586585827965	0.634216589861751	0.765659822656361	0.517282479141836	0.747455911343173
+0.841820910455228	0.522541392379812	0.573666487726319	0.302780246286657	0.697951879101213	0.643173412512372	0.598566633402885	0.701535990362414	0	0.860012230114305	0.823966727319989	0.696356043704593	0.608614963860129	0.896020179372198	0.953959215457244	0.9014440433213	0.923448497952799	0.938116913005677	0.905321324457207	0.868666952421774	0.854316702819956	0.901660445359763	0.754406370224568	0.724877304998332
+0.345334659061175	0.60638464347191	0.757674655787863	0.754373626373627	0.622147127631921	0.573924437650142	0.694873612095441	0.551494144215115	0.860012230114305	0	0.554756456358287	0.57855421686747	0.741260525644297	0.453305402640425	0.514898810129157	0.351567272898162	0.496547756041427	0.388174807197943	0.596869061623382	0.729252973406388	0.590238621498489	0.716043877642022	0.427280771201043	0.72127723067741
+0.544980977465613	0.48037557590249	0.653360627772091	0.746791469651645	0.564580791354366	0.633194205745151	0.535760932030719	0.482635012046777	0.823966727319989	0.554756456358287	0	0.511525795828759	0.554151727277901	0.655082959520078	0.725768053508672	0.622747338353674	0.783666100254885	0.673474291206151	0.859248929176213	0.828249694002448	0.750707371556218	0.830408783452603	0.674327728932078	0.809644950779781
+0.387906855391038	0.378418761774833	0.434689236988378	0.495783259540375	0.287701365077311	0.395377631035906	0.46270201874964	0.373779728755815	0.696356043704593	0.57855421686747	0.511525795828759	0	0.451855598406777	0.595916174099946	0.715382678751259	0.543911776469916	0.669047897880314	0.677185443802873	0.695153896529142	0.698248620855873	0.518242618672176	0.67063492063492	0.446171234812094	0.632043116119549
+0.631889119284077	0.337611485751577	0.336909757263505	0.500159329903947	0.425861695209191	0.431129869389152	0.382298106172113	0.430605761857816	0.608614963860129	0.741260525644297	0.554151727277901	0.451855598406777	0	0.755672609400324	0.860085791452629	0.734387197501951	0.816868430229662	0.840013416065739	0.817908907547258	0.788424259207976	0.70625642943982	0.784595481217946	0.617593040115998	0.746623195156032
+0.360369716481462	0.671739130434783	0.793106938610254	0.77929167212542	0.639083780437474	0.695857026807473	0.745988552125364	0.659614382490881	0.896020179372198	0.453305402640425	0.655082959520078	0.595916174099946	0.755672609400324	0	0.32374463790058	0.175471345902557	0.515448702689836	0.560172148132405	0.646577681901937	0.83184352597575	0.699166591988529	0.769745293466224	0.526223291092406	0.793334979017526
+0.495569929879871	0.717861205915813	0.856175256640914	0.873218985388874	0.729525454839929	0.789820495341968	0.861145064293746	0.718478913197665	0.953959215457244	0.514898810129157	0.725768053508672	0.715382678751259	0.860085791452629	0.32374463790058	0	0.398453833811446	0.563443208895949	0.537750556792873	0.725759700794764	0.901458342917606	0.780864053688773	0.850419084461638	0.556379750493714	0.888831597401258
+0.338230884557721	0.635512156237545	0.744137292184681	0.749693459050357	0.625248311481923	0.568402976531196	0.724916160609081	0.650987864807943	0.9014440433213	0.351567272898162	0.622747338353674	0.543911776469916	0.734387197501951	0.175471345902557	0.398453833811446	0	0.451762651100795	0.466509988249118	0.555275398861018	0.722312633832977	0.576246207195492	0.656792557980023	0.407794761777063	0.672014090541248
+0.527747996119875	0.757850272278487	0.83821186778176	0.809023592216291	0.712879825495866	0.530275619878684	0.802615164256341	0.683795276397356	0.923448497952799	0.496547756041427	0.783666100254885	0.669047897880314	0.816868430229662	0.515448702689836	0.563443208895949	0.451762651100795	0	0.359268929503916	0.20992028343667	0.388581148863785	0.264185101779485	0.341337757596926	0.300259669263359	0.750777302759425
+0.46940182727116	0.684397366160893	0.830987488829312	0.841379965785972	0.711791887728906	0.517760385310054	0.801531425047536	0.646264782232478	0.938116913005677	0.388174807197943	0.673474291206151	0.677185443802873	0.840013416065739	0.560172148132405	0.537750556792873	0.466509988249118	0.359268929503916	0	0.484114532261228	0.622234006652847	0.487074231539852	0.577606177606177	0.321596600394597	0.764130434782609
+0.572409158546537	0.820626944245034	0.837255085778569	0.758192367410313	0.724986879144997	0.538922155688623	0.832146445891039	0.735420178184445	0.905321324457207	0.596869061623382	0.859248929176213	0.695153896529142	0.817908907547258	0.646577681901937	0.725759700794764	0.555275398861018	0.20992028343667	0.484114532261228	0	0.233028585612046	0.184614740719039	0.145672877846791	0.420959606845713	0.677966101694915
+0.658356940509915	0.776103945014594	0.759051704257184	0.741589782845171	0.669388904533934	0.539314268896846	0.772508176966776	0.818586585827965	0.868666952421774	0.729252973406388	0.828249694002448	0.698248620855873	0.788424259207976	0.83184352597575	0.901458342917606	0.722312633832977	0.388581148863785	0.622234006652847	0.233028585612046	0	0.227722772277228	0.111727966689799	0.514525993883792	0.595256346665464
+0.468803827751196	0.679419902681042	0.689453763842811	0.625361639704983	0.538476172358033	0.428855622420053	0.70517511761631	0.634216589861751	0.854316702819956	0.590238621498489	0.750707371556218	0.518242618672176	0.70625642943982	0.699166591988529	0.780864053688773	0.576246207195492	0.264185101779485	0.487074231539852	0.184614740719039	0.227722772277228	0	0.179336829499938	0.368810178817056	0.560213727907933
+0.624899560429172	0.764456383429945	0.78428294394681	0.70965400435519	0.662547573180473	0.505990964447063	0.787532847973119	0.765659822656361	0.901660445359763	0.716043877642022	0.830408783452603	0.67063492063492	0.784595481217946	0.769745293466224	0.850419084461638	0.656792557980023	0.341337757596926	0.577606177606177	0.145672877846791	0.111727966689799	0.179336829499938	0	0.50435780791633	0.614787397309741
+0.445852306473965	0.471627356163032	0.567737261531031	0.632291885091214	0.471028037383178	0.329349338004224	0.581221922731357	0.517282479141836	0.754406370224568	0.427280771201043	0.674327728932078	0.446171234812094	0.617593040115998	0.526223291092406	0.556379750493714	0.407794761777063	0.300259669263359	0.321596600394597	0.420959606845713	0.514525993883792	0.368810178817056	0.50435780791633	0	0.671336257723692
+0.556086425651643	0.760728080850839	0.727272727272727	0.54560014236142	0.495122079839688	0.531589448015961	0.677116737663332	0.747455911343173	0.724877304998332	0.72127723067741	0.809644950779781	0.632043116119549	0.746623195156032	0.793334979017526	0.888831597401258	0.672014090541248	0.750777302759425	0.764130434782609	0.677966101694915	0.595256346665464	0.560213727907933	0.614787397309741	0.671336257723692	0
diff --git a/skbio/stats/distance/tests/data/pwmantel_exp_results_all_dms.txt b/skbio/stats/distance/tests/data/pwmantel_exp_results_all_dms.txt
new file mode 100644
index 0000000..5cd78b1
--- /dev/null
+++ b/skbio/stats/distance/tests/data/pwmantel_exp_results_all_dms.txt
@@ -0,0 +1,7 @@
+dm1	dm2	statistic	p-value	n	method	permutations	alternative
+0	1	0.7020310705446676	0.001	6	pearson	999	two-sided
+0	2	0.8633966325233801	0.002	6	pearson	999	two-sided
+0	3	0.6476901774685102	0.006	6	pearson	999	two-sided
+1	2	0.7784836464659731	0.003	6	pearson	999	two-sided
+1	3	0.9206880242368882	0.004	6	pearson	999	two-sided
+2	3	0.7172972179393844	0.003	6	pearson	999	two-sided
diff --git a/skbio/stats/distance/tests/data/pwmantel_exp_results_dm_dm2.txt b/skbio/stats/distance/tests/data/pwmantel_exp_results_dm_dm2.txt
new file mode 100644
index 0000000..8618d0d
--- /dev/null
+++ b/skbio/stats/distance/tests/data/pwmantel_exp_results_dm_dm2.txt
@@ -0,0 +1,2 @@
+dm1	dm2	statistic	p-value	n	method	permutations	alternative
+0	1	0.702031	0.001	6	pearson	999	two-sided
diff --git a/skbio/stats/distance/tests/data/pwmantel_exp_results_duplicate_dms.txt b/skbio/stats/distance/tests/data/pwmantel_exp_results_duplicate_dms.txt
new file mode 100644
index 0000000..ee0d3ef
--- /dev/null
+++ b/skbio/stats/distance/tests/data/pwmantel_exp_results_duplicate_dms.txt
@@ -0,0 +1,4 @@
+dm1	dm2	statistic	p-value	n	method	permutations	alternative
+0	1	1.0	1.000	3	pearson	999	less
+0	2	1.0	1.000	3	pearson	999	less
+1	2	1.0	1.000	3	pearson	999	less
diff --git a/skbio/stats/distance/tests/data/pwmantel_exp_results_minimal.txt b/skbio/stats/distance/tests/data/pwmantel_exp_results_minimal.txt
new file mode 100644
index 0000000..60cb48b
--- /dev/null
+++ b/skbio/stats/distance/tests/data/pwmantel_exp_results_minimal.txt
@@ -0,0 +1,4 @@
+dm1	dm2	statistic	p-value	n	method	permutations	alternative
+0	1	0.7559289460184544	0.324	3	pearson	999	greater
+0	2	-0.989743318610787	1.000	3	pearson	999	greater
+1	2	-0.8416975766245421	0.835	3	pearson	999	greater
diff --git a/skbio/stats/distance/tests/data/pwmantel_exp_results_minimal_with_labels.txt b/skbio/stats/distance/tests/data/pwmantel_exp_results_minimal_with_labels.txt
new file mode 100644
index 0000000..ff83957
--- /dev/null
+++ b/skbio/stats/distance/tests/data/pwmantel_exp_results_minimal_with_labels.txt
@@ -0,0 +1,4 @@
+dm1	dm2	statistic	p-value	n	method	permutations	alternative
+minx	miny	0.7559289460184544	0.324	3	pearson	999	greater
+minx	minz	-0.989743318610787	1.000	3	pearson	999	greater
+miny	minz	-0.8416975766245421	0.835	3	pearson	999	greater
diff --git a/skbio/stats/distance/tests/data/pwmantel_exp_results_na_p_value.txt b/skbio/stats/distance/tests/data/pwmantel_exp_results_na_p_value.txt
new file mode 100644
index 0000000..b27c593
--- /dev/null
+++ b/skbio/stats/distance/tests/data/pwmantel_exp_results_na_p_value.txt
@@ -0,0 +1,2 @@
+dm1	dm2	statistic	p-value	n	method	permutations	alternative
+0	1	0.5	NaN	3	spearman	0	two-sided
diff --git a/skbio/stats/distance/tests/data/pwmantel_exp_results_reordered_distance_matrices.txt b/skbio/stats/distance/tests/data/pwmantel_exp_results_reordered_distance_matrices.txt
new file mode 100644
index 0000000..c0dd1be
--- /dev/null
+++ b/skbio/stats/distance/tests/data/pwmantel_exp_results_reordered_distance_matrices.txt
@@ -0,0 +1,4 @@
+dm1	dm2	statistic	p-value	n	method	permutations	alternative
+0	1	0.7559289460184544	0.324	3	pearson	999	greater
+0	2	-0.9897433186107871	1.000	3	pearson	999	greater
+1	2	-0.8416975766245421	0.822	3	pearson	999	greater
diff --git a/skbio/stats/distance/tests/test_anosim.py b/skbio/stats/distance/tests/test_anosim.py
new file mode 100644
index 0000000..f839040
--- /dev/null
+++ b/skbio/stats/distance/tests/test_anosim.py
@@ -0,0 +1,202 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from six import StringIO
+
+from functools import partial
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+from pandas.util.testing import assert_series_equal
+
+from skbio import DistanceMatrix
+from skbio.stats.distance import anosim, ANOSIM
+
+
+class TestANOSIM(TestCase):
+    """All results were verified with R (vegan::anosim)."""
+
+    def setUp(self):
+        # Distance matrices with and without ties in the ranks, with 2 groups
+        # of equal size.
+        dm_ids = ['s1', 's2', 's3', 's4']
+        self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
+        self.df = pd.read_csv(
+            StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
+                     's1,Control'), index_col=0)
+
+        self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
+                                       [1, 0, 3, 2],
+                                       [1, 3, 0, 3],
+                                       [4, 2, 3, 0]], dm_ids)
+
+        self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
+                                          [1, 0, 3, 2],
+                                          [5, 3, 0, 3],
+                                          [4, 2, 3, 0]], dm_ids)
+
+        # Test with 3 groups of unequal size. This data also generates a
+        # negative R statistic.
+        self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
+                                 'Treatment1', 'Control', 'Control']
+
+        # Equivalent grouping but with different labels -- groups should be
+        # assigned different integer labels but results should be the same.
+        self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']
+
+        self.dm_unequal = DistanceMatrix(
+            [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
+             [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
+             [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
+             [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
+             [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
+             [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
+            ['s1', 's2', 's3', 's4', 's5', 's6'])
+
+        # Expected series index is the same across all tests.
+        self.exp_index = ['method name', 'test statistic name', 'sample size',
+                          'number of groups', 'test statistic', 'p-value',
+                          'number of permutations']
+
+        # Stricter series equality testing than the default.
+        self.assert_series_equal = partial(assert_series_equal,
+                                           check_index_type=True,
+                                           check_series_type=True)
+
+    def test_ties(self):
+        # Ensure we get the same results if we rerun the method using the same
+        # inputs. Also ensure we get the same results if we run the method
+        # using a grouping vector or a data frame with equivalent groupings.
+        exp = pd.Series(index=self.exp_index,
+                        data=['ANOSIM', 'R', 4, 2, 0.25, 0.671, 999])
+
+        for _ in range(2):
+            np.random.seed(0)
+            obs = anosim(self.dm_ties, self.grouping_equal)
+            self.assert_series_equal(obs, exp)
+
+        for _ in range(2):
+            np.random.seed(0)
+            obs = anosim(self.dm_ties, self.df, column='Group')
+            self.assert_series_equal(obs, exp)
+
+    def test_no_ties(self):
+        exp = pd.Series(index=self.exp_index,
+                        data=['ANOSIM', 'R', 4, 2, 0.625, 0.332, 999])
+        np.random.seed(0)
+        obs = anosim(self.dm_no_ties, self.grouping_equal)
+        self.assert_series_equal(obs, exp)
+
+    def test_no_permutations(self):
+        exp = pd.Series(index=self.exp_index,
+                        data=['ANOSIM', 'R', 4, 2, 0.625, np.nan, 0])
+        obs = anosim(self.dm_no_ties, self.grouping_equal, permutations=0)
+        self.assert_series_equal(obs, exp)
+
+    def test_unequal_group_sizes(self):
+        exp = pd.Series(index=self.exp_index,
+                        data=['ANOSIM', 'R', 6, 3, -0.363636, 0.878, 999])
+
+        np.random.seed(0)
+        obs = anosim(self.dm_unequal, self.grouping_unequal)
+        self.assert_series_equal(obs, exp)
+
+        np.random.seed(0)
+        obs = anosim(self.dm_unequal, self.grouping_unequal_relabeled)
+        self.assert_series_equal(obs, exp)
+
+
+class TestANOSIMClass(TestCase):
+    """All results were verified with R (vegan::anosim)."""
+
+    def setUp(self):
+        # Distance matrices with and without ties in the ranks, with 2 groups
+        # of equal size.
+        dm_ids = ['s1', 's2', 's3', 's4']
+        grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
+        df = pd.read_csv(
+            StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
+                     's1,Control'), index_col=0)
+
+        self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
+                                       [1, 0, 3, 2],
+                                       [1, 3, 0, 3],
+                                       [4, 2, 3, 0]], dm_ids)
+
+        self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
+                                          [1, 0, 3, 2],
+                                          [5, 3, 0, 3],
+                                          [4, 2, 3, 0]], dm_ids)
+
+        # Test with 3 groups of unequal size. This data also generates a
+        # negative R statistic.
+        grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
+                            'Treatment1', 'Control', 'Control']
+
+        self.dm_unequal = DistanceMatrix(
+            [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
+             [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
+             [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
+             [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
+             [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
+             [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
+            ['s1', 's2', 's3', 's4', 's5', 's6'])
+
+        self.anosim_ties = ANOSIM(self.dm_ties, grouping_equal)
+        self.anosim_no_ties = ANOSIM(self.dm_no_ties, grouping_equal)
+        self.anosim_ties_df = ANOSIM(self.dm_ties, df, column='Group')
+        self.anosim_unequal = ANOSIM(self.dm_unequal, grouping_unequal)
+
+    def test_call_ties(self):
+        # Ensure we get the same results if we rerun the method on the same
+        # object. Also ensure we get the same results if we run the method
+        # using a grouping vector or a data frame with equivalent groupings.
+        for inst in self.anosim_ties, self.anosim_ties_df:
+            for trial in range(2):
+                np.random.seed(0)
+                obs = inst()
+                self.assertEqual(obs.sample_size, 4)
+                npt.assert_array_equal(obs.groups,
+                                       ['Control', 'Fast'])
+                self.assertAlmostEqual(obs.statistic, 0.25)
+                self.assertAlmostEqual(obs.p_value, 0.671)
+                self.assertEqual(obs.permutations, 999)
+
+    def test_call_no_ties(self):
+        np.random.seed(0)
+        obs = self.anosim_no_ties()
+        self.assertEqual(obs.sample_size, 4)
+        npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
+        self.assertAlmostEqual(obs.statistic, 0.625)
+        self.assertAlmostEqual(obs.p_value, 0.332)
+        self.assertEqual(obs.permutations, 999)
+
+    def test_call_no_permutations(self):
+        obs = self.anosim_no_ties(0)
+        self.assertEqual(obs.sample_size, 4)
+        npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
+        self.assertAlmostEqual(obs.statistic, 0.625)
+        self.assertEqual(obs.p_value, None)
+        self.assertEqual(obs.permutations, 0)
+
+    def test_call_unequal_group_sizes(self):
+        np.random.seed(0)
+        obs = self.anosim_unequal()
+        self.assertEqual(obs.sample_size, 6)
+        npt.assert_array_equal(obs.groups,
+                               ['Control', 'Treatment1', 'Treatment2'])
+        self.assertAlmostEqual(obs.statistic, -0.363636, 6)
+        self.assertAlmostEqual(obs.p_value, 0.878)
+        self.assertEqual(obs.permutations, 999)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/stats/distance/tests/test_base.py b/skbio/stats/distance/tests/test_base.py
new file mode 100644
index 0000000..10cca1f
--- /dev/null
+++ b/skbio/stats/distance/tests/test_base.py
@@ -0,0 +1,766 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import zip
+from six import StringIO, binary_type, text_type
+
+from unittest import TestCase, main
+
+import matplotlib as mpl
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+from IPython.core.display import Image, SVG
+
+from skbio import DistanceMatrix
+from skbio.stats.distance import (
+    DissimilarityMatrixError, DistanceMatrixError, MissingIDError,
+    DissimilarityMatrix, randdm, CategoricalStatsResults)
+from skbio.stats.distance._base import (
+    _preprocess_input, _run_monte_carlo_stats, CategoricalStats)
+
+
+class DissimilarityMatrixTestData(TestCase):
+    def setUp(self):
+        self.dm_1x1_data = [[0.0]]
+        self.dm_2x2_data = [[0.0, 0.123], [0.123, 0.0]]
+        self.dm_2x2_asym_data = [[0.0, 1.0], [-2.0, 0.0]]
+        self.dm_3x3_data = [[0.0, 0.01, 4.2], [0.01, 0.0, 12.0],
+                            [4.2, 12.0, 0.0]]
+
+
+class DissimilarityMatrixTests(DissimilarityMatrixTestData):
+    def setUp(self):
+        super(DissimilarityMatrixTests, self).setUp()
+
+        self.dm_1x1 = DissimilarityMatrix(self.dm_1x1_data, ['a'])
+        self.dm_2x2 = DissimilarityMatrix(self.dm_2x2_data, ['a', 'b'])
+        self.dm_2x2_asym = DissimilarityMatrix(self.dm_2x2_asym_data,
+                                               ['a', 'b'])
+        self.dm_3x3 = DissimilarityMatrix(self.dm_3x3_data, ['a', 'b', 'c'])
+
+        self.dms = [self.dm_1x1, self.dm_2x2, self.dm_2x2_asym, self.dm_3x3]
+        self.dm_shapes = [(1, 1), (2, 2), (2, 2), (3, 3)]
+        self.dm_sizes = [1, 4, 4, 9]
+        self.dm_transposes = [
+            self.dm_1x1, self.dm_2x2,
+            DissimilarityMatrix([[0, -2], [1, 0]], ['a', 'b']), self.dm_3x3]
+        self.dm_redundant_forms = [np.array(self.dm_1x1_data),
+                                   np.array(self.dm_2x2_data),
+                                   np.array(self.dm_2x2_asym_data),
+                                   np.array(self.dm_3x3_data)]
+
+    def test_deprecated_io(self):
+        fh = StringIO()
+        npt.assert_warns(DeprecationWarning, self.dm_3x3.to_file, fh)
+        fh.seek(0)
+        deserialized = npt.assert_warns(DeprecationWarning,
+                                        DissimilarityMatrix.from_file, fh)
+        self.assertEqual(deserialized, self.dm_3x3)
+        self.assertTrue(type(deserialized) == DissimilarityMatrix)
+
+    def test_init_from_dm(self):
+        ids = ['foo', 'bar', 'baz']
+
+        # DissimilarityMatrix -> DissimilarityMatrix
+        exp = DissimilarityMatrix(self.dm_3x3_data, ids)
+        obs = DissimilarityMatrix(self.dm_3x3, ids)
+        self.assertEqual(obs, exp)
+        # Test that copy of data is not made.
+        self.assertTrue(obs.data is self.dm_3x3.data)
+        obs.data[0, 1] = 424242
+        self.assertTrue(np.array_equal(obs.data, self.dm_3x3.data))
+
+        # DistanceMatrix -> DissimilarityMatrix
+        exp = DissimilarityMatrix(self.dm_3x3_data, ids)
+        obs = DissimilarityMatrix(
+            DistanceMatrix(self.dm_3x3_data, ('a', 'b', 'c')), ids)
+        self.assertEqual(obs, exp)
+
+        # DissimilarityMatrix -> DistanceMatrix
+        with self.assertRaises(DistanceMatrixError):
+            DistanceMatrix(self.dm_2x2_asym, ['foo', 'bar'])
+
+    def test_init_no_ids(self):
+        exp = DissimilarityMatrix(self.dm_3x3_data, ('0', '1', '2'))
+        obs = DissimilarityMatrix(self.dm_3x3_data)
+        self.assertEqual(obs, exp)
+        self.assertEqual(obs['1', '2'], 12.0)
+
+    def test_init_invalid_input(self):
+        # Empty data.
+        with self.assertRaises(DissimilarityMatrixError):
+            DissimilarityMatrix([], [])
+
+        # Another type of empty data.
+        with self.assertRaises(DissimilarityMatrixError):
+            DissimilarityMatrix(np.empty((0, 0)), [])
+
+        # Invalid number of dimensions.
+        with self.assertRaises(DissimilarityMatrixError):
+            DissimilarityMatrix([1, 2, 3], ['a'])
+
+        # Dimensions don't match.
+        with self.assertRaises(DissimilarityMatrixError):
+            DissimilarityMatrix([[1, 2, 3]], ['a'])
+
+        data = [[0, 1], [1, 0]]
+
+        # Duplicate IDs.
+        with self.assertRaises(DissimilarityMatrixError):
+            DissimilarityMatrix(data, ['a', 'a'])
+
+        # Number of IDs don't match dimensions.
+        with self.assertRaises(DissimilarityMatrixError):
+            DissimilarityMatrix(data, ['a', 'b', 'c'])
+        with self.assertRaises(DissimilarityMatrixError):
+            DissimilarityMatrix(data, [])
+
+        # Non-hollow.
+        data = [[0.0, 1.0], [1.0, 0.01]]
+        with self.assertRaises(DissimilarityMatrixError):
+            DissimilarityMatrix(data, ['a', 'b'])
+
+    def test_data(self):
+        for dm, exp in zip(self.dms, self.dm_redundant_forms):
+            obs = dm.data
+            self.assertTrue(np.array_equal(obs, exp))
+
+        with self.assertRaises(AttributeError):
+            self.dm_3x3.data = 'foo'
+
+    def test_ids(self):
+        obs = self.dm_3x3.ids
+        self.assertEqual(obs, ('a', 'b', 'c'))
+
+        # Test that we overwrite the existing IDs and that the ID index is
+        # correctly rebuilt.
+        new_ids = ['foo', 'bar', 'baz']
+        self.dm_3x3.ids = new_ids
+        obs = self.dm_3x3.ids
+        self.assertEqual(obs, tuple(new_ids))
+        self.assertTrue(np.array_equal(self.dm_3x3['bar'],
+                                       np.array([0.01, 0.0, 12.0])))
+        with self.assertRaises(MissingIDError):
+            self.dm_3x3['b']
+
+    def test_ids_invalid_input(self):
+        with self.assertRaises(DissimilarityMatrixError):
+            self.dm_3x3.ids = ['foo', 'bar']
+        # Make sure that we can still use the dissimilarity matrix after trying
+        # to be evil.
+        obs = self.dm_3x3.ids
+        self.assertEqual(obs, ('a', 'b', 'c'))
+
+    def test_dtype(self):
+        for dm in self.dms:
+            self.assertEqual(dm.dtype, np.float64)
+
+    def test_shape(self):
+        for dm, shape in zip(self.dms, self.dm_shapes):
+            self.assertEqual(dm.shape, shape)
+
+    def test_size(self):
+        for dm, size in zip(self.dms, self.dm_sizes):
+            self.assertEqual(dm.size, size)
+
+    def test_transpose(self):
+        for dm, transpose in zip(self.dms, self.dm_transposes):
+            self.assertEqual(dm.T, transpose)
+            self.assertEqual(dm.transpose(), transpose)
+            # We should get a reference to a different object back, even if the
+            # transpose is the same as the original.
+            self.assertTrue(dm.transpose() is not dm)
+
+    def test_index(self):
+        self.assertEqual(self.dm_3x3.index('a'), 0)
+        self.assertEqual(self.dm_3x3.index('b'), 1)
+        self.assertEqual(self.dm_3x3.index('c'), 2)
+
+        with self.assertRaises(MissingIDError):
+            self.dm_3x3.index('d')
+
+        with self.assertRaises(MissingIDError):
+            self.dm_3x3.index(1)
+
+    def test_redundant_form(self):
+        for dm, redundant in zip(self.dms, self.dm_redundant_forms):
+            obs = dm.redundant_form()
+            self.assertTrue(np.array_equal(obs, redundant))
+
+    def test_copy(self):
+        copy = self.dm_2x2.copy()
+        self.assertEqual(copy, self.dm_2x2)
+        self.assertFalse(copy.data is self.dm_2x2.data)
+        # deepcopy doesn't actually create a copy of the IDs because it is a
+        # tuple of strings, which is fully immutable.
+        self.assertTrue(copy.ids is self.dm_2x2.ids)
+
+        new_ids = ['hello', 'world']
+        copy.ids = new_ids
+        self.assertNotEqual(copy.ids, self.dm_2x2.ids)
+
+        copy = self.dm_2x2.copy()
+        copy.data[0, 1] = 0.0001
+        self.assertFalse(np.array_equal(copy.data, self.dm_2x2.data))
+
+    def test_filter_no_filtering(self):
+        # Don't actually filter anything -- ensure we get back a different
+        # object.
+        obs = self.dm_3x3.filter(['a', 'b', 'c'])
+        self.assertEqual(obs, self.dm_3x3)
+        self.assertFalse(obs is self.dm_3x3)
+
+    def test_filter_reorder(self):
+        # Don't filter anything, but reorder the distance matrix.
+        order = ['c', 'a', 'b']
+        exp = DissimilarityMatrix(
+            [[0, 4.2, 12], [4.2, 0, 0.01], [12, 0.01, 0]], order)
+        obs = self.dm_3x3.filter(order)
+        self.assertEqual(obs, exp)
+
+    def test_filter_single_id(self):
+        ids = ['b']
+        exp = DissimilarityMatrix([[0]], ids)
+        obs = self.dm_2x2_asym.filter(ids)
+        self.assertEqual(obs, exp)
+
+    def test_filter_asymmetric(self):
+        # 2x2
+        ids = ['b', 'a']
+        exp = DissimilarityMatrix([[0, -2], [1, 0]], ids)
+        obs = self.dm_2x2_asym.filter(ids)
+        self.assertEqual(obs, exp)
+
+        # 3x3
+        dm = DissimilarityMatrix([[0, 10, 53], [42, 0, 22.5], [53, 1, 0]],
+                                 ('bro', 'brah', 'breh'))
+        ids = ['breh', 'brah']
+        exp = DissimilarityMatrix([[0, 1], [22.5, 0]], ids)
+        obs = dm.filter(ids)
+        self.assertEqual(obs, exp)
+
+    def test_filter_subset(self):
+        ids = ('c', 'a')
+        exp = DissimilarityMatrix([[0, 4.2], [4.2, 0]], ids)
+        obs = self.dm_3x3.filter(ids)
+        self.assertEqual(obs, exp)
+
+        ids = ('b', 'a')
+        exp = DissimilarityMatrix([[0, 0.01], [0.01, 0]], ids)
+        obs = self.dm_3x3.filter(ids)
+        self.assertEqual(obs, exp)
+
+        # 4x4
+        dm = DissimilarityMatrix([[0, 1, 55, 7], [1, 0, 16, 1],
+                                  [55, 16, 0, 23], [7, 1, 23, 0]])
+        ids = np.asarray(['3', '0', '1'])
+        exp = DissimilarityMatrix([[0, 7, 1], [7, 0, 1], [1, 1, 0]], ids)
+        obs = dm.filter(ids)
+        self.assertEqual(obs, exp)
+
+    def test_filter_duplicate_ids(self):
+        with self.assertRaises(DissimilarityMatrixError):
+            self.dm_3x3.filter(['c', 'a', 'c'])
+
+    def test_filter_missing_ids(self):
+        with self.assertRaises(MissingIDError):
+            self.dm_3x3.filter(['c', 'bro'])
+
+    def test_filter_missing_ids_strict_false(self):
+        # no exception should be raised
+        ids = ('c', 'a')
+        exp = DissimilarityMatrix([[0, 4.2], [4.2, 0]], ids)
+        obs = self.dm_3x3.filter(['c', 'a', 'not found'], strict=False)
+        self.assertEqual(obs, exp)
+
+    def test_filter_empty_ids(self):
+        with self.assertRaises(DissimilarityMatrixError):
+            self.dm_3x3.filter([])
+
+    def test_plot_default(self):
+        fig = self.dm_1x1.plot()
+        self.assertIsInstance(fig, mpl.figure.Figure)
+        axes = fig.get_axes()
+        self.assertEqual(len(axes), 2)
+        ax = axes[0]
+        self.assertEqual(ax.get_title(), '')
+        xticks = []
+        for tick in ax.get_xticklabels():
+            xticks.append(tick.get_text())
+        self.assertEqual(xticks, ['a'])
+        yticks = []
+        for tick in ax.get_yticklabels():
+            yticks.append(tick.get_text())
+        self.assertEqual(yticks, ['a'])
+
+    def test_plot_no_default(self):
+        ids = ['0', 'one', '2', 'three', '4.000']
+        data = ([0, 1, 2, 3, 4], [1, 0, 1, 2, 3], [2, 1, 0, 1, 2],
+                [3, 2, 1, 0, 1], [4, 3, 2, 1, 0])
+        dm = DissimilarityMatrix(data, ids)
+        fig = dm.plot(cmap='Reds', title='Testplot')
+        self.assertIsInstance(fig, mpl.figure.Figure)
+        axes = fig.get_axes()
+        self.assertEqual(len(axes), 2)
+        ax = axes[0]
+        self.assertEqual(ax.get_title(), 'Testplot')
+        xticks = []
+        for tick in ax.get_xticklabels():
+            xticks.append(tick.get_text())
+        self.assertEqual(xticks, ['0', 'one', '2', 'three', '4.000'])
+        yticks = []
+        for tick in ax.get_yticklabels():
+            yticks.append(tick.get_text())
+        self.assertEqual(yticks, ['0', 'one', '2', 'three', '4.000'])
+
+    def test_repr_png(self):
+        dm = self.dm_1x1
+        obs = dm._repr_png_()
+        self.assertIsInstance(obs, binary_type)
+        self.assertTrue(len(obs) > 0)
+
+    def test_repr_svg(self):
+        obs = self.dm_1x1._repr_svg_()
+        # print_figure(format='svg') can return text or bytes depending on the
+        # version of IPython
+        self.assertTrue(isinstance(obs, text_type) or
+                        isinstance(obs, binary_type))
+        self.assertTrue(len(obs) > 0)
+
+    def test_png(self):
+        dm = self.dm_1x1
+        self.assertIsInstance(dm.png, Image)
+
+    def test_svg(self):
+        dm = self.dm_1x1
+        self.assertIsInstance(dm.svg, SVG)
+
+    def test_str(self):
+        for dm in self.dms:
+            obs = str(dm)
+            # Do some very light testing here to make sure we're getting a
+            # non-empty string back. We don't want to test the exact
+            # formatting.
+            self.assertTrue(obs)
+
+    def test_eq(self):
+        for dm in self.dms:
+            copy = dm.copy()
+            self.assertTrue(dm == dm)
+            self.assertTrue(copy == copy)
+            self.assertTrue(dm == copy)
+            self.assertTrue(copy == dm)
+
+        self.assertFalse(self.dm_1x1 == self.dm_3x3)
+
+    def test_ne(self):
+        # Wrong class.
+        self.assertTrue(self.dm_3x3 != 'foo')
+
+        # Wrong shape.
+        self.assertTrue(self.dm_3x3 != self.dm_1x1)
+
+        # Wrong IDs.
+        other = self.dm_3x3.copy()
+        other.ids = ['foo', 'bar', 'baz']
+        self.assertTrue(self.dm_3x3 != other)
+
+        # Wrong data.
+        other = self.dm_3x3.copy()
+        other.data[1, 0] = 42.42
+        self.assertTrue(self.dm_3x3 != other)
+
+        self.assertFalse(self.dm_2x2 != self.dm_2x2)
+
+    def test_contains(self):
+        self.assertTrue('a' in self.dm_3x3)
+        self.assertTrue('b' in self.dm_3x3)
+        self.assertTrue('c' in self.dm_3x3)
+        self.assertFalse('d' in self.dm_3x3)
+
+    def test_getslice(self):
+        # Slice of first dimension only. Test that __getslice__ defers to
+        # __getitem__.
+        obs = self.dm_2x2[1:]
+        self.assertTrue(np.array_equal(obs, np.array([[0.123, 0.0]])))
+        self.assertEqual(type(obs), np.ndarray)
+
+    def test_getitem_by_id(self):
+        obs = self.dm_1x1['a']
+        self.assertTrue(np.array_equal(obs, np.array([0.0])))
+
+        obs = self.dm_2x2_asym['b']
+        self.assertTrue(np.array_equal(obs, np.array([-2.0, 0.0])))
+
+        obs = self.dm_3x3['c']
+        self.assertTrue(np.array_equal(obs, np.array([4.2, 12.0, 0.0])))
+
+        with self.assertRaises(MissingIDError):
+            self.dm_2x2['c']
+
+    def test_getitem_by_id_pair(self):
+        # Same object.
+        self.assertEqual(self.dm_1x1['a', 'a'], 0.0)
+
+        # Different objects (symmetric).
+        self.assertEqual(self.dm_3x3['b', 'c'], 12.0)
+        self.assertEqual(self.dm_3x3['c', 'b'], 12.0)
+
+        # Different objects (asymmetric).
+        self.assertEqual(self.dm_2x2_asym['a', 'b'], 1.0)
+        self.assertEqual(self.dm_2x2_asym['b', 'a'], -2.0)
+
+        with self.assertRaises(MissingIDError):
+            self.dm_2x2['a', 'c']
+
+    def test_getitem_ndarray_indexing(self):
+        # Single element access.
+        obs = self.dm_3x3[0, 1]
+        self.assertEqual(obs, 0.01)
+
+        # Single element access (via two __getitem__ calls).
+        obs = self.dm_3x3[0][1]
+        self.assertEqual(obs, 0.01)
+
+        # Row access.
+        obs = self.dm_3x3[1]
+        self.assertTrue(np.array_equal(obs, np.array([0.01, 0.0, 12.0])))
+        self.assertEqual(type(obs), np.ndarray)
+
+        # Grab all data.
+        obs = self.dm_3x3[:, :]
+        self.assertTrue(np.array_equal(obs, self.dm_3x3.data))
+        self.assertEqual(type(obs), np.ndarray)
+
+        with self.assertRaises(IndexError):
+            self.dm_3x3[:, 3]
+
+    def test_validate_invalid_dtype(self):
+        with self.assertRaises(DissimilarityMatrixError):
+            self.dm_3x3._validate(np.array([[0, 42], [42, 0]]), ['a', 'b'])
+
+
+class DistanceMatrixTests(DissimilarityMatrixTestData):
+    def setUp(self):
+        super(DistanceMatrixTests, self).setUp()
+
+        self.dm_1x1 = DistanceMatrix(self.dm_1x1_data, ['a'])
+        self.dm_2x2 = DistanceMatrix(self.dm_2x2_data, ['a', 'b'])
+        self.dm_3x3 = DistanceMatrix(self.dm_3x3_data, ['a', 'b', 'c'])
+
+        self.dms = [self.dm_1x1, self.dm_2x2, self.dm_3x3]
+        self.dm_condensed_forms = [np.array([]), np.array([0.123]),
+                                   np.array([0.01, 4.2, 12.0])]
+
+    def test_deprecated_io(self):
+        fh = StringIO()
+        npt.assert_warns(DeprecationWarning, self.dm_3x3.to_file, fh)
+        fh.seek(0)
+        deserialized = npt.assert_warns(DeprecationWarning,
+                                        DistanceMatrix.from_file, fh)
+        self.assertEqual(deserialized, self.dm_3x3)
+        self.assertTrue(type(deserialized) == DistanceMatrix)
+
+    def test_init_invalid_input(self):
+        # Asymmetric.
+        data = [[0.0, 2.0], [1.0, 0.0]]
+        with self.assertRaises(DistanceMatrixError):
+            DistanceMatrix(data, ['a', 'b'])
+
+        # Ensure that the superclass validation is still being performed.
+        with self.assertRaises(DissimilarityMatrixError):
+            DistanceMatrix([[1, 2, 3]], ['a'])
+
+    def test_condensed_form(self):
+        for dm, condensed in zip(self.dms, self.dm_condensed_forms):
+            obs = dm.condensed_form()
+            self.assertTrue(np.array_equal(obs, condensed))
+
+    def test_permute_condensed(self):
+        # Can't really permute a 1x1 or 2x2...
+        for _ in range(2):
+            obs = self.dm_1x1.permute(condensed=True)
+            npt.assert_equal(obs, np.array([]))
+
+        for _ in range(2):
+            obs = self.dm_2x2.permute(condensed=True)
+            npt.assert_equal(obs, np.array([0.123]))
+
+        dm_copy = self.dm_3x3.copy()
+
+        np.random.seed(0)
+
+        obs = self.dm_3x3.permute(condensed=True)
+        npt.assert_equal(obs, np.array([12.0, 4.2, 0.01]))
+
+        obs = self.dm_3x3.permute(condensed=True)
+        npt.assert_equal(obs, np.array([4.2, 12.0, 0.01]))
+
+        # Ensure dm hasn't changed after calling permute() on it a couple of
+        # times.
+        self.assertEqual(self.dm_3x3, dm_copy)
+
+    def test_permute_not_condensed(self):
+        obs = self.dm_1x1.permute()
+        self.assertEqual(obs, self.dm_1x1)
+        self.assertFalse(obs is self.dm_1x1)
+
+        obs = self.dm_2x2.permute()
+        self.assertEqual(obs, self.dm_2x2)
+        self.assertFalse(obs is self.dm_2x2)
+
+        np.random.seed(0)
+
+        exp = DistanceMatrix([[0, 12, 4.2],
+                              [12, 0, 0.01],
+                              [4.2, 0.01, 0]], self.dm_3x3.ids)
+        obs = self.dm_3x3.permute()
+        self.assertEqual(obs, exp)
+
+        exp = DistanceMatrix([[0, 4.2, 12],
+                              [4.2, 0, 0.01],
+                              [12, 0.01, 0]], self.dm_3x3.ids)
+        obs = self.dm_3x3.permute()
+        self.assertEqual(obs, exp)
+
+    def test_eq(self):
+        # Compare DistanceMatrix to DissimilarityMatrix, where both have the
+        # same data and IDs.
+        eq_dm = DissimilarityMatrix(self.dm_3x3_data, ['a', 'b', 'c'])
+        self.assertTrue(self.dm_3x3 == eq_dm)
+        self.assertTrue(eq_dm == self.dm_3x3)
+
+
+class RandomDistanceMatrixTests(TestCase):
+    def test_default_usage(self):
+        exp = DistanceMatrix(np.asarray([[0.0]]), ['1'])
+        obs = randdm(1)
+        self.assertEqual(obs, exp)
+
+        obs = randdm(2)
+        self.assertEqual(obs.shape, (2, 2))
+        self.assertEqual(obs.ids, ('1', '2'))
+
+        obs1 = randdm(5)
+        num_trials = 10
+        found_diff = False
+        for _ in range(num_trials):
+            obs2 = randdm(5)
+
+            if obs1 != obs2:
+                found_diff = True
+                break
+
+        self.assertTrue(found_diff)
+
+    def test_ids(self):
+        ids = ['foo', 'bar', 'baz']
+        obs = randdm(3, ids=ids)
+        self.assertEqual(obs.shape, (3, 3))
+        self.assertEqual(obs.ids, tuple(ids))
+
+    def test_constructor(self):
+        exp = DissimilarityMatrix(np.asarray([[0.0]]), ['1'])
+        obs = randdm(1, constructor=DissimilarityMatrix)
+        self.assertEqual(obs, exp)
+        self.assertEqual(type(obs), DissimilarityMatrix)
+
+    def test_random_fn(self):
+        def myrand(num_rows, num_cols):
+            # One dm to rule them all...
+            data = np.empty((num_rows, num_cols))
+            data.fill(42)
+            return data
+
+        exp = DistanceMatrix(np.asarray([[0, 42, 42], [42, 0, 42],
+                                         [42, 42, 0]]), ['1', '2', '3'])
+        obs = randdm(3, random_fn=myrand)
+        self.assertEqual(obs, exp)
+
+    def test_invalid_input(self):
+        # Invalid dimensions.
+        with self.assertRaises(DissimilarityMatrixError):
+            randdm(0)
+
+        # Invalid dimensions.
+        with self.assertRaises(ValueError):
+            randdm(-1)
+
+        # Invalid number of IDs.
+        with self.assertRaises(DissimilarityMatrixError):
+            randdm(2, ids=['foo'])
+
+
+class CategoricalStatsHelperFunctionTests(TestCase):
+    """Tests for the private helpers _preprocess_input and
+    _run_monte_carlo_stats used by the categorical statistical methods.
+    """
+
+    def setUp(self):
+        # 3x3 distance matrix with a two-group grouping vector ([1, 2, 1]).
+        self.dm = DistanceMatrix([[0.0, 1.0, 2.0],
+                                  [1.0, 0.0, 3.0],
+                                  [2.0, 3.0, 0.0]], ['a', 'b', 'c'])
+        self.grouping = [1, 2, 1]
+        # Ordering of IDs shouldn't matter, nor should extra IDs.
+        self.df = pd.read_csv(
+            StringIO('ID,Group\nb,Group2\na,Group1\nc,Group1\nd,Group3'),
+            index_col=0)
+        # Data frame missing ID 'a', which IS in the distance matrix.
+        self.df_missing_id = pd.read_csv(
+            StringIO('ID,Group\nb,Group2\nc,Group1'), index_col=0)
+
+    def test_preprocess_input_with_valid_input(self):
+        # Should obtain same result using grouping vector or data frame.
+        # Expected tuple: (sample size, number of groups, encoded grouping,
+        # tri-indices of the condensed form, condensed distances).
+        exp = (3, 2, np.array([0, 1, 0]),
+               (np.array([0, 0, 1]), np.array([1, 2, 2])),
+               np.array([1., 2., 3.]))
+
+        obs = _preprocess_input(self.dm, self.grouping, None)
+        npt.assert_equal(obs, exp)
+
+        obs = _preprocess_input(self.dm, self.df, 'Group')
+        npt.assert_equal(obs, exp)
+
+    def test_preprocess_input_raises_error(self):
+        # Requires a DistanceMatrix.
+        with self.assertRaises(TypeError):
+            _preprocess_input(
+                DissimilarityMatrix([[0, 2], [3, 0]], ['a', 'b']),
+                [1, 2], None)
+
+        # Requires column if DataFrame.
+        with self.assertRaises(ValueError):
+            _preprocess_input(self.dm, self.df, None)
+
+        # Cannot provide column if not data frame.
+        with self.assertRaises(ValueError):
+            _preprocess_input(self.dm, self.grouping, 'Group')
+
+        # Column must exist in data frame.
+        with self.assertRaises(ValueError):
+            _preprocess_input(self.dm, self.df, 'foo')
+
+        # All distance matrix IDs must be in data frame.
+        with self.assertRaises(ValueError):
+            _preprocess_input(self.dm, self.df_missing_id, 'Group')
+
+        # Grouping vector length must match number of objects in dm.
+        with self.assertRaises(ValueError):
+            _preprocess_input(self.dm, [1, 2], None)
+
+        # Grouping vector cannot have only unique values.
+        with self.assertRaises(ValueError):
+            _preprocess_input(self.dm, [1, 2, 3], None)
+
+        # Grouping vector cannot have only a single group.
+        with self.assertRaises(ValueError):
+            _preprocess_input(self.dm, [1, 1, 1], None)
+
+    def test_run_monte_carlo_stats_with_permutations(self):
+        # The stat function always returns 42, so every permuted statistic
+        # ties the observed one and the p-value must be exactly 1.0.
+        obs = _run_monte_carlo_stats(lambda e: 42, self.grouping, 50)
+        npt.assert_equal(obs, (42, 1.0))
+
+    def test_run_monte_carlo_stats_no_permutations(self):
+        # Zero permutations: statistic is still computed but the p-value is
+        # undefined (NaN).
+        obs = _run_monte_carlo_stats(lambda e: 42, self.grouping, 0)
+        npt.assert_equal(obs, (42, np.nan))
+
+    def test_run_monte_carlo_stats_invalid_permutations(self):
+        with self.assertRaises(ValueError):
+            _run_monte_carlo_stats(lambda e: 42, self.grouping, -1)
+
+
+class CategoricalStatsTests(TestCase):
+    """Tests for the CategoricalStats base class (input validation and the
+    abstract __call__ interface).
+    """
+
+    def setUp(self):
+        self.dm = DistanceMatrix([[0.0, 1.0, 2.0], [1.0, 0.0, 3.0],
+                                  [2.0, 3.0, 0.0]], ['a', 'b', 'c'])
+        self.grouping = [1, 2, 1]
+        # Ordering of IDs shouldn't matter, nor should extra IDs.
+        self.df = pd.read_csv(
+            StringIO('ID,Group\nb,Group1\na,Group2\nc,Group1\nd,Group3'),
+            index_col=0)
+        self.df_missing_id = pd.read_csv(
+            StringIO('ID,Group\nb,Group1\nc,Group1'), index_col=0)
+        # Same stats object built two equivalent ways: from a grouping
+        # vector and from a data frame column.
+        self.categorical_stats = CategoricalStats(self.dm, self.grouping)
+        self.categorical_stats_from_df = CategoricalStats(self.dm, self.df,
+                                                          column='Group')
+
+    def test_init_invalid_input(self):
+        # Requires a DistanceMatrix.
+        with self.assertRaises(TypeError):
+            CategoricalStats(DissimilarityMatrix([[0, 2], [3, 0]], ['a', 'b']),
+                             [1, 2])
+
+        # Requires column if DataFrame.
+        with self.assertRaises(ValueError):
+            CategoricalStats(self.dm, self.df)
+
+        # Cannot provide column if not data frame.
+        with self.assertRaises(ValueError):
+            CategoricalStats(self.dm, self.grouping, column='Group')
+
+        # Column must exist in data frame.
+        with self.assertRaises(ValueError):
+            CategoricalStats(self.dm, self.df, column='foo')
+
+        # All distance matrix IDs must be in data frame.
+        with self.assertRaises(ValueError):
+            CategoricalStats(self.dm, self.df_missing_id, column='Group')
+
+        # Grouping vector length must match number of objects in dm.
+        with self.assertRaises(ValueError):
+            CategoricalStats(self.dm, [1, 2])
+
+        # Grouping vector cannot have only unique values.
+        with self.assertRaises(ValueError):
+            CategoricalStats(self.dm, [1, 2, 3])
+
+        # Grouping vector cannot have only a single group.
+        with self.assertRaises(ValueError):
+            CategoricalStats(self.dm, [1, 1, 1])
+
+    def test_call(self):
+        # The base class is abstract: calling it directly must raise.
+        with self.assertRaises(NotImplementedError):
+            self.categorical_stats()
+
+    def test_call_invalid_permutations(self):
+        # Permutation count is validated before the (abstract) stat runs.
+        with self.assertRaises(ValueError):
+            self.categorical_stats(-1)
+
+
+class CategoricalStatsResultsTests(TestCase):
+    """Tests for the text/HTML/TSV renderings of CategoricalStatsResults."""
+
+    def setUp(self):
+        # Fields: short name, long name, statistic label, sample size,
+        # groups, test statistic, p-value, number of permutations.
+        self.results = CategoricalStatsResults('foo', 'Foo', 'my stat', 42,
+                                               ['a', 'b', 'c', 'd'],
+                                               0.01234567890, 0.1151111, 99)
+
+    def test_str(self):
+        # Note the p-value (0.1151111) is rendered rounded to two decimal
+        # places (0.12) in the plain-text table.
+        exp = ('Method name  Sample size  Number of groups       my stat  '
+               'p-value  Number of permutations\n        foo           42'
+               '                 4  0.0123456789     0.12'
+               '                      99\n')
+        obs = str(self.results)
+        self.assertEqual(obs, exp)
+
+    def test_repr_html(self):
+        # Not going to test against exact HTML that we expect, as this could
+        # easily break and be annoying to constantly update. Do some light
+        # sanity-checking to ensure there are some of the expected HTML tags.
+        obs = self.results._repr_html_()
+        self.assertTrue('<table' in obs)
+        self.assertTrue('<thead' in obs)
+        self.assertTrue('<tr' in obs)
+        self.assertTrue('<th' in obs)
+        self.assertTrue('<tbody' in obs)
+        self.assertTrue('<td' in obs)
+
+    def test_summary(self):
+        # Tab-separated summary; same rounding behavior as __str__.
+        exp = ('Method name\tSample size\tNumber of groups\tmy stat\tp-value\t'
+               'Number of permutations\nfoo\t42\t4\t0.0123456789\t0.12\t99\n')
+        obs = self.results.summary()
+        self.assertEqual(obs, exp)
+
+
+# Allow this test module to be run directly as a script.
+if __name__ == '__main__':
+    main()
diff --git a/skbio/stats/distance/tests/test_bioenv.py b/skbio/stats/distance/tests/test_bioenv.py
new file mode 100644
index 0000000..e83fdc5
--- /dev/null
+++ b/skbio/stats/distance/tests/test_bioenv.py
@@ -0,0 +1,222 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from unittest import TestCase, main
+
+import numpy as np
+import pandas as pd
+from pandas.util.testing import assert_frame_equal
+
+from skbio import DistanceMatrix
+from skbio.stats.distance import bioenv
+from skbio.stats.distance._bioenv import _scale
+from skbio.util import get_data_path
+
+
+class BIOENVTests(TestCase):
+    """Results were verified with R 3.0.2 and vegan 2.0-10 (vegan::bioenv)."""
+
+    def setUp(self):
+        # The test dataset used here is a subset of the Lauber et al. 2009
+        # "88 Soils" dataset. It has been altered to exercise various aspects
+        # of the code, including (but not limited to):
+        #
+        # - order of distance matrix IDs and IDs in data frame (metadata) are
+        #   not exactly the same
+        # - data frame has an extra sample that is not in the distance matrix
+        # - this extra sample has non-numeric and missing values in some of its
+        #   cells
+        #
+        # Additional variations of the distance matrix and data frame are used
+        # to test different orderings of rows/columns, extra non-numeric data
+        # frame columns, etc.
+        #
+        # This dataset is also useful because it is non-trivial in size (6
+        # samples, 11 environment variables) and it includes positive/negative
+        # floats and integers in the data frame.
+        self.dm = DistanceMatrix.read(get_data_path('dm.txt'))
+
+        # Reordered rows and columns (i.e., different ID order). Still
+        # conceptually the same distance matrix.
+        self.dm_reordered = DistanceMatrix.read(
+            get_data_path('dm_reordered.txt'))
+
+        self.df = pd.read_csv(get_data_path('df.txt'), sep='\t', index_col=0)
+
+        # Similar to the above data frame, except that it has an extra
+        # non-numeric column, and some of the other rows and columns have been
+        # reordered.
+        self.df_extra_column = pd.read_csv(
+            get_data_path('df_extra_column.txt'), sep='\t', index_col=0)
+
+        # All columns in the original data frame (these are all numeric
+        # columns).
+        self.cols = self.df.columns.tolist()
+
+        # This second dataset is derived from vegan::bioenv's example dataset
+        # (varespec and varechem). The original dataset includes a site x
+        # species table (e.g., OTU table) and a data frame of environmental
+        # variables. Since the bioenv function defined here accepts a distance
+        # matrix, we use a Bray-Curtis distance matrix that is derived from the
+        # site x species table (this matches what is done by vegan::bioenv when
+        # provided an OTU table, using their default distance measure). The
+        # data frame only includes the numeric environmental variables we're
+        # interested in for these tests: log(N), P, K, Ca, pH, Al
+        self.dm_vegan = DistanceMatrix.read(
+            get_data_path('bioenv_dm_vegan.txt'))
+        self.df_vegan = pd.read_csv(
+            get_data_path('bioenv_df_vegan.txt'), sep='\t',
+            converters={0: str})
+        self.df_vegan.set_index('#SampleID', inplace=True)
+
+        # Load expected results.
+        self.exp_results = pd.read_csv(get_data_path('exp_results.txt'),
+                                       sep='\t', index_col=0)
+        self.exp_results_single_column = pd.read_csv(
+            get_data_path('exp_results_single_column.txt'), sep='\t',
+            index_col=0)
+        self.exp_results_different_column_order = pd.read_csv(
+            get_data_path('exp_results_different_column_order.txt'), sep='\t',
+            index_col=0)
+        self.exp_results_vegan = pd.read_csv(
+            get_data_path('bioenv_exp_results_vegan.txt'), sep='\t',
+            index_col=0)
+
+    def test_bioenv_all_columns_implicit(self):
+        # Test with all columns in data frame (implicitly).
+        obs = bioenv(self.dm, self.df)
+        assert_frame_equal(obs, self.exp_results)
+
+        # Should get the same results if order of rows/cols in distance matrix
+        # is changed.
+        obs = bioenv(self.dm_reordered, self.df)
+        assert_frame_equal(obs, self.exp_results)
+
+    def test_bioenv_all_columns_explicit(self):
+        # Test with all columns being specified.
+        obs = bioenv(self.dm, self.df, columns=self.cols)
+        assert_frame_equal(obs, self.exp_results)
+
+        # Test against a data frame that has an extra non-numeric column and
+        # some of the rows and columns reordered (we should get the same
+        # result since we're specifying the same columns in the same order).
+        obs = bioenv(self.dm, self.df_extra_column, columns=self.cols)
+        assert_frame_equal(obs, self.exp_results)
+
+    def test_bioenv_single_column(self):
+        # A single-column subset is a valid input and has its own expected
+        # result file.
+        obs = bioenv(self.dm, self.df, columns=['PH'])
+        assert_frame_equal(obs, self.exp_results_single_column)
+
+    def test_bioenv_different_column_order(self):
+        # Specifying columns in a different order will change the row labels in
+        # the results data frame as the column subsets will be reordered, but
+        # the actual results (e.g., correlation coefficients) shouldn't change.
+        obs = bioenv(self.dm, self.df, columns=self.cols[::-1])
+        assert_frame_equal(obs, self.exp_results_different_column_order)
+
+    def test_bioenv_no_side_effects(self):
+        # Deep copies of both primary inputs.
+        dm_copy = self.dm.copy()
+        df_copy = self.df.copy(deep=True)
+
+        bioenv(self.dm, self.df)
+
+        # Make sure we haven't modified the primary input in some way (e.g.,
+        # with scaling, type conversions, etc.).
+        self.assertEqual(self.dm, dm_copy)
+        assert_frame_equal(self.df, df_copy)
+
+    def test_bioenv_vegan_example(self):
+        # The correlation coefficient in the first row of the
+        # results (rho=0.2516) is different from the correlation coefficient
+        # computed by vegan (rho=0.2513). This seems to occur due to
+        # differences in numerical precision when calculating the Euclidean
+        # distances, which affects the rank calculations in Spearman
+        # (specifically, dealing with ties). The ranked distances end up being
+        # slightly different between vegan and our implementation because some
+        # distances are treated as ties in vegan but treated as distinct values
+        # in our implementation. This explains the difference in rho values. I
+        # verified that using Pearson correlation instead of Spearman on the
+        # same distances yields *very* similar results. Thus, the discrepancy
+        # seems to stem from differences when computing ranks/ties.
+        obs = bioenv(self.dm_vegan, self.df_vegan)
+        assert_frame_equal(obs, self.exp_results_vegan)
+
+    def test_bioenv_no_distance_matrix(self):
+        # First argument must be a DistanceMatrix instance.
+        with self.assertRaises(TypeError):
+            bioenv('breh', self.df)
+
+    def test_bioenv_no_data_frame(self):
+        # Second argument must be a data frame, not None.
+        with self.assertRaises(TypeError):
+            bioenv(self.dm, None)
+
+    def test_bioenv_duplicate_columns(self):
+        with self.assertRaises(ValueError):
+            bioenv(self.dm, self.df, columns=self.cols + ['PH'])
+
+    def test_bioenv_no_columns(self):
+        # An explicit empty column list is rejected.
+        with self.assertRaises(ValueError):
+            bioenv(self.dm, self.df, columns=[])
+
+    def test_bioenv_missing_columns(self):
+        # Requested columns must all exist in the data frame.
+        with self.assertRaises(ValueError):
+            bioenv(self.dm, self.df, columns=self.cols + ['brofist'])
+
+    def test_bioenv_missing_distance_matrix_ids(self):
+        # Drop the first sample from the data frame so an ID present in the
+        # distance matrix has no metadata.
+        df = self.df[1:]
+        with self.assertRaises(ValueError):
+            bioenv(self.dm, df)
+
+    def test_bioenv_nans(self):
+        # Missing (NaN) values in the environmental variables are rejected.
+        df = self.df.replace(53.9, np.nan)
+        with self.assertRaises(ValueError):
+            bioenv(self.dm, df)
+
+    def test_bioenv_nonnumeric_columns(self):
+        # Non-numeric values in a used column are rejected, whether injected
+        # here or present in the extra non-numeric column of the alternate
+        # data frame (when columns are not restricted explicitly).
+        df = self.df.replace(2400, 'no cog yay')
+        with self.assertRaises(TypeError):
+            bioenv(self.dm, df)
+
+        with self.assertRaises(TypeError):
+            bioenv(self.dm, self.df_extra_column)
+
+    def test_scale_single_column(self):
+        # _scale standardizes each column (mean 0, and values match a
+        # sample-std-based scaling for this fixture).
+        df = pd.DataFrame([[1], [0], [2]], index=['A', 'B', 'C'],
+                          columns=['foo'])
+        exp = pd.DataFrame([[0.0], [-1.0], [1.0]], index=['A', 'B', 'C'],
+                           columns=['foo'])
+        obs = _scale(df)
+        assert_frame_equal(obs, exp)
+
+    def test_scale_multiple_columns(self):
+        # Floats and ints, including positives and negatives.
+        df = pd.DataFrame([[7.0, 400, -1],
+                           [8.0, 530, -5],
+                           [7.5, 450, 1],
+                           [8.5, 810, -4]],
+                          index=['A', 'B', 'C', 'D'],
+                          columns=['pH', 'Elevation', 'negatives'])
+        exp = pd.DataFrame([[-1.161895, -0.805979, 0.453921],
+                            [0.387298, -0.095625, -0.998625],
+                            [-0.387298, -0.532766, 1.180194],
+                            [1.161895, 1.434369, -0.635489]],
+                           index=['A', 'B', 'C', 'D'],
+                           columns=['pH', 'Elevation', 'negatives'])
+        obs = _scale(df)
+        assert_frame_equal(obs, exp)
+
+    def test_scale_no_variance(self):
+        # A constant column ('bar') cannot be standardized (division by a
+        # zero standard deviation) and must raise.
+        df = pd.DataFrame([[-7.0, -1.2], [6.2, -1.2], [2.9, -1.2]],
+                          index=['A', 'B', 'C'], columns=['foo', 'bar'])
+        with self.assertRaises(ValueError):
+            _scale(df)
+
+
+# Allow this test module to be run directly as a script.
+if __name__ == '__main__':
+    main()
diff --git a/skbio/stats/distance/tests/test_mantel.py b/skbio/stats/distance/tests/test_mantel.py
new file mode 100644
index 0000000..cff8f4c
--- /dev/null
+++ b/skbio/stats/distance/tests/test_mantel.py
@@ -0,0 +1,569 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+from pandas.util.testing import assert_frame_equal
+
+from skbio import DistanceMatrix
+from skbio.stats.distance import (DissimilarityMatrixError,
+                                  DistanceMatrixError, mantel, pwmantel)
+from skbio.stats.distance._mantel import _order_dms
+from skbio.util import get_data_path
+
+
+class MantelTestData(TestCase):
+    """Shared fixtures for the Mantel and pairwise-Mantel test classes."""
+
+    def setUp(self):
+        # Small dataset of minimal size (3x3). Mix of floats and ints in a
+        # native Python nested list structure.
+        self.minx = [[0, 1, 2], [1, 0, 3], [2, 3, 0]]
+        self.miny = [[0, 2, 7], [2, 0, 6], [7, 6, 0]]
+        self.minz = [[0, 0.5, 0.25], [0.5, 0, 0.1], [0.25, 0.1, 0]]
+
+        # Version of the above dataset stored as DistanceMatrix instances.
+        self.minx_dm = DistanceMatrix(self.minx)
+        self.miny_dm = DistanceMatrix(self.miny)
+        self.minz_dm = DistanceMatrix(self.minz)
+
+        # Versions of self.minx_dm and self.minz_dm that each have an extra ID
+        # on the end.
+        self.minx_dm_extra = DistanceMatrix([[0, 1, 2, 7],
+                                             [1, 0, 3, 2],
+                                             [2, 3, 0, 4],
+                                             [7, 2, 4, 0]],
+                                            ['0', '1', '2', 'foo'])
+        self.minz_dm_extra = DistanceMatrix([[0, 0.5, 0.25, 3],
+                                             [0.5, 0, 0.1, 24],
+                                             [0.25, 0.1, 0, 5],
+                                             [3, 24, 5, 0]],
+                                            ['0', '1', '2', 'bar'])
+
+
+class MantelTests(MantelTestData):
+    """Results were verified with R 3.1.0 and vegan 2.0-10 (vegan::mantel).
+
+    vegan::mantel performs a one-sided (greater) test and does not have the
+    option to specify different alternative hypotheses. In order to test the
+    other alternative hypotheses, I modified vegan::mantel to perform the
+    appropriate test, source()'d the file and verified the output.
+
+    """
+
+    def setUp(self):
+        super(MantelTests, self).setUp()
+
+        self.methods = ('pearson', 'spearman')
+        self.alternatives = ('two-sided', 'greater', 'less')
+
+        # No variation in distances. Taken from Figure 10.20(b), pg. 603 in L&L
+        # 3rd edition. Their example is 4x4 but using 3x3 here for easy
+        # comparison to the minimal dataset above.
+        self.no_variation = [[0, 0.667, 0.667],
+                             [0.667, 0, 0.667],
+                             [0.667, 0.667, 0]]
+
+        # This second dataset is derived from vegan::mantel's example dataset.
+        # The "veg" distance matrix contains Bray-Curtis distances derived from
+        # the varespec data (named "veg.dist" in the example). The "env"
+        # distance matrix contains Euclidean distances derived from scaled
+        # varechem data (named "env.dist" in the example).
+        self.veg_dm_vegan = np.loadtxt(
+            get_data_path('mantel_veg_dm_vegan.txt'))
+        self.env_dm_vegan = np.loadtxt(
+            get_data_path('mantel_env_dm_vegan.txt'))
+
+        # Expected test statistic when comparing x and y with method='pearson'.
+        self.exp_x_vs_y = 0.7559289
+
+        # Expected test statistic when comparing x and z with method='pearson'.
+        self.exp_x_vs_z = -0.9897433
+
+    def test_statistic_same_across_alternatives_and_permutations(self):
+        # Varying permutations and alternative hypotheses shouldn't affect the
+        # computed test statistics.
+        for n in (0, 99, 999):
+            for alt in self.alternatives:
+                for method, exp in (('pearson', self.exp_x_vs_y),
+                                    ('spearman', 0.5)):
+                    obs = mantel(self.minx, self.miny, method=method,
+                                 permutations=n, alternative=alt)[0]
+                    self.assertAlmostEqual(obs, exp)
+
+    def test_comparing_same_matrices(self):
+        # A matrix compared against itself is perfectly correlated (r = 1)
+        # under either correlation method.
+        for method in self.methods:
+            obs = mantel(self.minx, self.minx, method=method)[0]
+            self.assertAlmostEqual(obs, 1)
+
+            obs = mantel(self.miny, self.miny, method=method)[0]
+            self.assertAlmostEqual(obs, 1)
+
+    def test_negative_correlation(self):
+        # minx and minz are negatively correlated (perfectly so by rank,
+        # hence Spearman's rho of exactly -1).
+        for method, exp in (('pearson', self.exp_x_vs_z), ('spearman', -1)):
+            obs = mantel(self.minx, self.minz, method=method)[0]
+            self.assertAlmostEqual(obs, exp)
+
+    def test_zero_permutations(self):
+        # With zero permutations the statistic and sample size are still
+        # reported but the p-value is NaN.
+        for alt in self.alternatives:
+            for method, exp in (('pearson', self.exp_x_vs_y),
+                                ('spearman', 0.5)):
+                obs = mantel(self.minx, self.miny, permutations=0,
+                             method=method, alternative=alt)
+                self.assertAlmostEqual(obs[0], exp)
+                npt.assert_equal(obs[1], np.nan)
+                self.assertEqual(obs[2], 3)
+
+                # swapping order of matrices should give same result
+                obs = mantel(self.miny, self.minx, permutations=0,
+                             method=method, alternative=alt)
+                self.assertAlmostEqual(obs[0], exp)
+                npt.assert_equal(obs[1], np.nan)
+                self.assertEqual(obs[2], 3)
+
+    def test_distance_matrix_instances_as_input(self):
+        # Matrices with all matching IDs in the same order.
+        np.random.seed(0)
+
+        obs = mantel(self.minx_dm, self.miny_dm, alternative='less')
+
+        self.assertAlmostEqual(obs[0], self.exp_x_vs_y)
+        self.assertAlmostEqual(obs[1], 0.843)
+        self.assertEqual(obs[2], 3)
+
+    def test_distance_matrix_instances_with_reordering_and_nonmatching(self):
+        x = self.minx_dm_extra.filter(['1', '0', 'foo', '2'])
+        y = self.miny_dm.filter(['0', '2', '1'])
+
+        # strict=True should disallow IDs that aren't found in both matrices
+        with self.assertRaises(ValueError):
+            mantel(x, y, alternative='less', strict=True)
+
+        np.random.seed(0)
+
+        # strict=False should ignore IDs that aren't found in both matrices
+        obs = mantel(x, y, alternative='less', strict=False)
+
+        self.assertAlmostEqual(obs[0], self.exp_x_vs_y)
+        self.assertAlmostEqual(obs[1], 0.843)
+        self.assertEqual(obs[2], 3)
+
+    def test_distance_matrix_instances_with_lookup(self):
+        # IDs differ between matrices, but the lookup maps both ID sets onto
+        # a common namespace so the comparison succeeds.
+        self.minx_dm.ids = ('a', 'b', 'c')
+        self.miny_dm.ids = ('d', 'e', 'f')
+        lookup = {'a': 'A', 'b': 'B', 'c': 'C',
+                  'd': 'A', 'e': 'B', 'f': 'C'}
+
+        np.random.seed(0)
+
+        obs = mantel(self.minx_dm, self.miny_dm, alternative='less',
+                     lookup=lookup)
+        self.assertAlmostEqual(obs[0], self.exp_x_vs_y)
+        self.assertAlmostEqual(obs[1], 0.843)
+        self.assertEqual(obs[2], 3)
+
+    def test_one_sided_greater(self):
+        np.random.seed(0)
+
+        obs = mantel(self.minx, self.miny, alternative='greater')
+        self.assertAlmostEqual(obs[0], self.exp_x_vs_y)
+        self.assertAlmostEqual(obs[1], 0.324)
+        self.assertEqual(obs[2], 3)
+
+        obs = mantel(self.minx, self.minx, alternative='greater')
+        self.assertAlmostEqual(obs[0], 1)
+        self.assertAlmostEqual(obs[1], 0.172)
+        self.assertEqual(obs[2], 3)
+
+    def test_one_sided_less(self):
+        # no need to seed here as permuted test statistics will all be less
+        # than or equal to the observed test statistic (1.0)
+        for method in self.methods:
+            obs = mantel(self.minx, self.minx, method=method,
+                         alternative='less')
+            self.assertEqual(obs, (1, 1, 3))
+
+        np.random.seed(0)
+
+        obs = mantel(self.minx, self.miny, alternative='less')
+        self.assertAlmostEqual(obs[0], self.exp_x_vs_y)
+        self.assertAlmostEqual(obs[1], 0.843)
+        self.assertEqual(obs[2], 3)
+
+        obs = mantel(self.minx, self.minz, alternative='less')
+        self.assertAlmostEqual(obs[0], self.exp_x_vs_z)
+        self.assertAlmostEqual(obs[1], 0.172)
+        self.assertEqual(obs[2], 3)
+
+    def test_two_sided(self):
+        np.random.seed(0)
+
+        obs = mantel(self.minx, self.minx, method='spearman',
+                     alternative='two-sided')
+        self.assertEqual(obs[0], 1)
+        self.assertAlmostEqual(obs[1], 0.328)
+        self.assertEqual(obs[2], 3)
+
+        obs = mantel(self.minx, self.miny, method='spearman',
+                     alternative='two-sided')
+        self.assertAlmostEqual(obs[0], 0.5)
+        self.assertAlmostEqual(obs[1], 1.0)
+        self.assertEqual(obs[2], 3)
+
+        obs = mantel(self.minx, self.minz, method='spearman',
+                     alternative='two-sided')
+        self.assertAlmostEqual(obs[0], -1)
+        self.assertAlmostEqual(obs[1], 0.322)
+        self.assertEqual(obs[2], 3)
+
+    def test_vegan_example(self):
+        np.random.seed(0)
+
+        # pearson
+        obs = mantel(self.veg_dm_vegan, self.env_dm_vegan,
+                     alternative='greater')
+        self.assertAlmostEqual(obs[0], 0.3047454)
+        self.assertAlmostEqual(obs[1], 0.002)
+        self.assertEqual(obs[2], 24)
+
+        # spearman
+        obs = mantel(self.veg_dm_vegan, self.env_dm_vegan,
+                     alternative='greater', method='spearman')
+        self.assertAlmostEqual(obs[0], 0.283791)
+        self.assertAlmostEqual(obs[1], 0.003)
+        self.assertEqual(obs[2], 24)
+
+    def test_no_variation_pearson(self):
+        # Output doesn't match vegan::mantel with method='pearson'. Consider
+        # revising output and this test depending on outcome of
+        # https://github.com/scipy/scipy/issues/3728
+        for alt in self.alternatives:
+            # test one or both inputs having no variation in their
+            # distances
+            obs = mantel(self.miny, self.no_variation, method='pearson',
+                         alternative=alt)
+            npt.assert_equal(obs, (0.0, 1.0, 3))
+
+            obs = mantel(self.no_variation, self.miny, method='pearson',
+                         alternative=alt)
+            npt.assert_equal(obs, (0.0, 1.0, 3))
+
+            obs = mantel(self.no_variation, self.no_variation,
+                         method='pearson', alternative=alt)
+            npt.assert_equal(obs, (1.0, 1.0, 3))
+
+    def test_no_variation_spearman(self):
+        # With no variation, Spearman yields NaN for both the statistic and
+        # the p-value (ranking a constant vector is undefined).
+        exp = (np.nan, np.nan, 3)
+        for alt in self.alternatives:
+            obs = mantel(self.miny, self.no_variation, method='spearman',
+                         alternative=alt)
+            npt.assert_equal(obs, exp)
+
+            obs = mantel(self.no_variation, self.miny, method='spearman',
+                         alternative=alt)
+            npt.assert_equal(obs, exp)
+
+            obs = mantel(self.no_variation, self.no_variation,
+                         method='spearman', alternative=alt)
+            npt.assert_equal(obs, exp)
+
+    def test_no_side_effects(self):
+        minx = np.asarray(self.minx, dtype='float')
+        miny = np.asarray(self.miny, dtype='float')
+
+        minx_copy = np.copy(minx)
+        miny_copy = np.copy(miny)
+
+        mantel(minx, miny)
+
+        # Make sure we haven't modified the input.
+        npt.assert_equal(minx, minx_copy)
+        npt.assert_equal(miny, miny_copy)
+
+    def test_invalid_distance_matrix(self):
+        # Single asymmetric, non-hollow distance matrix.
+        with self.assertRaises(DissimilarityMatrixError):
+            mantel([[1, 2], [3, 4]], [[0, 0], [0, 0]])
+
+        # Two asymmetric distance matrices.
+        with self.assertRaises(DistanceMatrixError):
+            mantel([[0, 2], [3, 0]], [[0, 1], [0, 0]])
+
+    def test_invalid_input(self):
+        # invalid correlation method
+        with self.assertRaises(ValueError):
+            mantel([[1]], [[1]], method='brofist')
+
+        # invalid permutations
+        with self.assertRaises(ValueError):
+            mantel([[1]], [[1]], permutations=-1)
+
+        # invalid alternative
+        with self.assertRaises(ValueError):
+            mantel([[1]], [[1]], alternative='no cog yay')
+
+        # too small dms
+        with self.assertRaises(ValueError):
+            mantel([[0, 3], [3, 0]], [[0, 2], [2, 0]])
+
+
+class PairwiseMantelTests(MantelTestData):
+    def setUp(self):
+        # Build on the shared minimal matrices and load the expected pwmantel
+        # result frames (two-level index: the pair of matrix labels).
+        super(PairwiseMantelTests, self).setUp()
+
+        self.min_dms = (self.minx_dm, self.miny_dm, self.minz_dm)
+
+        self.exp_results_minimal = pd.read_csv(
+            get_data_path('pwmantel_exp_results_minimal.txt'), sep='\t',
+            index_col=(0, 1))
+
+        self.exp_results_minimal_with_labels = pd.read_csv(
+            get_data_path('pwmantel_exp_results_minimal_with_labels.txt'),
+            sep='\t', index_col=(0, 1))
+
+        self.exp_results_duplicate_dms = pd.read_csv(
+            get_data_path('pwmantel_exp_results_duplicate_dms.txt'),
+            sep='\t', index_col=(0, 1))
+
+        self.exp_results_na_p_value = pd.read_csv(
+            get_data_path('pwmantel_exp_results_na_p_value.txt'),
+            sep='\t', index_col=(0, 1))
+
+        self.exp_results_reordered_distance_matrices = pd.read_csv(
+            get_data_path('pwmantel_exp_results_reordered_distance_matrices'
+                          '.txt'),
+            sep='\t', index_col=(0, 1))
+
+        self.exp_results_dm_dm2 = pd.read_csv(
+            get_data_path('pwmantel_exp_results_dm_dm2.txt'),
+            sep='\t', index_col=(0, 1))
+
+        self.exp_results_all_dms = pd.read_csv(
+            get_data_path('pwmantel_exp_results_all_dms.txt'),
+            sep='\t', index_col=(0, 1))
+
+    def test_minimal_compatible_input(self):
+        # Matrices are already in the correct order and have matching IDs.
+        np.random.seed(0)
+
+        # input as DistanceMatrix instances
+        obs = pwmantel(self.min_dms, alternative='greater')
+        assert_frame_equal(obs, self.exp_results_minimal)
+
+        # Re-seed so the permutation stream is identical for the second run.
+        np.random.seed(0)
+
+        # input as array_like
+        obs = pwmantel((self.minx, self.miny, self.minz),
+                       alternative='greater')
+        assert_frame_equal(obs, self.exp_results_minimal)
+
+    def test_minimal_compatible_input_with_labels(self):
+        np.random.seed(0)
+
+        obs = pwmantel(self.min_dms, alternative='greater',
+                       labels=('minx', 'miny', 'minz'))
+        assert_frame_equal(obs, self.exp_results_minimal_with_labels)
+
+    def test_duplicate_dms(self):
+        obs = pwmantel((self.minx_dm, self.minx_dm, self.minx_dm),
+                       alternative='less')
+        assert_frame_equal(obs, self.exp_results_duplicate_dms)
+
+    def test_na_p_value(self):
+        obs = pwmantel((self.miny_dm, self.minx_dm), method='spearman',
+                       permutations=0)
+        assert_frame_equal(obs, self.exp_results_na_p_value)
+
+    def test_reordered_distance_matrices(self):
+        # Matrices have matching IDs but they all have different ordering.
+        x = self.minx_dm.filter(['1', '0', '2'])
+        y = self.miny_dm.filter(['0', '2', '1'])
+        z = self.minz_dm.filter(['1', '2', '0'])
+
+        np.random.seed(0)
+
+        obs = pwmantel((x, y, z), alternative='greater')
+        assert_frame_equal(obs, self.exp_results_reordered_distance_matrices)
+
+    def test_strict(self):
+        # Matrices have some matching and nonmatching IDs, with different
+        # ordering.
+        x = self.minx_dm_extra.filter(['1', '0', 'foo', '2'])
+        y = self.miny_dm.filter(['0', '2', '1'])
+        z = self.minz_dm_extra.filter(['bar', '1', '2', '0'])
+
+        np.random.seed(0)
+
+        # strict=False should discard IDs that aren't found in both matrices
+        obs = pwmantel((x, y, z), alternative='greater', strict=False)
+        assert_frame_equal(obs, self.exp_results_reordered_distance_matrices)
+
+    def test_id_lookup(self):
+        # Matrices have mismatched IDs but a lookup is provided.
+        self.minx_dm_extra.ids = ['a', 'b', 'c', 'foo']
+        self.minz_dm_extra.ids = ['d', 'e', 'f', 'bar']
+        lookup = {'a': '0', 'b': '1', 'c': '2', 'foo': 'foo',
+                  'd': '0', 'e': '1', 'f': '2', 'bar': 'bar',
+                  '0': '0', '1': '1', '2': '2'}
+
+        x = self.minx_dm_extra.filter(['b', 'a', 'foo', 'c'])
+        y = self.miny_dm.filter(['0', '2', '1'])
+        z = self.minz_dm_extra.filter(['bar', 'e', 'f', 'd'])
+
+        x_copy = x.copy()
+        y_copy = y.copy()
+        z_copy = z.copy()
+
+        np.random.seed(0)
+
+        obs = pwmantel((x, y, z), alternative='greater', strict=False,
+                       lookup=lookup)
+        assert_frame_equal(obs, self.exp_results_reordered_distance_matrices)
+
+        # Make sure the inputs aren't modified.
+        self.assertEqual(x, x_copy)
+        self.assertEqual(y, y_copy)
+        self.assertEqual(z, z_copy)
+
+    def test_too_few_dms(self):
+        with self.assertRaises(ValueError):
+            pwmantel([self.miny_dm])
+
+    def test_wrong_number_of_labels(self):
+        with self.assertRaises(ValueError):
+            pwmantel(self.min_dms, labels=['foo', 'bar'])
+
+    def test_duplicate_labels(self):
+        with self.assertRaises(ValueError):
+            pwmantel(self.min_dms, labels=['foo', 'bar', 'foo'])
+
+    def test_mixed_input_types(self):
+        # DistanceMatrix, DistanceMatrix, array_like
+        with self.assertRaises(TypeError):
+            pwmantel((self.miny_dm, self.minx_dm, self.minz))
+
+    def test_filepaths_as_input(self):
+        dms = [
+            get_data_path('dm.txt'),
+            get_data_path('dm2.txt'),
+        ]
+        np.random.seed(0)
+
+        obs = pwmantel(dms)
+        assert_frame_equal(obs, self.exp_results_dm_dm2)
+
+    def test_many_filepaths_as_input(self):
+        dms = [
+            get_data_path('dm2.txt'),
+            get_data_path('dm.txt'),
+            get_data_path('dm4.txt'),
+            get_data_path('dm3.txt')
+        ]
+        np.random.seed(0)
+
+        obs = pwmantel(dms)
+        assert_frame_equal(obs, self.exp_results_all_dms)
+
+
class OrderDistanceMatricesTests(MantelTestData):
    """Tests for the ``_order_dms`` helper that aligns two distance matrices.

    ``_order_dms`` coerces array_like inputs to ``DistanceMatrix``, optionally
    remaps IDs through a ``lookup`` dict, and reorders/filters the second
    matrix to match the first.

    Note: the redundant ``setUp`` override that only called ``super()`` was
    removed; the inherited ``MantelTestData.setUp`` still runs.
    """

    def test_array_like_input(self):
        # array_like inputs are converted to DistanceMatrix instances.
        obs = _order_dms(self.minx, self.miny)
        self.assertEqual(obs, (self.minx_dm, self.miny_dm))

    def test_reordered_distance_matrices(self):
        # All matching IDs but with different orderings.
        x = self.minx_dm.filter(['1', '0', '2'])
        y = self.miny_dm.filter(['0', '2', '1'])

        # The second matrix is reordered to match the first's ID order.
        exp = (x, y.filter(['1', '0', '2']))
        obs = _order_dms(x, y)
        self.assertEqual(obs, exp)

    def test_reordered_and_nonmatching_distance_matrices(self):
        # Some matching and nonmatching IDs, with different ordering.
        x = self.minx_dm_extra.filter(['1', '0', 'foo', '2'])
        z = self.minz_dm_extra.filter(['bar', '0', '2', '1'])

        # strict=False drops IDs not shared by both matrices.
        exp = (x.filter(['1', '0', '2']), z.filter(['1', '0', '2']))
        obs = _order_dms(x, z, strict=False)
        self.assertEqual(obs, exp)

    def test_id_lookup(self):
        # Matrices have mismatched IDs but a lookup is provided.
        self.minx_dm_extra.ids = ['a', 'b', 'c', 'foo']
        self.minz_dm_extra.ids = ['d', 'e', 'f', 'bar']
        lookup = {'a': '0', 'b': '1', 'c': '2', 'foo': 'foo',
                  'd': '0', 'e': '1', 'f': '2', 'bar': 'bar'}

        x = self.minx_dm_extra.filter(['b', 'a', 'foo', 'c'])
        z = self.minz_dm_extra.filter(['bar', 'e', 'f', 'd'])

        x_copy = x.copy()
        z_copy = z.copy()

        exp = (self.minx_dm.filter(['1', '0', '2']),
               self.minz_dm.filter(['1', '0', '2']))
        obs = _order_dms(x, z, strict=False, lookup=lookup)
        self.assertEqual(obs, exp)

        # Make sure the inputs aren't modified.
        self.assertEqual(x, x_copy)
        self.assertEqual(z, z_copy)

    def test_lookup_with_array_like(self):
        # A lookup only makes sense for DistanceMatrix inputs, which carry
        # IDs; array_like inputs do not.
        lookup = {'0': 'a', '1': 'b', '2': 'c'}
        with self.assertRaises(ValueError):
            _order_dms(self.minx, self.miny, lookup=lookup)

    def test_shape_mismatch(self):
        # 3x3 vs 2x2 input.
        with self.assertRaises(ValueError):
            _order_dms(self.minx, [[0, 2], [2, 0]])

    def test_missing_ids_in_lookup(self):
        # Mapping for '1' is missing. Should get an error while remapping IDs
        # for the first distance matrix.
        lookup = {'0': 'a', '2': 'c'}

        # Parentheses are escaped so the pattern matches the literal "(x)"
        # in the error message instead of creating a regex group.
        with self.assertRaisesRegexp(KeyError, r"first.*\(x\).*'1'\"$"):
            _order_dms(self.minx_dm, self.miny_dm, lookup=lookup)

        # Mapping for 'bar' is missing. Should get an error while remapping IDs
        # for the second distance matrix.
        lookup = {'0': 'a', '1': 'b', '2': 'c',
                  'foo': 'a', 'baz': 'c'}
        self.miny_dm.ids = ('foo', 'bar', 'baz')

        with self.assertRaisesRegexp(KeyError, r"second.*\(y\).*'bar'\"$"):
            _order_dms(self.minx_dm, self.miny_dm, lookup=lookup)

    def test_nonmatching_ids_strict_true(self):
        # strict=True (the default behavior under test) requires identical
        # ID sets.
        with self.assertRaises(ValueError):
            _order_dms(self.minx_dm, self.minz_dm_extra, strict=True)

    def test_no_matching_ids(self):
        # Even with strict=False, at least some IDs must be shared.
        self.minx_dm.ids = ['foo', 'bar', 'baz']
        self.miny_dm.ids = ['a', 'b', 'c']

        with self.assertRaises(ValueError):
            _order_dms(self.minx_dm, self.miny_dm, strict=False)

    def test_mixed_input_types(self):
        # Both inputs must be the same kind (both DistanceMatrix or both
        # array_like).
        with self.assertRaises(TypeError):
            _order_dms(self.minx, self.minz_dm)

        with self.assertRaises(TypeError):
            _order_dms(self.minz_dm, self.minx)
+
+
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
diff --git a/skbio/stats/distance/tests/test_permanova.py b/skbio/stats/distance/tests/test_permanova.py
new file mode 100644
index 0000000..4c2f1a8
--- /dev/null
+++ b/skbio/stats/distance/tests/test_permanova.py
@@ -0,0 +1,200 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from six import StringIO
+
+from functools import partial
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+from pandas.util.testing import assert_series_equal
+
+from skbio import DistanceMatrix
+from skbio.stats.distance import permanova, PERMANOVA
+
+
class TestPERMANOVA(TestCase):
    """Tests for the functional ``permanova`` interface.

    All results were verified with R (vegan::adonis).

    Permutation-based tests seed numpy's RNG with 0 immediately before each
    call so p-values are reproducible.
    """

    def setUp(self):
        # Distance matrices with and without ties in the ranks, with 2 groups
        # of equal size.
        dm_ids = ['s1', 's2', 's3', 's4']
        self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
        # Grouping as a data frame. Contains an extra sample ('s5') that is
        # not in dm_ids, and rows out of order relative to dm_ids --
        # presumably to exercise permanova's ID matching; confirm against
        # permanova's data frame handling.
        self.df = pd.read_csv(
            StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
                     's1,Control'), index_col=0)

        self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
                                       [1, 0, 3, 2],
                                       [1, 3, 0, 3],
                                       [4, 2, 3, 0]], dm_ids)

        self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
                                          [1, 0, 3, 2],
                                          [5, 3, 0, 3],
                                          [4, 2, 3, 0]], dm_ids)

        # Test with 3 groups of unequal size.
        self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
                                 'Treatment1', 'Control', 'Control']

        # Equivalent grouping but with different labels -- groups should be
        # assigned different integer labels but results should be the same.
        self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']

        self.dm_unequal = DistanceMatrix(
            [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
             [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
             [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
             [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
             [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
             [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
            ['s1', 's2', 's3', 's4', 's5', 's6'])

        # Expected series index is the same across all tests.
        self.exp_index = ['method name', 'test statistic name', 'sample size',
                          'number of groups', 'test statistic', 'p-value',
                          'number of permutations']

        # Stricter series equality testing than the default.
        self.assert_series_equal = partial(assert_series_equal,
                                           check_index_type=True,
                                           check_series_type=True)

    def test_call_ties(self):
        # Ensure we get the same results if we rerun the method using the same
        # inputs. Also ensure we get the same results if we run the method
        # using a grouping vector or a data frame with equivalent groupings.
        exp = pd.Series(index=self.exp_index,
                        data=['PERMANOVA', 'pseudo-F', 4, 2, 2.0, 0.671, 999])

        for _ in range(2):
            np.random.seed(0)
            obs = permanova(self.dm_ties, self.grouping_equal)
            self.assert_series_equal(obs, exp)

        for _ in range(2):
            np.random.seed(0)
            obs = permanova(self.dm_ties, self.df, column='Group')
            self.assert_series_equal(obs, exp)

    def test_call_no_ties(self):
        exp = pd.Series(index=self.exp_index,
                        data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, 0.332, 999])
        np.random.seed(0)
        obs = permanova(self.dm_no_ties, self.grouping_equal)
        self.assert_series_equal(obs, exp)

    def test_call_no_permutations(self):
        # With zero permutations the p-value cannot be computed, so it is NaN.
        exp = pd.Series(index=self.exp_index,
                        data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, np.nan, 0])
        obs = permanova(self.dm_no_ties, self.grouping_equal, permutations=0)
        self.assert_series_equal(obs, exp)

    def test_call_unequal_group_sizes(self):
        exp = pd.Series(index=self.exp_index,
                        data=['PERMANOVA', 'pseudo-F', 6, 3, 0.578848, 0.645,
                              999])

        np.random.seed(0)
        obs = permanova(self.dm_unequal, self.grouping_unequal)
        self.assert_series_equal(obs, exp)

        # Relabeled grouping must produce identical results.
        np.random.seed(0)
        obs = permanova(self.dm_unequal, self.grouping_unequal_relabeled)
        self.assert_series_equal(obs, exp)
+
+
class TestPERMANOVAClass(TestCase):
    """Tests for the class-based ``PERMANOVA`` interface.

    All results were verified with R (vegan::adonis).

    Mirrors ``TestPERMANOVA`` above, but exercises calling pre-constructed
    ``PERMANOVA`` instances (optionally with a permutation count argument)
    and inspecting the attributes of the returned results object.
    """

    def setUp(self):
        # Distance matrices with and without ties in the ranks, with 2 groups
        # of equal size.
        dm_ids = ['s1', 's2', 's3', 's4']
        grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
        # Data frame grouping with an extra sample ('s5') and shuffled rows,
        # equivalent to grouping_equal for the IDs in dm_ids.
        df = pd.read_csv(
            StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
                     's1,Control'), index_col=0)

        self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
                                       [1, 0, 3, 2],
                                       [1, 3, 0, 3],
                                       [4, 2, 3, 0]], dm_ids)

        self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
                                          [1, 0, 3, 2],
                                          [5, 3, 0, 3],
                                          [4, 2, 3, 0]], dm_ids)

        # Test with 3 groups of unequal size.
        grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
                            'Treatment1', 'Control', 'Control']

        self.dm_unequal = DistanceMatrix(
            [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
             [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
             [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
             [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
             [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
             [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
            ['s1', 's2', 's3', 's4', 's5', 's6'])

        # Pre-built instances shared by the test methods below.
        self.permanova_ties = PERMANOVA(self.dm_ties, grouping_equal)
        self.permanova_no_ties = PERMANOVA(self.dm_no_ties, grouping_equal)
        self.permanova_ties_df = PERMANOVA(self.dm_ties, df, column='Group')
        self.permanova_unequal = PERMANOVA(self.dm_unequal, grouping_unequal)

    def test_call_ties(self):
        # Ensure we get the same results if we rerun the method on the same
        # object. Also ensure we get the same results if we run the method
        # using a grouping vector or a data frame with equivalent groupings.
        for inst in self.permanova_ties, self.permanova_ties_df:
            for trial in range(2):
                np.random.seed(0)
                obs = inst()
                self.assertEqual(obs.sample_size, 4)
                npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
                self.assertAlmostEqual(obs.statistic, 2.0)
                self.assertAlmostEqual(obs.p_value, 0.671)
                self.assertEqual(obs.permutations, 999)

    def test_call_no_ties(self):
        np.random.seed(0)
        obs = self.permanova_no_ties()
        self.assertEqual(obs.sample_size, 4)
        npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
        self.assertAlmostEqual(obs.statistic, 4.4)
        self.assertAlmostEqual(obs.p_value, 0.332)
        self.assertEqual(obs.permutations, 999)

    def test_call_no_permutations(self):
        # With zero permutations no p-value can be computed; the class-based
        # interface reports None (the functional interface reports NaN).
        obs = self.permanova_no_ties(0)
        self.assertEqual(obs.sample_size, 4)
        npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
        self.assertAlmostEqual(obs.statistic, 4.4)
        self.assertEqual(obs.p_value, None)
        self.assertEqual(obs.permutations, 0)

    def test_call_unequal_group_sizes(self):
        np.random.seed(0)
        obs = self.permanova_unequal()
        self.assertEqual(obs.sample_size, 6)
        npt.assert_array_equal(obs.groups,
                               ['Control', 'Treatment1', 'Treatment2'])
        # 6 decimal places to match the precision verified against R.
        self.assertAlmostEqual(obs.statistic, 0.578848, 6)
        self.assertAlmostEqual(obs.p_value, 0.645)
        self.assertEqual(obs.permutations, 999)
+
+
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
diff --git a/skbio/stats/gradient.py b/skbio/stats/gradient.py
new file mode 100644
index 0000000..9b0bd9c
--- /dev/null
+++ b/skbio/stats/gradient.py
@@ -0,0 +1,852 @@
+r"""
+Gradient analyses (:mod:`skbio.stats.gradient`)
+===============================================
+
+.. currentmodule:: skbio.stats.gradient
+
+This module provides functionality for performing gradient analyses.
+The algorithms included in this module mainly allow performing analyses of
+volatility on time series data, but they can be applied to any data that
+contains a gradient.
+
+Classes
+-------
+
+.. autosummary::
+   :toctree: generated/
+
+   GradientANOVA
+   AverageGradientANOVA
+   TrajectoryGradientANOVA
+   FirstDifferenceGradientANOVA
+   WindowDifferenceGradientANOVA
+   GroupResults
+   CategoryResults
+   GradientANOVAResults
+
+Examples
+--------
+Assume we have the following coordinates:
+
+>>> import numpy as np
+>>> import pandas as pd
+>>> from skbio.stats.gradient import AverageGradientANOVA
+>>> coord_data = {'PC.354': np.array([0.2761, -0.0341, 0.0633, 0.1004]),
+...               'PC.355': np.array([0.2364, 0.2186, -0.0301, -0.0225]),
+...               'PC.356': np.array([0.2208, 0.0874, -0.3519, -0.0031]),
+...               'PC.607': np.array([-0.1055, -0.4140, -0.15, -0.116]),
+...               'PC.634': np.array([-0.3716, 0.1154, 0.0721, 0.0898])}
+>>> coords = pd.DataFrame.from_dict(coord_data, orient='index')
+
+the following metadata map:
+
+>>> metadata_map = {'PC.354': {'Treatment': 'Control', 'Weight': '60'},
+...            'PC.355': {'Treatment': 'Control', 'Weight': '55'},
+...            'PC.356': {'Treatment': 'Control', 'Weight': '50'},
+...            'PC.607': {'Treatment': 'Fast', 'Weight': '65'},
+...            'PC.634': {'Treatment': 'Fast', 'Weight': '68'}}
+>>> metadata_map = pd.DataFrame.from_dict(metadata_map, orient='index')
+
+and the following array with the proportion explained of each coord:
+
+>>> prop_expl = np.array([25.6216, 15.7715, 14.1215, 11.6913, 9.8304])
+
+Then to compute the average trajectory of this data:
+
+>>> av = AverageGradientANOVA(coords, prop_expl, metadata_map,
+...                     trajectory_categories=['Treatment'],
+...                     sort_category='Weight')
+>>> trajectory_results = av.get_trajectories()
+
+Check the algorithm used to compute the trajectory_results:
+
+>>> print(trajectory_results.algorithm)
+avg
+
+Check if we weighted the data or not:
+
+>>> print(trajectory_results.weighted)
+False
+
+Check the trajectory_results results of one of the categories:
+
+>>> print(trajectory_results.categories[0].category)
+Treatment
+>>> print(trajectory_results.categories[0].probability)
+0.0118478282382
+
+Check the trajectory_results results of one group of one of the categories:
+
+>>> print(trajectory_results.categories[0].groups[0].name)
+Control
+>>> print(trajectory_results.categories[0].groups[0].trajectory)
+[ 3.52199973  2.29597001  3.20309816]
+>>> print(trajectory_results.categories[0].groups[0].info)
+{'avg': 3.007022633956606}
+"""
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from copy import deepcopy
+from collections import defaultdict
+from numbers import Integral
+
+import numpy as np
+from natsort import natsorted
+from scipy.stats import f_oneway
+
+
+def _weight_by_vector(trajectories, w_vector):
+    r"""weights the values of `trajectories` given a weighting vector
+    `w_vector`.
+
+    Each value in `trajectories` will be weighted by the 'rate of change'
+    to 'optimal rate of change' ratio. The 'rate of change' of a vector
+    measures how each point in the vector changes with respect to its
+    predecessor point. The 'optimal rate of change' is the rate of change
+    in which each point in the vector performs the same change than its
+    predecessor, meaning that when calling this function over evenly spaced
+    `w_vector` values, no change will be reflected on the output.
+
+    Parameters
+    ----------
+    trajectories: pandas.DataFrame
+        Values to weight
+    w_vector: pandas.Series
+        Values used to weight `trajectories`
+
+    Returns
+    -------
+    pandas.DataFrame
+        A weighted version of `trajectories`.
+
+    Raises
+    ------
+    ValueError
+        If `trajectories` and `w_vector` don't have equal lengths
+        If `w_vector` is not a gradient
+    TypeError
+        If `trajectories` and `w_vector` are not iterables
+    """
+    try:
+        if len(trajectories) != len(w_vector):
+            raise ValueError("trajectories (%d) & w_vector (%d) must be equal "
+                             "lengths" % (len(trajectories), len(w_vector)))
+    except TypeError:
+        raise TypeError("trajectories and w_vector must be iterables")
+
+    # check no repeated values are passed in the weighting vector
+    if len(set(w_vector)) != len(w_vector):
+        raise ValueError("The weighting vector must be a gradient")
+
+    # no need to weight in case of a one element vector
+    if len(w_vector) == 1:
+        return trajectories
+
+    # Cast to float so divisions have a floating point resolution
+    total_length = float(max(w_vector) - min(w_vector))
+
+    # Reflects the expected gradient between subsequent values in w_vector
+    # the first value isn't weighted so subtract one from the number of
+    # elements
+    optimal_gradient = total_length/(len(w_vector)-1)
+
+    # for all elements apply the weighting function
+    for i, idx in enumerate(trajectories.index):
+        # Skipping the first element is it doesn't need to be weighted
+        if i != 0:
+            trajectories.ix[idx] = (trajectories.ix[idx] * optimal_gradient /
+                                    (np.abs((w_vector[i] - w_vector[i-1]))))
+
+    return trajectories
+
+
def _ANOVA_trajectories(category, res_by_group):
    r"""Run a one-way ANOVA over `res_by_group`

    If ANOVA cannot be run in the current category (because either there is
    only one group in category or there is a group with only one member)
    the result CategoryResults instance has `probability` and `groups` set
    to None and message is set to a string explaining why ANOVA was not run

    Parameters
    ----------
    category : str
        The name of the metadata category being tested
    res_by_group : list of GroupResults
        The trajectory results for each group within `category`

    Returns
    -------
    CategoryResults
        An instance of CategoryResults holding the results of the trajectory
        analysis applied on `category`
    """
    # If there is only one group under category we cannot run ANOVA
    if len(res_by_group) == 1:
        return CategoryResults(category, None, None,
                               'Only one value in the group.')
    # Check if groups can be tested using ANOVA. ANOVA testing requires
    # all elements to have at least size greater to one.
    values = [res.trajectory.astype(float) for res in res_by_group]
    if any(len(value) == 1 for value in values):
        return CategoryResults(category, None, None,
                               'This group can not be used. All groups '
                               'should have more than 1 element.')
    # We are ok to run ANOVA
    _, p_val = f_oneway(*values)
    return CategoryResults(category, p_val, res_by_group, None)
+
+
class GroupResults(object):
    """Hold the trajectory results computed for one group of a category.

    Attributes
    ----------
    name : str
        The name of the group within the metadata category
    trajectory : array like
        The result trajectory in an 1-D numpy array
    mean : float
        The mean of the trajectory
    info : dict
        Any extra information computed by the trajectory algorithm. Depends on
        the algorithm
    message : str
        A message with information of the execution of the algorithm

    """

    def __init__(self, name, trajectory, mean, info, message):
        self.name = name
        self.trajectory = trajectory
        self.mean = mean
        self.info = info
        self.message = message

    def to_files(self, out_f, raw_f):
        r"""Write this group's trajectory analysis results as text.

        Parameters
        ----------
        out_f : file-like object
            Receives the summary (mean and extra info). Must have a `write`
            method; the caller is responsible for closing it if needed.
        raw_f : file-like object
            Receives the raw trajectory values. Must have a `write` method;
            the caller is responsible for closing it if needed.
        """
        out_f.write('For group "%s", the group means is: %f\n'
                    % (self.name, self.mean))
        raw_f.write('For group "%s":\n' % self.name)

        # An algorithm message, when present, goes to both outputs.
        if self.message:
            message_line = '%s\n' % self.message
            out_f.write(message_line)
            raw_f.write(message_line)

        # Sort the info items so the output is deterministic.
        info_items = sorted(((k, v) for k, v in self.info.items()))
        out_f.write('The info is: %s\n' % info_items)

        trajectory_str = ", ".join(map(str, self.trajectory))
        raw_f.write('The trajectory is:\n[%s]\n' % trajectory_str)
+
+
class CategoryResults(object):
    """Hold the trajectory results computed for one metadata category.

    Attributes
    ----------
    category : str
        The name of the category
    probability : float
        The ANOVA probability that the category groups are independent
    groups : list of GroupResults
        The trajectory results for each group in the category
    message : str
        A message with information of the execution of the algorithm

    """

    def __init__(self, category, probability, groups, message):
        self.category = category
        self.probability = probability
        self.groups = groups
        self.message = message

    def to_files(self, out_f, raw_f):
        r"""Write this category's trajectory analysis results as text.

        Parameters
        ----------
        out_f : file-like object
            Receives the summary (probability and per-group info). Must have
            a `write` method; the caller is responsible for closing it if
            needed.
        raw_f : file-like object
            Receives the raw trajectory values. Must have a `write` method;
            the caller is responsible for closing it if needed.
        """
        # A missing probability means ANOVA was not run for this category;
        # only the explanatory message is reported.
        if self.probability is None:
            out_f.write('Grouped by "%s": %s\n'
                        % (self.category, self.message))
            return

        out_f.write('Grouped by "%s", probability: %f\n'
                    % (self.category, self.probability))
        raw_f.write('Grouped by "%s"\n' % self.category)
        for group in self.groups:
            group.to_files(out_f, raw_f)
+
+
class GradientANOVAResults(object):
    """Hold the complete set of trajectory results for an analysis run.

    Attributes
    ----------
    algorithm : str
        The algorithm used to compute trajectories
    weighted : bool
        If true, a weighting vector was used
    categories : list of CategoryResults
        The trajectory results for each metadata category

    """

    def __init__(self, algorithm, weighted, categories):
        self.algorithm = algorithm
        self.weighted = weighted
        self.categories = categories

    def to_files(self, out_f, raw_f):
        r"""Write the full trajectory analysis results as text.

        Parameters
        ----------
        out_f : file-like object
            Receives the analysis summary. Must have a `write` method; the
            caller is responsible for closing it if needed.
        raw_f : file-like object
            Receives the raw trajectory values. Must have a `write` method;
            the caller is responsible for closing it if needed.
        """
        # Both outputs start with the same header lines.
        header = 'Trajectory algorithm: %s\n' % self.algorithm
        for fh in (out_f, raw_f):
            fh.write(header)
            if self.weighted:
                fh.write('** This output is weighted **\n')
            fh.write('\n')

        # Each category's results, separated by blank lines.
        for cat_results in self.categories:
            cat_results.to_files(out_f, raw_f)
            out_f.write('\n')
            raw_f.write('\n')
+
+
+class GradientANOVA(object):
+    r"""Base class for the Trajectory algorithms
+
+    Parameters
+    ----------
+    coords : pandas.DataFrame
+        The coordinates for each sample id
+    prop_expl : array like
+        The numpy 1-D array with the proportion explained by each axis in
+        coords
+    metadata_map : pandas.DataFrame
+        The metadata map, indexed by sample ids and columns are metadata
+        categories
+    trajectory_categories : list of str, optional
+        A list of metadata categories to use to create the trajectories. If
+        None is passed, the trajectories for all metadata categories are
+        computed. Default: None, compute all of them
+    sort_category : str, optional
+        The metadata category to use to sort the trajectories. Default: None
+    axes : int, optional
+        The number of axes to account while doing the trajectory specific
+        calculations. Pass 0 to compute all of them. Default: 3
+    weighted : bool, optional
+        If true, the output is weighted by the space between samples in the
+        `sort_category` column
+
+    Raises
+    ------
+    ValueError
+        If any category of `trajectory_categories` is not present in
+        `metadata_map`
+        If `sort_category` is not present in `metadata_map`
+        If `axes` is not between 0 and the maximum number of axes available
+        If `weighted` is True and no `sort_category` is provided
+        If `weighted` is True and the values under `sort_category` are not
+        numerical
+        If `coords` and `metadata_map` does not have samples in common
+    """
+    # Should be defined by the derived classes
+    _alg_name = None
+
+    def __init__(self, coords, prop_expl, metadata_map,
+                 trajectory_categories=None, sort_category=None, axes=3,
+                 weighted=False):
+        if not trajectory_categories:
+            # If trajectory_categories is not provided, use all the categories
+            # present in the metadata map
+            trajectory_categories = metadata_map.keys()
+        else:
+            # Check that trajectory_categories are in metadata_map
+            for category in trajectory_categories:
+                if category not in metadata_map:
+                    raise ValueError("Category %s not present in metadata."
+                                     % category)
+
+        # Check that sort_categories is in metadata_map
+        if sort_category and sort_category not in metadata_map:
+            raise ValueError("Sort category %s not present in metadata."
+                             % sort_category)
+
+        if axes == 0:
+            # If axes == 0, we should compute the trajectories for all axes
+            axes = len(prop_expl)
+        elif axes > len(prop_expl) or axes < 0:
+            # Axes should be 0 <= axes <= len(prop_expl)
+            raise ValueError("axes should be between 0 and the max number of "
+                             "axes available (%d), found: %d "
+                             % (len(prop_expl), axes))
+
+        # Restrict coordinates to those axes that we actually need to compute
+        self._coords = coords.ix[:, :axes-1]
+        self._prop_expl = prop_expl[:axes]
+        self._metadata_map = metadata_map
+        self._weighted = weighted
+
+        # Remove any samples from coords not present in mapping file
+        # and remove any samples from metadata_map not present in coords
+        self._normalize_samples()
+
+        # Create groups
+        self._make_groups(trajectory_categories, sort_category)
+
+        # Compute the weighting_vector
+        self._weighting_vector = None
+        if weighted:
+            if not sort_category:
+                raise ValueError("You should provide a sort category if you "
+                                 "want to weight the trajectories")
+            try:
+                self._weighting_vector = \
+                    self._metadata_map[sort_category].astype(np.float64)
+            except ValueError:
+                raise ValueError("The sorting category must be numeric")
+
+        # Initialize the message buffer
+        self._message_buffer = []
+
+    def get_trajectories(self):
+        r"""Compute the trajectories for each group in each category and run
+        ANOVA over the results to test group independence.
+
+        Returns
+        -------
+        GradientANOVAResults
+            An instance of GradientANOVAResults holding the results.
+        """
+        result = GradientANOVAResults(self._alg_name, self._weighted, [])
+        # Loop through all the categories that we should compute
+        # the trajectories
+        for cat, cat_groups in self._groups.items():
+            # Loop through all the category values present in the current
+            # category and compute the trajectory for each of them
+            res_by_group = [self._get_group_trajectories(group, sample_ids)
+                            for group, sample_ids in cat_groups.items()]
+
+            result.categories.append(_ANOVA_trajectories(cat, res_by_group))
+
+        return result
+
+    def _normalize_samples(self):
+        r"""Ensures that `self._coords` and `self._metadata_map` have the same
+        sample ids
+
+        Raises
+        ------
+        ValueError
+            If `coords` and `metadata_map` does not have samples in common
+        """
+        # Figure out the sample ids in common
+        coords_sample_ids = set(self._coords.index)
+        mm_sample_ids = set(self._metadata_map.index)
+        sample_ids = coords_sample_ids.intersection(mm_sample_ids)
+
+        # Check if they actually have sample ids in common
+        if not sample_ids:
+            raise ValueError("Coordinates and metadata map had no samples "
+                             "in common")
+
+        # Need to take a subset of coords
+        if coords_sample_ids != sample_ids:
+            self._coords = self._coords.ix[sample_ids]
+        # Need to take a subset of metadata_map
+        if mm_sample_ids != sample_ids:
+            self._metadata_map = self._metadata_map.ix[sample_ids]
+
+    def _make_groups(self, trajectory_categories, sort_category):
+        r"""Groups the sample ids in `self._metadata_map` by the values in
+        `trajectory_categories`
+
+        Creates `self._groups`, a dictionary keyed by category and values are
+        dictionaries in which the keys represent the group name within the
+        category and values are ordered lists of sample ids
+
+        If `sort_category` is not None, the sample ids are sorted based on the
+        values under this category in the metadata map. Otherwise, they are
+        sorted using the sample id.
+
+        Parameters
+        ----------
+        trajectory_categories : list of str
+            A list of metadata categories to use to create the groups.
+            Default: None, compute all of them
+        sort_category : str or None
+            The category from self._metadata_map to use to sort groups
+        """
+        # If sort_category is provided, we used the value of such category to
+        # sort. Otherwise, we use the sample id.
+        if sort_category:
+            def sort_val(sid):
+                return self._metadata_map[sort_category][sid]
+        else:
+            def sort_val(sid):
+                return sid
+
+        self._groups = defaultdict(dict)
+        for cat in trajectory_categories:
+            # Group samples by category
+            gb = self._metadata_map.groupby(cat)
+            for g, df in gb:
+                self._groups[cat][g] = natsorted(df.index, key=sort_val)
+
+    def _get_group_trajectories(self, group_name, sids):
+        r"""Compute the trajectory results for `group_name` containing the
+        samples `sids`.
+
+        Weights the data if `self._weighted` is True and ``len(sids) > 1``
+
+        Parameters
+        ----------
+        group_name : str
+            The name of the group
+        sids : list of str
+            The sample ids in the group
+
+        Returns
+        -------
+        GroupResults
+            The trajectory results for the given group
+
+        Raises
+        ------
+        RuntimeError
+            If sids is an empty list
+        """
+        # We multiply the coord values with the prop_expl
+        trajectories = self._coords.ix[sids] * self._prop_expl
+
+        if trajectories.empty:
+            # Raising a RuntimeError since in a usual execution this should
+            # never happen. The only way this can happen is if the user
+            # directly calls this method, which shouldn't be done
+            # (that's why the method is private)
+            raise RuntimeError("No samples to process, an empty list cannot "
+                               "be processed")
+
+        # The weighting can only be done over trajectories with a length
+        # greater than 1
+        if self._weighted and len(sids) > 1:
+            trajectories_copy = deepcopy(trajectories)
+            try:
+                trajectories = _weight_by_vector(trajectories_copy,
+                                                 self._weighting_vector[sids])
+            except (FloatingPointError, ValueError):
+                self._message_buffer.append("Could not weight group, no "
+                                            "gradient in the the "
+                                            "weighting vector.\n")
+                trajectories = trajectories_copy
+
+        return self._compute_trajectories_results(group_name,
+                                                  trajectories.ix[sids])
+
+    def _compute_trajectories_results(self, group_name, trajectories):
+        r"""Do the actual trajectories computation over trajectories
+
+        Parameters
+        ----------
+        group_name : str
+            The name of the group
+        trajectories : pandas.DataFrame
+            The sorted trajectories for each sample in the group
+
+        Raises
+        ------
+        NotImplementedError
+            This is the base class
+        """
+        raise NotImplementedError("No algorithm is implemented on the base "
+                                  "class.")
+
+
class AverageGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the RMS average algorithm

    For each group in a category, it computes the average point among the
    samples in such group and then computes the norm of each sample from the
    averaged one.

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'avg'

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the average
            trajectories method
        """
        # The group center: mean point over all samples in the group
        center = np.average(trajectories, axis=0)
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(center)])
            calc = {'avg': trajectory[0]}
        else:
            # Distance of every sample to the group center. ``.values``
            # replaces ``Series.get_values()``, which was removed in pandas.
            trajectory = np.array([np.linalg.norm(row.values - center)
                                   for _, row in trajectories.iterrows()])
            calc = {'avg': np.average(trajectory)}

        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
+
+
class TrajectoryGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the RMS trajectory algorithm

    For each group in a category, each component of the result trajectory is
    computed as taking the sorted list of samples in the group and taking the
    norm of the coordinates of the 2nd sample minus 1st sample, 3rd sample
    minus 2nd sample and so on.

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'trajectory'

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the trajectory
            method
        """
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(trajectories)])
            calc = {'2-norm': trajectory[0]}
        else:
            # Norm of the difference between each pair of consecutive rows
            # (rows are already ordered by the sample sort). Positional
            # ``iloc``/``.values`` replace the removed pandas ``.ix`` and
            # ``get_values()``.
            trajectory = \
                np.array([np.linalg.norm(trajectories.iloc[i + 1].values -
                                         trajectories.iloc[i].values)
                          for i in range(len(trajectories) - 1)])
            calc = {'2-norm': np.linalg.norm(trajectory)}

        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
+
+
class FirstDifferenceGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the first difference algorithm

    It calculates the norm for all the time-points and then calculates the
    first difference for each resulting point

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'diff'

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the first difference
            method
        """
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(trajectories)])
            calc = {'mean': trajectory[0], 'std': 0}
        elif len(trajectories) == 2:
            # Norm of the difference between the two sample *rows*. The
            # previous ``trajectories[1] - trajectories[0]`` selected columns
            # 1 and 0 instead, inconsistent with the row-difference
            # semantics of the general branch below.
            trajectory = np.array([np.linalg.norm(trajectories.iloc[1] -
                                                  trajectories.iloc[0])])
            calc = {'mean': trajectory[0], 'std': 0}
        else:
            # Norms of consecutive row differences; ``iloc``/``.values``
            # replace the removed pandas ``.ix``/``get_values()``
            vec_norm = \
                np.array([np.linalg.norm(trajectories.iloc[i + 1].values -
                                         trajectories.iloc[i].values)
                          for i in range(len(trajectories) - 1)])
            trajectory = np.diff(vec_norm)
            calc = {'mean': np.mean(trajectory), 'std': np.std(trajectory)}

        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
+
+
class WindowDifferenceGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the modified first difference
    algorithm

    It calculates the norm for all the time-points and subtracts the mean of
    the next number of elements specified in `window_size` and the current
    element.

    Parameters
    ----------
    coords : pandas.DataFrame
        The coordinates for each sample id
    prop_expl : array like
        The numpy 1-D array with the proportion explained by each axis in
        coords
    metadata_map : pandas.DataFrame
        The metadata map, indexed by sample ids and columns are metadata
        categories
    window_size : int or long
        The window size to use while computing the differences

    Raises
    ------
    ValueError
        If the window_size is not a positive integer

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'wdiff'

    def __init__(self, coords, prop_expl, metadata_map, window_size, **kwargs):
        super(WindowDifferenceGradientANOVA, self).__init__(coords, prop_expl,
                                                            metadata_map,
                                                            **kwargs)

        # The window must span a positive whole number of points
        if not isinstance(window_size, Integral) or window_size < 1:
            raise ValueError("The window_size must be a positive integer")

        self._window_size = window_size

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        If the first difference cannot be calculated with the provided window
        size, no difference is applied and a message is added to the results.

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the windowed
            difference method
        """
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(trajectories)])
            # Store the scalar for consistency with the sibling subclasses
            # (this previously stored the 1-element array itself)
            calc = {'mean': trajectory[0], 'std': 0}
        elif len(trajectories) == 2:
            # Norm of the difference between the two sample *rows*; the
            # previous ``trajectories[1] - trajectories[0]`` selected columns
            trajectory = np.array([np.linalg.norm(trajectories.iloc[1] -
                                                  trajectories.iloc[0])])
            calc = {'mean': trajectory[0], 'std': 0}
        else:
            # Norms of consecutive row differences; ``iloc``/``.values``
            # replace the removed pandas ``.ix``/``get_values()``
            vec_norm = \
                np.array([np.linalg.norm(trajectories.iloc[i + 1].values -
                                         trajectories.iloc[i].values)
                          for i in range(len(trajectories) - 1)])
            # Windowed first differences won't be computable on every group,
            # especially given the variation in size that a trajectory tends
            # to have
            if len(vec_norm) <= self._window_size:
                trajectory = vec_norm
                self._message_buffer.append("Cannot calculate the first "
                                            "difference with a window of size "
                                            "(%d)." % self._window_size)
            else:
                # Pad by replicating the last element window_size times
                # (single append instead of O(n^2) repeated np.append calls)
                vec_norm = np.append(vec_norm,
                                     np.repeat(vec_norm[-1],
                                               self._window_size))
                trajectory = []
                for idx in range(len(vec_norm) - self._window_size):
                    # Mean has to be over axis 0 so it handles arrays of
                    # arrays
                    element = np.mean(vec_norm[(idx + 1):
                                               (idx + 1 + self._window_size)],
                                      axis=0)
                    trajectory.append(element - vec_norm[idx])
                trajectory = np.array(trajectory)

            calc = {'mean': np.mean(trajectory), 'std': np.std(trajectory)}

        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
diff --git a/skbio/stats/ordination/__init__.py b/skbio/stats/ordination/__init__.py
new file mode 100644
index 0000000..20ffa79
--- /dev/null
+++ b/skbio/stats/ordination/__init__.py
@@ -0,0 +1,123 @@
+r"""
+Ordination methods (:mod:`skbio.stats.ordination`)
+==================================================
+
+.. currentmodule:: skbio.stats.ordination
+
+This module contains several ordination methods, including Principal
+Coordinate Analysis, Correspondence Analysis, Redundancy Analysis and
+Canonical Correspondence Analysis.
+
+Classes
+-------
+
+.. autosummary::
+   :toctree: generated/
+
+   PCoA
+   CA
+   RDA
+   CCA
+   OrdinationResults
+
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   mean_and_std
+   corr
+   scale
+   svd_rank
+
+Testing Utilities
+-----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   assert_ordination_results_equal
+
+Examples
+--------
+
+This is an artificial dataset (table 11.3 in [1]_) that represents fish
+abundance in different sites (`Y`, the response variables) and
+environmental variables (`X`, the explanatory variables).
+
+>>> import numpy as np
+>>> X = np.array([[1.0, 0.0, 1.0, 0.0],
+...               [2.0, 0.0, 1.0, 0.0],
+...               [3.0, 0.0, 1.0, 0.0],
+...               [4.0, 0.0, 0.0, 1.0],
+...               [5.0, 1.0, 0.0, 0.0],
+...               [6.0, 0.0, 0.0, 1.0],
+...               [7.0, 1.0, 0.0, 0.0],
+...               [8.0, 0.0, 0.0, 1.0],
+...               [9.0, 1.0, 0.0, 0.0],
+...               [10.0, 0.0, 0.0, 1.0]])
+>>> Y = np.array([[1, 0, 0, 0, 0, 0, 2, 4, 4],
+...               [0, 0, 0, 0, 0, 0, 5, 6, 1],
+...               [0, 1, 0, 0, 0, 0, 0, 2, 3],
+...               [11, 4, 0, 0, 8, 1, 6, 2, 0],
+...               [11, 5, 17, 7, 0, 0, 6, 6, 2],
+...               [9, 6, 0, 0, 6, 2, 10, 1, 4],
+...               [9, 7, 13, 10, 0, 0, 4, 5, 4],
+...               [7, 8, 0, 0, 4, 3, 6, 6, 4],
+...               [7, 9, 10, 13, 0, 0, 6, 2, 0],
+...               [5, 10, 0, 0, 2, 4, 0, 1, 3]])
+
+We can now create a CCA object to perform canonical correspondence
+analysis. Matrix `X` contains a continuous variable (depth) and a
+categorical one (substrate type) encoded using a one-hot encoding. We
+explicitly need to avoid perfect collinearity, so we'll drop one of
+the substrate types (the last column of `X`). We also expect to
+increase pandas integration to ease analyses.
+
+>>> from skbio.stats.ordination import CCA
+>>> ordination_result = CCA(Y, X[:, :-1],
+...                         ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
+...                          'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
+...                         ['Species0', 'Species1', 'Species2', 'Species3',
+...                          'Species4', 'Species5', 'Species6', 'Species7',
+...                          'Species8'])
+
+Exploring the results we see that the first three axes explain about
+80% of all the variance.
+
+>>> sc_2 = ordination_result.scores(scaling=2)
+>>> print(sc_2.proportion_explained)
+[ 0.46691091  0.23832652  0.10054837  0.10493671  0.04480535  0.02974698
+  0.01263112  0.00156168  0.00053235]
+
+References
+----------
+
+.. [1] Legendre P. and Legendre L. 1998. Numerical Ecology. Elsevier,
+   Amsterdam.
+
+"""
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
from numpy.testing import Tester

from ._correspondence_analysis import CA
from ._redundancy_analysis import RDA
from ._canonical_correspondence_analysis import CCA
from ._principal_coordinate_analysis import PCoA
from ._base import OrdinationResults
from ._utils import (mean_and_std, scale, svd_rank, corr,
                     assert_ordination_results_equal)

__all__ = ['CA', 'RDA', 'CCA', 'PCoA', 'OrdinationResults', 'mean_and_std',
           'scale', 'svd_rank', 'corr', 'assert_ordination_results_equal']

# Expose ``skbio.stats.ordination.test()`` so this subpackage's test suite
# can be run through numpy's test runner.
test = Tester().test
diff --git a/skbio/stats/ordination/_base.py b/skbio/stats/ordination/_base.py
new file mode 100644
index 0000000..5479f40
--- /dev/null
+++ b/skbio/stats/ordination/_base.py
@@ -0,0 +1,472 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import zip
+
+import warnings
+from functools import partial
+
+import numpy as np
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+from IPython.core.pylabtools import print_figure
+from IPython.core.display import Image, SVG
+
+from skbio._base import SkbioObject
+from skbio.stats._misc import _pprint_strs
+
+# avoid flake8 unused import error
+Axes3D
+
+
+class OrdinationResults(SkbioObject):
+    """Store ordination results, providing serialization and plotting support.
+
+    Stores various components of ordination results. Provides methods for
+    serializing/deserializing results, as well as generation of basic
+    matplotlib 3-D scatterplots. Will automatically display PNG/SVG
+    representations of itself within the IPython Notebook.
+
+    Attributes
+    ----------
+    eigvals : 1-D numpy array
+        The result eigenvalues
+    species : 2-D numpy array
+        The result coordinates for each species
+    site : 2-D numpy array
+        The results coordinates for each site
+    biplot : 2-D numpy array
+        The result biplot coordinates
+    site_constraints : 2-D numpy array
+        The result coordinates for each site constraint
+    proportion_explained : 1-D numpy array
+        The proportion explained by each eigenvector
+    species_ids : list of str
+        The species identifiers
+    site_ids : list of str
+        The site identifiers
+    png
+    svg
+
+    """
+    default_write_format = 'ordination'
+
    def __init__(self, eigvals, species=None, site=None, biplot=None,
                 site_constraints=None, proportion_explained=None,
                 site_ids=None, species_ids=None) if False else None
+
+    @classmethod
+    def from_file(cls, ord_res_f):
+        """Load ordination results from text file.
+
+        .. note:: Deprecated in scikit-bio 0.2.0-dev
+           ``from_file`` will be removed in scikit-bio 0.3.0. It is replaced by
+           ``read``, which is a more general method for deserializing
+           ordination results. ``read`` supports multiple file formats,
+           automatic file format detection, etc. by taking advantage of
+           scikit-bio's I/O registry system. See :mod:`skbio.io` for more
+           details.
+
+        Creates an ``OrdinationResults`` instance from a ``ordination``
+        formatted file. See :mod:`skbio.io.ordination` for the format
+        specification.
+
+        Parameters
+        ----------
+        ord_res_f: filepath or filehandle
+            File to read from.
+
+        Returns
+        -------
+        OrdinationResults
+            Instance of type `cls` containing the parsed contents of
+            `ord_res_f`.
+
+        Raises
+        ------
+        OrdinationFormatError
+            If the format of the file is not valid, or if the shapes of the
+            different sections of the file are not consistent.
+
+        See Also
+        --------
+        read
+
+        """
+        warnings.warn(
+            "OrdinationResults.from_file is deprecated and will be removed in "
+            "scikit-bio 0.3.0. Please update your code to use "
+            "OrdinationResults.read.", DeprecationWarning)
+        return cls.read(ord_res_f, format='ordination')
+
+    def to_file(self, out_f):
+        """Save ordination results to file in text format.
+
+        .. note:: Deprecated in scikit-bio 0.2.0-dev
+           ``to_file`` will be removed in scikit-bio 0.3.0. It is replaced by
+           ``write``, which is a more general method for serializing ordination
+           results. ``write`` supports multiple file formats by taking
+           advantage of scikit-bio's I/O registry system. See :mod:`skbio.io`
+           for more details.
+
+        Serializes ordination results as an ``ordination`` formatted file. See
+        :mod:`skbio.io.ordination` for the format specification.
+
+        Parameters
+        ----------
+        out_f : filepath or filehandle
+            File to write to.
+
+        See Also
+        --------
+        write
+
+        """
+        warnings.warn(
+            "OrdinationResults.to_file is deprecated and will be removed in "
+            "scikit-bio 0.3.0. Please update your code to use "
+            "OrdinationResults.write.", DeprecationWarning)
+        self.write(out_f, format='ordination')
+
+    def __str__(self):
+        """Return a string representation of the ordination results.
+
+        String representation lists ordination results attributes and indicates
+        whether or not they are present. If an attribute is present, its
+        dimensions are listed. A truncated list of species and site IDs are
+        included (if they are present).
+
+        Returns
+        -------
+        str
+            String representation of the ordination results.
+
+        .. shownumpydoc
+
+        """
+        lines = ['Ordination results:']
+
+        attrs = [(self.eigvals, 'Eigvals'),
+                 (self.proportion_explained, 'Proportion explained'),
+                 (self.species, 'Species'),
+                 (self.site, 'Site'),
+                 (self.biplot, 'Biplot'),
+                 (self.site_constraints, 'Site constraints')]
+        for attr, attr_label in attrs:
+            def formatter(e):
+                return 'x'.join(['%d' % s for s in e.shape])
+
+            lines.append(self._format_attribute(attr, attr_label, formatter))
+
+        lines.append(self._format_attribute(self.species_ids, 'Species IDs',
+                                            lambda e: _pprint_strs(e)))
+        lines.append(self._format_attribute(self.site_ids, 'Site IDs',
+                                            lambda e: _pprint_strs(e)))
+
+        return '\n'.join(lines)
+
+    def plot(self, df=None, column=None, axes=(0, 1, 2), axis_labels=None,
+             title='', cmap=None, s=20):
+        """Create a 3-D scatterplot of ordination results colored by metadata.
+
+        Creates a 3-D scatterplot of the ordination results, where each point
+        represents a site. Optionally, these points can be colored by metadata
+        (see `df` and `column` below).
+
+        Parameters
+        ----------
+        df : pandas.DataFrame, optional
+            ``DataFrame`` containing site metadata. Must be indexed by site ID,
+            and all site IDs in the ordination results must exist in the
+            ``DataFrame``. If ``None``, sites (i.e., points) will not be
+            colored by metadata.
+        column : str, optional
+            Column name in `df` to color sites (i.e., points in the plot) by.
+            Cannot have missing data (i.e., ``np.nan``). `column` can be
+            numeric or categorical. If numeric, all values in the column will
+            be cast to ``float`` and mapped to colors using `cmap`. A colorbar
+            will be included to serve as a legend. If categorical (i.e., not
+            all values in `column` could be cast to ``float``), colors will be
+            chosen for each category using evenly-spaced points along `cmap`. A
+            legend will be included. If ``None``, sites (i.e., points) will not
+            be colored by metadata.
+        axes : iterable of int, optional
+            Indices of site coordinates to plot on the x-, y-, and z-axes. For
+            example, if plotting PCoA results, ``axes=(0, 1, 2)`` will plot
+            PC 1 on the x-axis, PC 2 on the y-axis, and PC 3 on the z-axis.
+            Must contain exactly three elements.
+        axis_labels : iterable of str, optional
+            Labels for the x-, y-, and z-axes. If ``None``, labels will be the
+            values of `axes` cast as strings.
+        title : str, optional
+            Plot title.
+        cmap : str or matplotlib.colors.Colormap, optional
+            Name or instance of matplotlib colormap to use for mapping `column`
+            values to colors. If ``None``, defaults to the colormap specified
+            in the matplotlib rc file. Qualitative colormaps (e.g., ``Set1``)
+            are recommended for categorical data, while sequential colormaps
+            (e.g., ``Greys``) are recommended for numeric data. See [1]_ for
+            these colormap classifications.
+        s : scalar or iterable of scalars, optional
+            Size of points. See matplotlib's ``Axes3D.scatter`` documentation
+            for more details.
+
+        Returns
+        -------
+        matplotlib.figure.Figure
+            Figure containing the scatterplot and legend/colorbar if metadata
+            were provided.
+
+        Raises
+        ------
+        ValueError
+            Raised on invalid input, including the following situations:
+
+            - there are not at least three dimensions to plot
+            - there are not exactly three values in `axes`, they are not
+              unique, or are out of range
+            - there are not exactly three values in `axis_labels`
+            - either `df` or `column` is provided without the other
+            - `column` is not in the ``DataFrame``
+            - site IDs in the ordination results are not in `df` or have
+              missing data in `column`
+
+        See Also
+        --------
+        mpl_toolkits.mplot3d.Axes3D.scatter
+
+        Notes
+        -----
+        This method creates basic plots of ordination results, and is intended
+        to provide a quick look at the results in the context of metadata
+        (e.g., from within the IPython Notebook). For more customization and to
+        generate publication-quality figures, we recommend EMPeror [2]_.
+
+        References
+        ----------
+        .. [1] http://matplotlib.org/examples/color/colormaps_reference.html
+        .. [2] EMPeror: a tool for visualizing high-throughput microbial
+           community data. Vazquez-Baeza Y, Pirrung M, Gonzalez A, Knight R.
+           Gigascience. 2013 Nov 26;2(1):16. http://biocore.github.io/emperor/
+
+        Examples
+        --------
+        .. plot::
+
+           Define a distance matrix with four sites labelled A-D:
+
+           >>> from skbio import DistanceMatrix
+           >>> dm = DistanceMatrix([[0., 0.21712454, 0.5007512, 0.91769271],
+           ...                      [0.21712454, 0., 0.45995501, 0.80332382],
+           ...                      [0.5007512, 0.45995501, 0., 0.65463348],
+           ...                      [0.91769271, 0.80332382, 0.65463348, 0.]],
+           ...                     ['A', 'B', 'C', 'D'])
+
+           Define metadata for each site in a ``pandas.DataFrame``:
+
+           >>> import pandas as pd
+           >>> metadata = {
+           ...     'A': {'body_site': 'skin'},
+           ...     'B': {'body_site': 'gut'},
+           ...     'C': {'body_site': 'gut'},
+           ...     'D': {'body_site': 'skin'}}
+           >>> df = pd.DataFrame.from_dict(metadata, orient='index')
+
+           Run principal coordinate analysis (PCoA) on the distance matrix:
+
+           >>> from skbio.stats.ordination import PCoA
+           >>> pcoa_results = PCoA(dm).scores()
+
+           Plot the ordination results, where each site is colored by body site
+           (a categorical variable):
+
+           >>> fig = pcoa_results.plot(df=df, column='body_site',
+           ...                         title='Sites colored by body site',
+           ...                         cmap='Set1', s=50)
+
+        """
+        # Note: New features should not be added to this method and should
+        # instead be added to EMPeror (http://biocore.github.io/emperor/).
+        # Only bug fixes and minor updates should be made to this method.
+
+        coord_matrix = self.site.T
+        self._validate_plot_axes(coord_matrix, axes)
+
+        # derived from
+        # http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
+        fig = plt.figure()
+        ax = fig.add_subplot(111, projection='3d')
+
+        xs = coord_matrix[axes[0]]
+        ys = coord_matrix[axes[1]]
+        zs = coord_matrix[axes[2]]
+
+        point_colors, category_to_color = self._get_plot_point_colors(
+            df, column, self.site_ids, cmap)
+
+        scatter_fn = partial(ax.scatter, xs, ys, zs, s=s)
+        if point_colors is None:
+            plot = scatter_fn()
+        else:
+            plot = scatter_fn(c=point_colors, cmap=cmap)
+
+        if axis_labels is None:
+            axis_labels = ['%d' % axis for axis in axes]
+        elif len(axis_labels) != 3:
+            raise ValueError("axis_labels must contain exactly three elements "
+                             "(found %d elements)." % len(axis_labels))
+
+        ax.set_xlabel(axis_labels[0])
+        ax.set_ylabel(axis_labels[1])
+        ax.set_zlabel(axis_labels[2])
+        ax.set_xticklabels([])
+        ax.set_yticklabels([])
+        ax.set_zticklabels([])
+        ax.set_title(title)
+
+        # create legend/colorbar
+        if point_colors is not None:
+            if category_to_color is None:
+                fig.colorbar(plot)
+            else:
+                self._plot_categorical_legend(ax, category_to_color)
+
+        return fig
+
+    def _validate_plot_axes(self, coord_matrix, axes):
+        """Validate `axes` against coordinates matrix."""
+        num_dims = coord_matrix.shape[0]
+        if num_dims < 3:
+            raise ValueError("At least three dimensions are required to plot "
+                             "ordination results. There are only %d "
+                             "dimension(s)." % num_dims)
+        if len(axes) != 3:
+            raise ValueError("axes must contain exactly three elements (found "
+                             "%d elements)." % len(axes))
+        if len(set(axes)) != 3:
+            raise ValueError("The values provided for axes must be unique.")
+
+        for idx, axis in enumerate(axes):
+            if axis < 0 or axis >= num_dims:
+                raise ValueError("axes[%d] must be >= 0 and < %d." %
+                                 (idx, num_dims))
+
+    def _get_plot_point_colors(self, df, column, ids, cmap):
+        """Return a list of colors for each plot point given a metadata column.
+
+        If `column` is categorical, additionally returns a dictionary mapping
+        each category (str) to color (used for legend creation).
+
+        Parameters
+        ----------
+        df : pandas.DataFrame or None
+            Metadata indexed by plot-point ID. Must be supplied together
+            with `column`, or both must be ``None``.
+        column : str or None
+            Column of `df` providing the value each point is colored by.
+        ids : iterable
+            IDs of the ordination points, in plot order; used to select
+            rows of `df`.
+        cmap : str or matplotlib.colors.Colormap
+            Colormap used for categorical data; numeric data is returned
+            as floats for the caller to map through a colormap itself.
+
+        Returns
+        -------
+        tuple
+            ``(point_colors, category_to_color)``. Both are ``None`` when
+            no metadata was supplied; `category_to_color` is ``None`` for
+            numeric (non-categorical) data.
+
+        """
+        if ((df is None and column is not None) or (df is not None and
+                                                    column is None)):
+            raise ValueError("Both df and column must be provided, or both "
+                             "must be None.")
+        elif df is None and column is None:
+            point_colors, category_to_color = None, None
+        else:
+            if column not in df:
+                raise ValueError("Column '%s' not in data frame." % column)
+
+            col_vals = df.loc[ids, column]
+
+            # NOTE(review): this relies on .loc yielding NaN for IDs that
+            # are absent from `df` (older pandas behavior); newer pandas
+            # raises KeyError for missing labels instead -- confirm the
+            # supported pandas version.
+            if col_vals.isnull().any():
+                raise ValueError("One or more IDs in the ordination results "
+                                 "are not in the data frame, or there is "
+                                 "missing data in the data frame's '%s' "
+                                 "column." % column)
+
+            category_to_color = None
+            try:
+                # Numeric data: hand the floats back; the caller maps them
+                # through the colormap (and can attach a colorbar).
+                point_colors = col_vals.astype(float)
+            except ValueError:
+                # we have categorical data, so choose a color for each
+                # category, where colors are evenly spaced across the
+                # colormap.
+                # derived from http://stackoverflow.com/a/14887119
+                categories = col_vals.unique()
+                cmap = plt.get_cmap(cmap)
+                category_colors = cmap(np.linspace(0, 1, len(categories)))
+
+                category_to_color = dict(zip(categories, category_colors))
+                point_colors = col_vals.apply(lambda x: category_to_color[x])
+
+            point_colors = point_colors.tolist()
+
+        return point_colors, category_to_color
+
+    def _plot_categorical_legend(self, ax, color_dict):
+        """Add legend to plot using specified mapping of category to color."""
+        # derived from http://stackoverflow.com/a/20505720
+        proxies = []
+        labels = []
+        for category in color_dict:
+            proxy = mpl.lines.Line2D([0], [0], linestyle='none',
+                                     c=color_dict[category], marker='o')
+            proxies.append(proxy)
+            labels.append(category)
+
+        # place legend outside of the axes (centered)
+        # derived from http://matplotlib.org/users/legend_guide.html
+        ax.legend(proxies, labels, numpoints=1, loc=6,
+                  bbox_to_anchor=(1.05, 0.5), borderaxespad=0.)
+
+    # Here we define the special repr methods that provide the IPython display
+    # protocol. Code derived from:
+    #     https://github.com/ipython/ipython/blob/2.x/examples/Notebook/
+    #         Custom%20Display%20Logic.ipynb
+    # See licenses/ipython.txt for more details.
+
+    def _repr_png_(self):
+        return self._figure_data('png')
+
+    def _repr_svg_(self):
+        return self._figure_data('svg')
+
+    # We expose the above reprs as properties, so that the user can see them
+    # directly (since otherwise the client dictates which one it shows by
+    # default)
+    @property
+    def png(self):
+        """Display basic 3-D scatterplot in IPython Notebook as PNG."""
+        return Image(self._repr_png_(), embed=True)
+
+    @property
+    def svg(self):
+        """Display basic 3-D scatterplot in IPython Notebook as SVG."""
+        return SVG(self._repr_svg_())
+
+    def _figure_data(self, format):
+        fig = self.plot()
+        data = print_figure(fig, format)
+        # We MUST close the figure, otherwise IPython's display machinery
+        # will pick it up and send it as output, resulting in a double display
+        plt.close(fig)
+        return data
+
+    def _format_attribute(self, attr, attr_label, formatter):
+        if attr is None:
+            formatted_attr = 'N/A'
+        else:
+            formatted_attr = formatter(attr)
+        return '\t%s: %s' % (attr_label, formatted_attr)
+
+
+class Ordination(object):
+    # Base class for the ordination methods in this package. Subclasses
+    # must override both display-name attributes.
+    short_method_name = 'Overwrite in subclass!'
+    long_method_name = 'Overwrite in subclass!'
diff --git a/skbio/stats/ordination/_canonical_correspondence_analysis.py b/skbio/stats/ordination/_canonical_correspondence_analysis.py
new file mode 100644
index 0000000..c51bec2
--- /dev/null
+++ b/skbio/stats/ordination/_canonical_correspondence_analysis.py
@@ -0,0 +1,250 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+
+from ._base import Ordination, OrdinationResults
+from ._utils import corr, svd_rank, scale
+
+
+class CCA(Ordination):
+    r"""Compute constrained (also known as canonical) correspondence
+    analysis.
+
+    Canonical (or constrained) correspondence analysis is a
+    multivariate ordination technique. It appeared in community
+    ecology [1]_ and relates community composition to the variation in
+    the environment (or in other factors). It works from data on
+    abundances or counts of individuals and environmental variables,
+    and outputs ordination axes that maximize niche separation among
+    species.
+
+    It is better suited to extract the niches of taxa than linear
+    multivariate methods because it assumes unimodal response curves
+    (habitat preferences are often unimodal functions of habitat
+    variables [2]_).
+
+    As more environmental variables are added, the result gets more
+    similar to unconstrained ordination, so only the variables that
+    are deemed explanatory should be included in the analysis.
+
+    Parameters
+    ----------
+    Y : array_like
+        Community data matrix of shape (n, m): a contingency table for
+        m species at n sites.
+    X : array_like
+        Constraining matrix of shape (n, q): q quantitative
+        environmental variables at n sites.
+    site_ids : iterable of str
+        Identifiers of the n sites (rows).
+    species_ids : iterable of str
+        Identifiers of the m species (columns).
+
+    Notes
+    -----
+    The algorithm is based on [3]_, \S 11.2, and is expected to give
+    the same results as ``cca(Y, X)`` in R's package vegan, except
+    that this implementation won't drop constraining variables due to
+    perfect collinearity: the user needs to choose which ones to
+    input.
+
+    Canonical *correspondence* analysis shouldn't be confused with
+    canonical *correlation* analysis (CCorA, but sometimes called
+    CCA), a different technique to search for multivariate
+    relationships between two datasets. Canonical correlation analysis
+    is a statistical tool that, given two vectors of random variables,
+    finds linear combinations that have maximum correlation with each
+    other. In some sense, it assumes linear responses of "species" to
+    "environmental variables" and is not well suited to analyze
+    ecological data.
+
+    In data analysis, ordination (or multivariate gradient analysis)
+    complements clustering by arranging objects (species, samples...)
+    along gradients so that similar ones are closer and dissimilar
+    ones are further. There's a good overview of the available
+    techniques in http://ordination.okstate.edu/overview.htm.
+
+    See Also
+    --------
+    CA
+    RDA
+
+    References
+    ----------
+    .. [1] Cajo J. F. Ter Braak, "Canonical Correspondence Analysis: A
+        New Eigenvector Technique for Multivariate Direct Gradient
+        Analysis", Ecology 67.5 (1986), pp. 1167-1179.
+
+    .. [2] Cajo J.F. Braak and Piet F.M. Verdonschot, "Canonical
+        correspondence analysis and related multivariate methods in
+        aquatic ecology", Aquatic Sciences 57.3 (1995), pp. 255-289.
+
+    .. [3] Legendre P. and Legendre L. 1998. Numerical
+       Ecology. Elsevier, Amsterdam.
+
+    """
+    short_method_name = 'CCA'
+    long_method_name = 'Canonical Correspondence Analysis'
+
+    def __init__(self, Y, X, site_ids, species_ids):
+        # Work on float64 copies; the decomposition runs eagerly so that
+        # scores() only has to combine the stored matrices.
+        self.Y = np.asarray(Y, dtype=np.float64)
+        self.X = np.asarray(X, dtype=np.float64)
+        self.site_ids = site_ids
+        self.species_ids = species_ids
+        self._cca()
+
+    def _cca(self):
+        """Run the CCA decomposition and store its intermediate matrices."""
+        X, Y = self.X, self.Y
+        if X.shape[0] != Y.shape[0]:
+            raise ValueError("Contingency and environmental tables must have"
+                             " the same number of rows (sites). X has {0}"
+                             " rows but Y has {1}.".format(X.shape[0],
+                                                           Y.shape[0]))
+        if Y.min() < 0:
+            raise ValueError("Contingency table must be nonnegative")
+        row_max = Y.max(axis=1)
+        if np.any(row_max <= 0):
+            # Or else the lstsq call to compute Y_hat breaks
+            raise ValueError("Contingency table cannot contain row of only 0s")
+
+        # Step 1 (similar to Pearson chi-square statistic)
+        grand_total = Y.sum()
+        Q = Y / grand_total  # Relative frequencies of Y (contingency table)
+
+        # Species and site weights (marginal totals)
+        column_marginals = Q.sum(axis=0)
+        row_marginals = Q.sum(axis=1)
+
+        # Formula 9.32 in Legendre & Legendre (1998). Notice that it's a
+        # scaled version of the contribution of each cell towards the
+        # Pearson chi-square statistic.
+        expected = np.outer(row_marginals, column_marginals)
+        Q_bar = (Q - expected) / np.sqrt(expected)
+
+        # Step 2. Standardize columns of X with respect to site weights,
+        # using the maximum likelihood variance estimator (Legendre &
+        # Legendre 1998, p. 595)
+        X = scale(X, weights=row_marginals, ddof=0)
+
+        # Step 3. Weighted multiple regression.
+        X_weighted = row_marginals[:, None]**0.5 * X
+        B, _, rank_lstsq, _ = np.linalg.lstsq(X_weighted, Q_bar)
+        Y_hat = X_weighted.dot(B)
+        Y_res = Q_bar - Y_hat
+
+        # Step 4. Eigenvalue decomposition (via SVD) of the fitted values
+        u, s, vt = np.linalg.svd(Y_hat, full_matrices=False)
+        rank = svd_rank(Y_hat.shape, s)
+        s = s[:rank]
+        u = u[:, :rank]
+        vt = vt[:rank]
+        U = vt.T
+
+        # Step 5. Eq. 9.38
+        U_hat = Q_bar.dot(U) * s**-1
+
+        # Residuals analysis: same SVD machinery applied to the part the
+        # environmental variables could not explain.
+        u_res, s_res, vt_res = np.linalg.svd(Y_res, full_matrices=False)
+        rank = svd_rank(Y_res.shape, s_res)
+        s_res = s_res[:rank]
+        u_res = u_res[:, :rank]
+        vt_res = vt_res[:rank]
+
+        U_res = vt_res.T
+        U_hat_res = Y_res.dot(U_res) * s_res**-1
+
+        # Storing values needed to compute scores
+        iter_ = (('column_marginals', column_marginals),
+                 ('row_marginals', row_marginals),
+                 ('U', U),
+                 ('U_res', U_res),
+                 ('U_hat', U_hat),
+                 ('U_hat_res', U_hat_res),
+                 ('u', u), ('Y_hat', Y_hat),
+                 ('s', s), ('s_res', s_res),
+                 ('X_weighted', X_weighted[:, :rank_lstsq]))
+        for val_name, val in iter_:
+            setattr(self, val_name, val)
+
+        # Constrained eigenvalues first, then residual (unconstrained) ones.
+        self.eigenvalues = np.r_[s, s_res]**2
+
+    def scores(self, scaling):
+        r"""Compute site and species scores for different scalings.
+
+        Parameters
+        ----------
+        scaling : int
+            The same options as in `CA` are available, and the
+            interpretation is the same.
+
+        Returns
+        -------
+        OrdinationResults
+            Object that stores the computed eigenvalues, the
+            proportion explained by each of them (per unit),
+            transformed coordinates for species and sites, biplot
+            scores, site constraints, etc.
+
+        Raises
+        ------
+        NotImplementedError
+            If `scaling` is not 1 or 2.
+
+        See Also
+        --------
+        OrdinationResults
+        """
+        if scaling not in {1, 2}:
+            raise NotImplementedError(
+                "Scaling {0} not implemented.".format(scaling))
+        # In this case scores are also a bit intertwined, so we'll
+        # almost compute them both and then choose.
+
+        # Scalings (p. 596 L&L 1998):
+        # Species scores, scaling 1
+        V = (self.column_marginals**-0.5)[:, None] * self.U
+
+        # Site scores, scaling 2
+        V_hat = (self.row_marginals**-0.5)[:, None] * self.U_hat
+
+        # Site scores, scaling 1
+        F = V_hat * self.s
+
+        # Species scores, scaling 2
+        F_hat = V * self.s
+
+        # Site scores which are linear combinations of environmental
+        # variables
+        Z_scaling1 = ((self.row_marginals**-0.5)[:, None] *
+                      self.Y_hat.dot(self.U))
+        Z_scaling2 = Z_scaling1 * self.s**-1
+
+        # Species residual scores, scaling 1
+        V_res = (self.column_marginals**-0.5)[:, None] * self.U_res
+
+        # Site residual scores, scaling 2
+        V_hat_res = (self.row_marginals**-0.5)[:, None] * self.U_hat_res
+
+        # Site residual scores, scaling 1
+        F_res = V_hat_res * self.s_res
+
+        # Species residual scores, scaling 2
+        F_hat_res = V_res * self.s_res
+
+        eigvals = self.eigenvalues
+        if scaling == 1:
+            species_scores = np.hstack((V, V_res))
+            site_scores = np.hstack((F, F_res))
+            site_constraints = np.hstack((Z_scaling1, F_res))
+        elif scaling == 2:
+            species_scores = np.hstack((F_hat, F_hat_res))
+            site_scores = np.hstack((V_hat, V_hat_res))
+            site_constraints = np.hstack((Z_scaling2, V_hat_res))
+
+        # Biplot scores: corr() of the retained constraining variables
+        # against the fitted site axes u.
+        biplot_scores = corr(self.X_weighted, self.u)
+        return OrdinationResults(eigvals=eigvals,
+                                 proportion_explained=eigvals / eigvals.sum(),
+                                 species=species_scores,
+                                 site=site_scores,
+                                 biplot=biplot_scores,
+                                 site_constraints=site_constraints,
+                                 site_ids=self.site_ids,
+                                 species_ids=self.species_ids)
diff --git a/skbio/stats/ordination/_correspondence_analysis.py b/skbio/stats/ordination/_correspondence_analysis.py
new file mode 100644
index 0000000..7919c3c
--- /dev/null
+++ b/skbio/stats/ordination/_correspondence_analysis.py
@@ -0,0 +1,187 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+
+from ._base import Ordination, OrdinationResults
+from ._utils import svd_rank
+
+
+class CA(Ordination):
+    r"""Compute correspondence analysis, a multivariate statistical
+    technique for ordination.
+
+    In general, rows in the data table will correspond to sites and
+    columns to species, but the method is symmetric. In order to
+    measure the correspondence between rows and columns, the
+    :math:`\chi^2` distance is used, and those distances are preserved
+    in the transformed space. The :math:`\chi^2` distance doesn't take
+    double zeros into account, and so it is expected to produce better
+    ordination than PCA when the data has lots of zero values.
+
+    It is related to Principal Component Analysis (PCA) but it should
+    be preferred in the case of steep or long gradients, that is, when
+    there are many zeros in the input data matrix.
+
+    Parameters
+    ----------
+    X : array_like
+        Contingency table. It can be applied to different kinds of
+        data tables but data must be non-negative and dimensionally
+        homogeneous (quantitative or binary).
+    row_ids : iterable of str
+        Identifiers of the rows (sites).
+    column_ids : iterable of str
+        Identifiers of the columns (species).
+
+    Notes
+    -----
+    The algorithm is based on [1]_, \S 9.4.1., and is expected to give
+    the same results as ``cca(X)`` in R's package vegan.
+
+    See Also
+    --------
+    CCA
+
+    References
+    ----------
+    .. [1] Legendre P. and Legendre L. 1998. Numerical
+       Ecology. Elsevier, Amsterdam.
+
+    """
+    short_method_name = 'CA'
+    # NOTE(review): 'Canonical Analysis' looks like a misnomer -- CA is
+    # correspondence analysis -- but this string may be relied upon by
+    # consumers of the results; confirm before renaming.
+    long_method_name = 'Canonical Analysis'
+
+    def __init__(self, X, row_ids, column_ids):
+        # The decomposition runs eagerly; scores() combines the stored
+        # matrices on demand.
+        self.X = np.asarray(X, dtype=np.float64)
+        self._ca()
+        self.row_ids = row_ids
+        self.column_ids = column_ids
+
+    def _ca(self):
+        """Run the CA decomposition, storing U_hat, W and U (truncated)."""
+        X = self.X
+        r, c = X.shape
+
+        if X.min() < 0:
+            raise ValueError("Input matrix elements must be non-negative.")
+
+        # Step 1 (similar to Pearson chi-square statistic)
+        grand_total = X.sum()
+        Q = X / grand_total
+
+        column_marginals = Q.sum(axis=0)
+        row_marginals = Q.sum(axis=1)
+        # Let's store them since they're needed to compute scores
+        self.column_marginals = column_marginals
+        self.row_marginals = row_marginals
+
+        # Formula 9.32 in Legendre & Legendre (1998). Notice that it's
+        # a scaled version of the contribution of each cell towards the
+        # Pearson chi-square statistic.
+        expected = np.outer(row_marginals, column_marginals)
+        Q_bar = (Q - expected) / np.sqrt(expected)  # Eq. 9.32
+
+        # Step 2 (Singular Value Decomposition)
+        U_hat, W, Ut = np.linalg.svd(Q_bar, full_matrices=False)
+        # Due to the centering, there are at most min(r, c) - 1 non-zero
+        # eigenvalues (which are all positive)
+        rank = svd_rank(Q_bar.shape, W)
+        # Internal sanity check only (stripped when run under python -O).
+        assert rank <= min(r, c) - 1
+        self.U_hat = U_hat[:, :rank]
+        self.W = W[:rank]
+        self.U = Ut[:rank].T
+
+    def scores(self, scaling):
+        r"""Compute site and species scores for different scalings.
+
+        Parameters
+        ----------
+        scaling : int
+
+            For a more detailed explanation of the interpretation, check
+            Legendre & Legendre 1998, section 9.4.3. The notes that
+            follow are quick recommendations.
+
+            Scaling type 1 maintains :math:`\chi^2` distances between
+            rows (sites): in the transformed space, the euclidean
+            distances between rows are equal to the :math:`\chi^2`
+            distances between rows in the original space. It should be
+            used when studying the ordination of sites. Rows (sites)
+            that are near a column (species) have high contributions
+            from it.
+
+            Scaling type 2 preserves :math:`\chi^2` distances between
+            columns (species), so euclidean distance between columns
+            after transformation is equal to :math:`\chi^2` distance
+            between columns in the original space. It is best used
+            when we are interested in the ordination of species. A
+            column (species) that is next to a row (site) means that
+            it is more abundant there.
+
+            Other types of scalings are currently not implemented, as
+            they're less used by ecologists (Legendre & Legendre 1998,
+            p. 456).
+
+            In general, species appearing far from the center of the
+            biplot and far from its edges will probably exhibit better
+            relationships than species either in the center (may be
+            multimodal species, not related to the shown ordination
+            axes...) or the edges (sparse species...).
+
+        Returns
+        -------
+        OrdinationResults
+            Object that stores the computed eigenvalues, the
+            proportion explained by each of them (per unit),
+            transformed coordinates, etc.
+
+        Raises
+        ------
+        NotImplementedError
+            If `scaling` is not 1 or 2.
+
+        See Also
+        --------
+        OrdinationResults
+        """
+
+        if scaling not in {1, 2}:
+            raise NotImplementedError(
+                "Scaling {0} not implemented.".format(scaling))
+        # Both scalings are a bit intertwined, so we'll compute both and
+        # then choose
+        V = self.column_marginals[:, None]**-0.5 * self.U
+        V_hat = self.row_marginals[:, None]**-0.5 * self.U_hat
+        F = V_hat * self.W
+        # According to Formula 9.43, this should hold
+        # assert np.allclose(F, (row_marginals**-1)[:, None] * Q.dot(V))
+        # but it doesn't (notice that W**2==Lambda):
+        # (9.43a) F = V_hat W = D(p_i+)^{-1/2} U_hat W
+        #           = D(p_i+)^{-1/2} Q_bar U W^{-1} W  (substituting 9.38)
+        #           = D(p_i+)^{-1/2} Q_bar U
+        # (9.43b) F = D(p_i+)^{-1} Q V
+        #           = D(p_i+)^{-1} Q D(p_+j)^{-1/2} U  (substituting 9.41)
+        #           = D(p_i+)^{-1/2} D(p_i+)^{-1/2} Q D(p_+j)^{-1/2} U
+        #           = D(p_i+)^{-1/2} Q_tilde U         (using 9.40)
+        # It holds if we replace Q in 9.43b with Q after centering, ie
+        # assert np.allclose(
+        #    F,
+        #    (row_marginals**-1)[:, None] * (Q - expected).dot(V))
+        # Comparing results with vegan and the examples in the book, 9.43a
+        # is the right one. The same issue happens in 9.44, where also
+        # 9.44a is the one that matches vegan's output.
+        # (9.44a) F_hat = V W = D(p_+j)^{-1/2} U W
+        #               = D(p_+j)^{-1/2} Q_bar' U_hat W^{-1} W (using 9.39)
+        #               = D(p_+j)^{-1/2} Q_bar' U_hat
+        # (9.44b) F_hat = D(p_+j)^{-1} Q' V_hat
+        #               = D(p_+j)^{-1/2} Q_tilde' U_hat (using 9.40 and 9.42)
+        F_hat = V * self.W
+
+        # Eigenvalues
+        eigvals = self.W**2
+
+        # Species scores
+        species_scores = [V, F_hat][scaling - 1]
+        # Site scores (weighted averages of species scores)
+        site_scores = [F, V_hat][scaling - 1]
+        return OrdinationResults(eigvals=eigvals, species=species_scores,
+                                 site=site_scores, site_ids=self.row_ids,
+                                 species_ids=self.column_ids)
diff --git a/skbio/stats/ordination/_principal_coordinate_analysis.py b/skbio/stats/ordination/_principal_coordinate_analysis.py
new file mode 100644
index 0000000..7af3640
--- /dev/null
+++ b/skbio/stats/ordination/_principal_coordinate_analysis.py
@@ -0,0 +1,171 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from warnings import warn
+
+import numpy as np
+
+from skbio.stats.distance import DistanceMatrix
+from ._base import Ordination, OrdinationResults
+
+# - In cogent, after computing eigenvalues/vectors, the imaginary part
+#   is dropped, if any. We know for a fact that the eigenvalues are
+#   real, so that's not necessary, but eigenvectors can in principle
+#   be complex (see for example
+#   http://math.stackexchange.com/a/47807/109129 for details) and in
+#   that case dropping the imaginary part means they'd no longer be
+#   so, so I'm not doing that.
+
+
+class PCoA(Ordination):
+    r"""Perform Principal Coordinate Analysis.
+
+    Principal Coordinate Analysis (PCoA) is a method similar to PCA
+    that works from distance matrices, and so it can be used with
+    ecologically meaningful distances like unifrac for bacteria.
+
+    In ecology, the euclidean distance preserved by Principal
+    Component Analysis (PCA) is often not a good choice because it
+    deals poorly with double zeros (Species have unimodal
+    distributions along environmental gradients, so if a species is
+    absent from two sites at the same site, it can't be known if an
+    environmental variable is too high in one of them and too low in
+    the other, or too low in both, etc. On the other hand, if an
+    species is present in two sites, that means that the sites are
+    similar.).
+
+    Parameters
+    ==========
+    distance_matrix : DistanceMatrix
+        A distance matrix.
+
+    Notes
+    =====
+    It is sometimes known as metric multidimensional scaling or
+    classical scaling.
+
+    .. note::
+
+       If the distance is not euclidean (for example if it is a
+       semimetric and the triangle inequality doesn't hold),
+       negative eigenvalues can appear. There are different ways
+       to deal with that problem (see Legendre & Legendre 1998, \S
+       9.2.3), but none are currently implemented here.
+
+       However, a warning is raised whenever negative eigenvalues
+       appear, allowing the user to decide if they can be safely
+       ignored.
+    """
+    short_method_name = 'PCoA'
+    long_method_name = 'Principal Coordinate Analysis'
+
+    def __init__(self, distance_matrix):
+        if isinstance(distance_matrix, DistanceMatrix):
+            self.dm = np.asarray(distance_matrix.data, dtype=np.float64)
+            self.ids = distance_matrix.ids
+        else:
+            raise TypeError("Input must be a DistanceMatrix.")
+        self._pcoa()
+
+    def _pcoa(self):
+        E_matrix = self._E_matrix(self.dm)
+
+        # If the used distance was euclidean, pairwise distances
+        # needn't be computed from the data table Y because F_matrix =
+        # Y.dot(Y.T) (if Y has been centred).
+        F_matrix = self._F_matrix(E_matrix)
+
+        # If the eigendecomposition ever became a bottleneck, it could
+        # be replaced with an iterative version that computes the
+        # largest k eigenvectors.
+        eigvals, eigvecs = np.linalg.eigh(F_matrix)
+
+        # eigvals might not be ordered, so we order them (at least one
+        # is zero). cogent makes eigenvalues positive by taking the
+        # abs value, but that doesn't seem to be an approach accepted
+        # by L&L to deal with negative eigenvalues. We raise a warning
+        # in that case. First, we make values close to 0 equal to 0.
+        negative_close_to_zero = np.isclose(eigvals, 0)
+        eigvals[negative_close_to_zero] = 0
+        if np.any(eigvals < 0):
+            warn(
+                "The result contains negative eigenvalues."
+                " Please compare their magnitude with the magnitude of some"
+                " of the largest positive eigenvalues. If the negative ones"
+                " are smaller, it's probably safe to ignore them, but if they"
+                " are large in magnitude, the results won't be useful. See the"
+                " Notes section for more details. The smallest eigenvalue is"
+                " {0} and the largest is {1}.".format(eigvals.min(),
+                                                      eigvals.max()),
+                RuntimeWarning
+                )
+        idxs_descending = eigvals.argsort()[::-1]
+        self.eigvals = eigvals[idxs_descending]
+        self.eigvecs = eigvecs[:, idxs_descending]
+
+    def scores(self):
+        """Compute coordinates in transformed space.
+
+        Returns
+        -------
+        OrdinationResults
+            Object that stores the computed eigenvalues, the
+            proportion explained by each of them (per unit) and
+            transformed coordinates, etc.
+
+        See Also
+        --------
+        OrdinationResults
+        """
+        # Scale eigenvalues to have lenght = sqrt(eigenvalue). This
+        # works because np.linalg.eigh returns normalized
+        # eigenvectors. Each row contains the coordinates of the
+        # objects in the space of principal coordinates. Note that at
+        # least one eigenvalue is zero because only n-1 axes are
+        # needed to represent n points in an euclidean space.
+
+        # If we return only the coordinates that make sense (i.e., that have a
+        # corresponding positive eigenvalue), then Jackknifed Beta Diversity
+        # won't work as it expects all the OrdinationResults to have the same
+        # number of coordinates. In order to solve this issue, we return the
+        # coordinates that have a negative eigenvalue as 0
+        num_positive = (self.eigvals >= 0).sum()
+        eigvecs = self.eigvecs
+        eigvecs[:, num_positive:] = np.zeros(eigvecs[:, num_positive:].shape)
+        eigvals = self.eigvals
+        eigvals[num_positive:] = np.zeros(eigvals[num_positive:].shape)
+
+        coordinates = eigvecs * np.sqrt(eigvals)
+
+        proportion_explained = eigvals / eigvals.sum()
+
+        return OrdinationResults(eigvals=eigvals, site=coordinates,
+                                 proportion_explained=proportion_explained,
+                                 site_ids=self.ids)
+
+    @staticmethod
+    def _E_matrix(distance_matrix):
+        """Compute E matrix from a distance matrix.
+
+        Squares and divides by -2 the input elementwise. Eq. 9.20 in
+        Legendre & Legendre 1998."""
+        return distance_matrix * distance_matrix / -2
+
+    @staticmethod
+    def _F_matrix(E_matrix):
+        """Compute F matrix from E matrix.
+
+        Centring step: for each element, the mean of the corresponding
+        row and column are substracted, and the mean of the whole
+        matrix is added. Eq. 9.21 in Legendre & Legendre 1998."""
+        row_means = E_matrix.mean(axis=1, keepdims=True)
+        col_means = E_matrix.mean(axis=0, keepdims=True)
+        matrix_mean = E_matrix.mean()
+        return E_matrix - row_means - col_means + matrix_mean
diff --git a/skbio/stats/ordination/_redundancy_analysis.py b/skbio/stats/ordination/_redundancy_analysis.py
new file mode 100644
index 0000000..4c75513
--- /dev/null
+++ b/skbio/stats/ordination/_redundancy_analysis.py
@@ -0,0 +1,233 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+
+from ._base import Ordination, OrdinationResults
+from ._utils import corr, svd_rank, scale
+
+
class RDA(Ordination):
    r"""Compute redundancy analysis, a type of canonical analysis.

    It is related to PCA and multiple regression because the explained
    variables `Y` are fitted to the explanatory variables `X` and PCA
    is then performed on the fitted values. A similar process is
    performed on the residuals.

    RDA should be chosen if the studied gradient is small, and CCA
    when it's large, so that the contingency table is sparse.

    Parameters
    ----------
    Y : array_like
        :math:`n \times p` response matrix. Its columns need be
        dimensionally homogeneous (or you can set `scale_Y=True`).
    X : array_like
        :math:`n \times m, n \geq m` matrix of explanatory
        variables. Its columns need not be standardized, but doing so
        turns regression coefficients into standard regression
        coefficients.
    site_ids : iterable of str
        Identifiers of the rows (sites) of `Y`, passed through to the
        returned ``OrdinationResults``.
    species_ids : iterable of str
        Identifiers of the columns (species) of `Y`, passed through to
        the returned ``OrdinationResults``.
    scale_Y : bool, optional
        Controls whether the response matrix columns are scaled to
        have unit standard deviation. Defaults to `False`.

    Notes
    -----
    The algorithm is based on [1]_, \S 11.1, and is expected to
    give the same results as ``rda(Y, X)`` in R's package vegan.

    See Also
    --------
    CCA

    References
    ----------
    .. [1] Legendre P. and Legendre L. 1998. Numerical
       Ecology. Elsevier, Amsterdam.

    """

    short_method_name = 'RDA'
    long_method_name = 'Redundancy Analysis'

    def __init__(self, Y, X, site_ids, species_ids, scale_Y=False):
        """Store the input matrices (coerced to float64) and run the
        analysis immediately; results are kept as instance attributes."""
        self.Y = np.asarray(Y, dtype=np.float64)
        self.X = np.asarray(X, dtype=np.float64)
        self.site_ids = site_ids
        self.species_ids = species_ids
        self._rda(scale_Y)

    def _rda(self, scale_Y):
        """Perform the redundancy analysis.

        Sets the attributes ``U``, ``U_res``, ``F``, ``F_res``, ``Z``,
        ``u`` and ``eigenvalues`` used later by ``scores``."""
        n, p = self.Y.shape
        n_, m = self.X.shape
        if n != n_:
            raise ValueError(
                "Both data matrices must have the same number of rows.")
        if n < m:
            # Mmm actually vegan is able to do this case, too
            raise ValueError(
                "Explanatory variables cannot have less rows than columns.")

        # Centre response variables (they must be dimensionally
        # homogeneous)
        Y = scale(self.Y, with_std=scale_Y)
        # Centre explanatory variables
        X = scale(self.X, with_std=False)

        # Distribution of variables should be examined and transformed
        # if necessary (see paragraph 4 in p. 580 L&L 1998)

        # Compute Y_hat (fitted values by multivariate linear
        # regression, that is, linear least squares). Formula 11.6 in
        # L&L 1998 involves solving the normal equations, but that fails
        # when cond(X) ~ eps**(-0.5). A more expensive but much more
        # stable solution (fails when cond(X) ~ eps**-1) is computed
        # using the QR decomposition of X = QR:
        # (11.6) Y_hat = X [X' X]^{-1} X' Y
        #              = QR [R'Q' QR]^{-1} R'Q' Y
        #              = QR [R' R]^{-1} R'Q' Y
        #              = QR R^{-1} R'^{-1} R' Q' Y
        #              = Q Q' Y
        # and B (matrix of regression coefficients)
        # (11.4) B = [X' X]^{-1} X' Y
        #          = R^{-1} R'^{-1} R' Q' Y
        #          = R^{-1} Q'
        # Q, R = np.linalg.qr(X)
        # Y_hat = Q.dot(Q.T).dot(Y)
        # B = scipy.linalg.solve_triangular(R, Q.T.dot(Y))
        # This works provided X has full rank. When not, you can still
        # fix it using R's pseudoinverse or partitioning R. To avoid any
        # issues, like the numerical instability when trying to
        # reproduce an example in L&L where X was rank-deficient, we'll
        # just use `np.linalg.lstsq`, which uses the SVD decomposition
        # under the hood and so it's also more expensive.
        # NOTE(review): rank_X is not used below; vegan apparently uses
        # it when computing biplot scores (see the comment in `scores`).
        B, _, rank_X, _ = np.linalg.lstsq(X, Y)
        Y_hat = X.dot(B)
        # Now let's perform PCA on the fitted values from the multiple
        # regression
        u, s, vt = np.linalg.svd(Y_hat, full_matrices=False)
        # vt are the right eigenvectors, which is what we need to
        # perform PCA. That is, we're changing points in Y_hat from the
        # canonical basis to the orthonormal basis given by the right
        # eigenvectors of Y_hat (or equivalently, the eigenvectors of
        # the covariance matrix Y_hat.T.dot(Y_hat))
        # See 3) in p. 583 in L&L 1998
        rank = svd_rank(Y_hat.shape, s)
        # Theoretically, there're at most min(p, m, n - 1) non-zero eigenvalues

        U = vt[:rank].T  # U as in Fig. 11.2

        # Ordination in the space of response variables. Its columns are
        # site scores. (Eq. 11.12)
        F = Y.dot(U)
        # Ordination in the space of explanatory variables. Its columns
        # are fitted site scores. (Eq. 11.13)
        Z = Y_hat.dot(U)

        # Canonical coefficients (formula 11.14)
        # C = B.dot(U)  # Not used

        Y_res = Y - Y_hat
        # PCA on the residuals
        u_res, s_res, vt_res = np.linalg.svd(Y_res, full_matrices=False)
        # See 9) in p. 587 in L&L 1998
        rank_res = svd_rank(Y_res.shape, s_res)
        # Theoretically, there're at most min(p, n - 1) non-zero eigenvalues

        U_res = vt_res[:rank_res].T
        F_res = Y_res.dot(U_res)  # Ordination in the space of residuals

        # Storing values needed to compute scores
        iter_ = (('U', U), ('U_res', U_res),
                 ('F', F),
                 ('F_res', F_res),
                 ('Z', Z),
                 ('u', u[:, :rank]))
        for val_name, val in iter_:
            setattr(self, val_name, val)

        # Constrained eigenvalues first, then the residual ones
        self.eigenvalues = np.r_[s[:rank], s_res[:rank_res]]

    def scores(self, scaling):
        r"""Compute site, species and biplot scores for different scalings.

        Parameters
        ----------
        scaling : int

            Scaling type 1 produces a distance biplot. It focuses on
            the ordination of rows (sites) because their transformed
            distances approximate their original euclidean
            distances. Especially interesting when most explanatory
            variables are binary.

            Scaling type 2 produces a correlation biplot. It focuses
            on the relationships among explained variables (`Y`). It
            is interpreted like scaling type 1, but taking into
            account that distances between objects don't approximate
            their euclidean distances.

            See more details about distance and correlation biplots in
            [1]_, \S 9.1.4.

        Returns
        -------
        OrdinationResults
            Object that stores the computed eigenvalues, the
            proportion explained by each of them (per unit),
            transformed coordinates for species and sites, biplot
            scores, site constraints, etc.

        Raises
        ------
        NotImplementedError
            If `scaling` is not 1 or 2.

        See Also
        --------
        OrdinationResults

        References
        ----------

        .. [1] Legendre P. and Legendre L. 1998. Numerical
           Ecology. Elsevier, Amsterdam.

        """
        if scaling not in {1, 2}:
            raise NotImplementedError("Only scalings 1, 2 available for RDA.")
        # According to the vegan-FAQ.pdf, the scaling factor for scores
        # is (notice that L&L 1998 says in p. 586 that such scaling
        # doesn't affect the interpretation of a biplot):
        eigvals = self.eigenvalues
        const = np.sum(eigvals**2)**0.25
        if scaling == 1:
            scaling_factor = const
        elif scaling == 2:
            scaling_factor = eigvals / const
        species_scores = np.hstack((self.U, self.U_res)) * scaling_factor
        site_scores = np.hstack((self.F, self.F_res)) / scaling_factor
        # TODO not yet used/displayed
        site_constraints = np.hstack((self.Z, self.F_res)) / scaling_factor
        # vegan seems to compute them as corr(self.X[:, :rank_X],
        # self.u) but I don't think that's a good idea. In fact, if
        # you take the example shown in Figure 11.3 in L&L 1998 you
        # can see that there's an arrow for each of the 4
        # environmental variables (depth, coral, sand, other) even if
        # other = not(coral or sand)
        biplot_scores = corr(self.X, self.u)
        # The "Correlations of environmental variables with site
        # scores" from table 11.4 are quite similar to vegan's biplot
        # scores, but they're computed like this:
        # corr(self.X, self.F))
        return OrdinationResults(eigvals=eigvals,
                                 proportion_explained=eigvals / eigvals.sum(),
                                 species=species_scores,
                                 site=site_scores,
                                 biplot=biplot_scores,
                                 site_constraints=site_constraints,
                                 site_ids=self.site_ids,
                                 species_ids=self.species_ids)
diff --git a/skbio/stats/ordination/_utils.py b/skbio/stats/ordination/_utils.py
new file mode 100644
index 0000000..16d92a2
--- /dev/null
+++ b/skbio/stats/ordination/_utils.py
@@ -0,0 +1,223 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+import numpy.testing as npt
+
+
def mean_and_std(a, axis=None, weights=None, with_mean=True, with_std=True,
                 ddof=0):
    """Compute the (optionally weighted) average and standard deviation
    along the specified axis.

    Parameters
    ----------
    a : array_like
        Values whose statistics are computed.
    axis : int, optional
        Axis along which the statistics are computed. By default they
        are computed on the flattened array.
    weights : array_like, optional
        Weights associated with the values in `a`; each value
        contributes to the average according to its weight. May be 1-D
        (with length equal to the size of `a` along `axis`) or the
        same shape as `a`. When omitted, every value has weight one.
    with_mean : bool, optional, defaults to True
        Compute the average if True.
    with_std : bool, optional, defaults to True
        Compute the standard deviation if True.
    ddof : int, optional, defaults to 0
        Delta degrees of freedom: variance is calculated by dividing
        by `n - ddof`, where `n` is the number of elements. The
        default (0) yields the maximum likelihood estimator.

    Returns
    -------
    average, std
        The average and standard deviation along `axis`. Whichever of
        the two was not requested is returned as `None`.
    """
    if not (with_mean or with_std):
        raise ValueError("Either the mean or standard deviation need to be"
                         " computed.")
    a = np.asarray(a)

    if weights is None:
        # Unweighted case: defer directly to numpy.
        return (a.mean(axis=axis) if with_mean else None,
                a.std(axis=axis, ddof=ddof) if with_std else None)

    avg = np.average(a, axis=axis, weights=weights)
    std = None
    if with_std:
        # numpy has no weighted std, so take the weighted average of
        # the squared deviations instead (stable and fast).
        if axis is None:
            variance = np.average((a - avg)**2, weights=weights)
            n = a.size
        else:
            # Roll `axis` to the front so the subtraction broadcasts
            # correctly for multidimensional input.
            deviations = np.rollaxis(a, axis) - avg
            variance = np.average(deviations**2, axis=0, weights=weights)
            n = a.shape[axis]
        if ddof != 0:  # only rescale when a correction was requested
            variance *= n / (n - ddof)
        std = np.sqrt(variance)

    return (avg if with_mean else None), std
+
+
def scale(a, weights=None, with_mean=True, with_std=True, ddof=0, copy=True):
    """Scale array by columns to have weighted average 0 and standard
    deviation 1.

    Parameters
    ----------
    a : array_like
        2D array whose columns are standardized according to the
        weights.
    weights : array_like, optional
        Array of weights associated with the columns of `a`. By
        default, the scaling is unweighted.
    with_mean : bool, optional, defaults to True
        Center columns to have 0 weighted mean.
    with_std : bool, optional, defaults to True
        Scale columns to have unit weighted std.
    ddof : int, optional, defaults to 0
        If with_std is True, variance is calculated by dividing by `n
        - ddof` (where `n` is the number of elements). By default it
        computes the maximum likelihood estimator.
    copy : bool, optional, defaults to True
        Whether to perform the standardization in place, or return a
        new copy of `a`. Note that non-float64 input is always
        converted (and therefore copied) so that the in-place
        arithmetic below is well defined.

    Returns
    -------
    2D ndarray
        Scaled array.

    Notes
    -----
    Wherever std equals 0, it is replaced by 1 in order to avoid
    division by zero.
    """
    # Coerce to float64 up front: `a -= avg` / `a /= std` would raise a
    # casting TypeError on integer arrays otherwise.
    if copy:
        a = np.array(a, dtype=np.float64)
    else:
        a = np.asarray(a, dtype=np.float64)
    avg, std = mean_and_std(a, axis=0, weights=weights, with_mean=with_mean,
                            with_std=with_std, ddof=ddof)
    if with_mean:
        a -= avg
    if with_std:
        # np.where also handles the 0-d std produced by 1-D input,
        # where boolean-mask assignment (std[std == 0] = 1) would fail.
        std = np.where(std == 0, 1.0, std)
        a /= std
    return a
+
+
def svd_rank(M_shape, S, tol=None):
    """Matrix rank of `M` given its singular values `S`.

    See `np.linalg.matrix_rank` for a rationale on the tolerance
    (we're not using that function because it doesn't let us reuse a
    precomputed SVD)."""
    if tol is None:
        # Same default tolerance as np.linalg.matrix_rank:
        # largest singular value * largest dimension * dtype epsilon.
        eps = np.finfo(S.dtype).eps
        tol = S.max() * max(M_shape) * eps
    return (S > tol).sum()
+
+
def corr(x, y=None):
    """Computes correlation between columns of `x`, or `x` and `y`.

    Correlation is covariance of (columnwise) standardized matrices,
    so each matrix is first centered and scaled to have variance one,
    and then their covariance is computed.

    Parameters
    ----------
    x : 2D array_like
        Matrix of shape (n, p). Correlation between its columns will
        be computed.
    y : 2D array_like, optional
        Matrix of shape (n, q). If provided, the correlation is
        computed between the columns of `x` and the columns of
        `y`. Else, it's computed between the columns of `x`.

    Returns
    -------
    correlation
        Matrix of computed correlations. Has shape (p, p) if `y` is
        not provided, else has shape (p, q).
    """
    x = np.asarray(x)
    if y is None:
        # Self-correlation: standardize once and reuse.
        x = scale(x)
        y = x
    else:
        y = np.asarray(y)
        if y.shape[0] != x.shape[0]:
            raise ValueError("Both matrices must have the same number of rows")
        x, y = scale(x), scale(y)
    # Scaling above used ddof=0 (dividing by n), so the covariance is
    # likewise computed by dividing by n.
    return x.T.dot(y) / x.shape[0]
+
+
def assert_ordination_results_equal(left, right):
    """Assert that ordination results objects are equal.

    This is a helper function intended to be used in unit tests that need to
    compare ``OrdinationResults`` objects.

    For numeric attributes (e.g., eigvals, site, etc.),
    ``numpy.testing.assert_almost_equal`` is used. Otherwise,
    ``numpy.testing.assert_equal`` is used for comparisons. An assertion is
    in place to ensure the two objects are exactly the same type.

    Parameters
    ----------
    left, right : OrdinationResults
        Ordination results to be compared for equality.

    Raises
    ------
    AssertionError
        If the two objects are not equal.

    """
    npt.assert_equal(type(left) is type(right), True)

    # eigvals should always be present
    npt.assert_almost_equal(left.eigvals, right.eigvals)

    # id attributes are strings (or None), so they can be compared exactly
    for attr in ('species_ids', 'site_ids'):
        npt.assert_equal(getattr(left, attr), getattr(right, attr))

    # numeric attributes that may legitimately be None on either side;
    # npt.assert_almost_equal can't deal with None, so delegate
    for attr in ('species', 'site', 'biplot', 'site_constraints',
                 'proportion_explained'):
        _assert_optional_numeric_attr_equal(getattr(left, attr),
                                            getattr(right, attr))
+
+
+def _assert_optional_numeric_attr_equal(left, right):
+    if left is None or right is None:
+        npt.assert_equal(left, right)
+    else:
+        npt.assert_almost_equal(left, right)
diff --git a/skbio/stats/ordination/tests/__init__.py b/skbio/stats/ordination/tests/__init__.py
new file mode 100644
index 0000000..0bf0c55
--- /dev/null
+++ b/skbio/stats/ordination/tests/__init__.py
@@ -0,0 +1,7 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/stats/ordination/tests/data/L&L_CA_data b/skbio/stats/ordination/tests/data/L&L_CA_data
new file mode 100644
index 0000000..28e8dd9
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/L&L_CA_data
@@ -0,0 +1,3 @@
+10 10 20
+10 15 10
+15 5 5
\ No newline at end of file
diff --git a/skbio/stats/ordination/tests/data/PCoA_sample_data b/skbio/stats/ordination/tests/data/PCoA_sample_data
new file mode 100644
index 0000000..dafa78b
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/PCoA_sample_data
@@ -0,0 +1,14 @@
+0 0.099 0.033 0.183 0.148 0.198 0.462 0.628 0.113 0.173 0.434 0.762 0.53 0.586
+0.099 0 0.022 0.114 0.224 0.039 0.266 0.442 0.07 0.119 0.419 0.633 0.389 0.435
+0.033 0.022 0 0.042 0.059 0.053 0.322 0.444 0.046 0.162 0.339 0.781 0.482 0.55 
+0.183 0.114 0.042 0 0.068 0.085 0.435 0.406 0.047 0.331 0.505 0.7 0.579 0.53
+0.148 0.224 0.059 0.068 0 0.051 0.268 0.24 0.034 0.177 0.469 0.758 0.597 0.552
+0.198 0.039 0.053 0.085 0.051 0 0.025 0.129 0.002 0.039 0.39 0.625 0.498 0.509
+0.462 0.266 0.322 0.435 0.268 0.025 0 0.014 0.106 0.089 0.315 0.469 0.374 0.369
+0.628 0.442 0.444 0.406 0.24 0.129 0.014 0 0.129 0.237 0.349 0.618 0.562 0.471
+0.113 0.07 0.046 0.047 0.034 0.002 0.106 0.129 0 0.071 0.151 0.44 0.247 0.234
+0.173 0.119 0.162 0.331 0.177 0.039 0.089 0.237 0.071 0 0.43 0.538 0.383 0.346
+0.434 0.419 0.339 0.505 0.469 0.39 0.315 0.349 0.151 0.43 0 0.607 0.387 0.456
+0.762 0.633 0.781 0.7 0.758 0.625 0.469 0.618 0.44 0.538 0.607 0 0.084 0.09
+0.53 0.389 0.482 0.579 0.597 0.498 0.374 0.562 0.247 0.383 0.387 0.084 0 0.038
+0.586 0.435 0.55 0.53 0.552 0.509 0.369 0.471 0.234 0.346 0.456 0.09 0.038 0
diff --git a/skbio/stats/ordination/tests/data/PCoA_sample_data_2 b/skbio/stats/ordination/tests/data/PCoA_sample_data_2
new file mode 100644
index 0000000..daeff96
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/PCoA_sample_data_2
@@ -0,0 +1,6 @@
+0.000000000000000000e+00 7.123610999999999693e-01 7.684919799999999634e-01 8.001856299999999811e-01 6.824852399999999930e-01 7.463462899999999678e-01
+7.123610999999999693e-01 0.000000000000000000e+00 8.664569100000000246e-01 8.048528200000000243e-01 8.338130099999999656e-01 7.388172600000000312e-01
+7.684919799999999634e-01 8.664569100000000246e-01 0.000000000000000000e+00 8.230839599999999479e-01 7.745174599999999909e-01 7.649887199999999554e-01
+8.001856299999999811e-01 8.048528200000000243e-01 8.230839599999999479e-01 0.000000000000000000e+00 8.416736500000000509e-01 7.761436600000000130e-01
+6.824852399999999930e-01 8.338130099999999656e-01 7.745174599999999909e-01 8.416736500000000509e-01 0.000000000000000000e+00 7.266116299999999528e-01
+7.463462899999999678e-01 7.388172600000000312e-01 7.649887199999999554e-01 7.761436600000000130e-01 7.266116299999999528e-01 0.000000000000000000e+00
diff --git a/skbio/stats/ordination/tests/data/PCoA_sample_data_3 b/skbio/stats/ordination/tests/data/PCoA_sample_data_3
new file mode 100644
index 0000000..07e3cd9
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/PCoA_sample_data_3
@@ -0,0 +1,10 @@
+	PC.636	PC.635	PC.356	PC.481	PC.354	PC.593	PC.355	PC.607	PC.634
+PC.636	0.0	0.60737953696	0.748276547061	0.669767446745	0.747947798007	0.7377413135	0.70062016649	0.722772498498	0.581124366673
+PC.635	0.60737953696	0.0	0.72197480583	0.655979250216	0.70672669146	0.745154628257	0.745509981365	0.659269044234	0.558850879936
+PC.356	0.748276547061	0.72197480583	0.0	0.697796152694	0.610208881707	0.718770654329	0.615310834175	0.735768463546	0.788355481145
+PC.481	0.669767446745	0.655979250216	0.697796152694	0.0	0.595087696488	0.657351659549	0.647258584304	0.686601452856	0.663208945591
+PC.354	0.747947798007	0.70672669146	0.610208881707	0.595087696488	0.0	0.585097582096	0.599541095803	0.726258935536	0.781832289452
+PC.593	0.7377413135	0.745154628257	0.718770654329	0.657351659549	0.585097582096	0.0	0.660981656067	0.713741192899	0.763317384716
+PC.355	0.70062016649	0.745509981365	0.615310834175	0.647258584304	0.599541095803	0.660981656067	0.0	0.77062394744	0.753575681654
+PC.607	0.722772498498	0.659269044234	0.735768463546	0.686601452856	0.726258935536	0.713741192899	0.77062394744	0.0	0.726387392797
+PC.634	0.581124366673	0.558850879936	0.788355481145	0.663208945591	0.781832289452	0.763317384716	0.753575681654	0.726387392797	0.0
\ No newline at end of file
diff --git a/skbio/stats/ordination/tests/data/example2_X b/skbio/stats/ordination/tests/data/example2_X
new file mode 100644
index 0000000..6cb0d3c
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example2_X
@@ -0,0 +1,10 @@
+1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
+2.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
+3.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
+4.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+5.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+6.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+7.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+8.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+9.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+1.000000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
diff --git a/skbio/stats/ordination/tests/data/example2_Y b/skbio/stats/ordination/tests/data/example2_Y
new file mode 100644
index 0000000..68e6583
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example2_Y
@@ -0,0 +1,12 @@
+1 0 0 0 0 0 
+0 0 0 0 0 0 
+0 1 0 0 0 0 
+11 4 0 0 8 1 
+11 5 17 7 0 0 
+9 6 0 0 6 2 
+9 7 13 10 0 0 
+7 8 0 0 4 3 
+7 9 10 13 0 0 
+5 10 0 0 2 4 
+
+
diff --git a/skbio/stats/ordination/tests/data/example2_site_scaling1_from_vegan b/skbio/stats/ordination/tests/data/example2_site_scaling1_from_vegan
new file mode 100644
index 0000000..37a3adf
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example2_site_scaling1_from_vegan
@@ -0,0 +1,10 @@
+-1.209385 0.9996717 0.2040803 -0.04377164 -0.2025459 -0.04174845 0.002251712
+-1.262748 1.114136 0.1333392 -5.659699e-17 2.478790e-16 0.0834969 -1.146481e-16
+-1.227255 1.030418 0.001456920 0.04377164 0.2025459 -0.04174845 -0.002251712
+-0.7091313 -1.234889 0.7539303 -0.3794874 0.05000171 6.953961e-17 0.0002503876
+2.414956 0.1515006 0.7018385 0.6747053 0.02580938 2.771828e-16 0.01835041
+-0.7149669 -1.032314 0.1993367 -0.1264958 0.01666724 5.423912e-17 8.346252e-05
+2.146560 0.1833668 -0.02417943 -0.04068089 -0.01574523 -3.100586e-16 -0.03978716
+-0.7208024 -0.8297385 -0.3552569 0.1264958 -0.01666724 7.539371e-18 -8.346252e-05
+2.009409 0.2450122 -0.7046952 -0.6340244 -0.01006415 2.966077e-16 0.02143675
+-0.726638 -0.6271632 -0.9098504 0.3794874 -0.05000171 9.260122e-17 -0.0002503876
diff --git a/skbio/stats/ordination/tests/data/example2_site_scaling2_from_vegan b/skbio/stats/ordination/tests/data/example2_site_scaling2_from_vegan
new file mode 100644
index 0000000..40b60d2
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example2_site_scaling2_from_vegan
@@ -0,0 +1,10 @@
+-1.488490 2.126756 0.7278053 -0.2272346 -3.841304 -2.304877 0.2600617
+-1.554168 2.370273 0.4755236 -2.938157e-16 4.701051e-15 4.609755 -1.324129e-14
+-1.510485 2.192167 0.005195769 0.2272346 3.841304 -2.304877 -0.2600617
+-0.8727866 -2.627171 2.688719 -1.970058 0.9482876 3.839191e-15 0.02891853
+2.972287 0.3223107 2.502946 3.502642 0.4894777 1.53029e-14 2.119383
+-0.8799689 -2.196201 0.7108885 -0.6566859 0.3160959 2.994471e-15 0.009639511
+2.641949 0.3901046 -0.08623036 -0.2111894 -0.2986100 -1.711793e-14 -4.595222
+-0.8871512 -1.765231 -1.266942 0.6566859 -0.3160959 4.162388e-16 -0.009639511
+2.473146 0.5212524 -2.513133 -3.291452 -0.1908677 1.637533e-14 2.475840
+-0.8943335 -1.334261 -3.244772 1.970058 -0.9482876 5.112392e-15 -0.02891853
diff --git a/skbio/stats/ordination/tests/data/example2_species_scaling1_from_vegan b/skbio/stats/ordination/tests/data/example2_species_scaling1_from_vegan
new file mode 100644
index 0000000..4304a82
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example2_species_scaling1_from_vegan
@@ -0,0 +1,6 @@
+1.700926 -3.648509 2.254854 0.03700915 2.285531 -3.992165 -0.9423084
+1.131309 -2.668483 -4.203715 -0.03700915 -2.285531 -3.992165 0.9423084
+4.183397 0.9492028 1.450372 3.89008 1.505626 2.78929e-14 3.804618
+3.105920 0.9508293 -1.474339 -3.319613 -1.214393 3.792181e-14 3.87476
+-0.6542268 -2.856405 1.655304 -2.139057 3.761467 -1.190933e-14 0.6985568
+-0.3552263 -1.215823 -1.449786 1.069529 -1.880733 -6.738172e-15 -0.3492784
diff --git a/skbio/stats/ordination/tests/data/example2_species_scaling2_from_vegan b/skbio/stats/ordination/tests/data/example2_species_scaling2_from_vegan
new file mode 100644
index 0000000..1ce3bfc
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example2_species_scaling2_from_vegan
@@ -0,0 +1,6 @@
+1.381987 -1.714964 0.6322725 0.007128982 0.1205124 -0.07231043 -0.00815886
+0.9191784 -1.254308 -1.178743 -0.007128982 -0.1205124 -0.07231043 0.00815886
+3.398972 0.4461683 0.4066916 0.7493367 0.07938928 5.052266e-16 0.03294182
+2.523533 0.4469328 -0.4134120 -0.639449 -0.064033 6.868811e-16 0.03354913
+-0.5315534 -1.342640 0.4641556 -0.4120414 0.1983362 -2.157147e-16 0.006048367
+-0.2886182 -0.5714919 -0.4065273 0.2060207 -0.0991681 -1.220491e-16 -0.003024184
diff --git a/skbio/stats/ordination/tests/data/example3_X b/skbio/stats/ordination/tests/data/example3_X
new file mode 100644
index 0000000..6cb0d3c
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example3_X
@@ -0,0 +1,10 @@
+1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
+2.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
+3.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
+4.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+5.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+6.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+7.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+8.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+9.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+1.000000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
diff --git a/skbio/stats/ordination/tests/data/example3_Y b/skbio/stats/ordination/tests/data/example3_Y
new file mode 100644
index 0000000..6870d41
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example3_Y
@@ -0,0 +1,13 @@
+1 0 0 0 0 0 2 4 4 
+0 0 0 0 0 0 5 6 1
+0 1 0 0 0 0 0 2 3
+11 4 0 0 8 1 6 2 0
+11 5 17 7 0 0 6 6 2
+9 6 0 0 6 2 10 1 4
+9 7 13 10 0 0 4 5 4 
+7 8 0 0 4 3 6 6 4 
+7 9 10 13 0 0 6 2 0 
+5 10 0 0 2 4 0 1 3
+
+
+
diff --git a/skbio/stats/ordination/tests/data/example3_site_scaling1_from_vegan b/skbio/stats/ordination/tests/data/example3_site_scaling1_from_vegan
new file mode 100644
index 0000000..95638db
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example3_site_scaling1_from_vegan
@@ -0,0 +1,10 @@
+0.4299704 -1.332221 0.06167733 -0.3572215 -0.2011142 -0.07731854 0.02429630 -0.1270878 0.02376451
+0.3538402 -1.299809 -0.2660402 0.7744182 0.4005334 0.1242493 0.04692828 0.03178983 -0.02752649
+0.4615241 -1.362879 0.6006922 -0.8939302 -0.4323576 -0.1067479 -0.1383998 0.1694146 0.01148472
+0.6730478 0.4632174 -0.5265713 0.191154 -0.2064744 0.2191950 -0.1100927 0.0004795276 -0.000759659
+-0.5924553 -0.02607423 -0.1955122 -0.1757430 0.1842576 0.04821126 0.05713738 0.01150025 0.01773529
+0.631247 0.1986126 -0.1796544 0.08237334 -0.1075801 -0.2214295 0.1693559 0.01071407 -0.009033185
+-0.5775539 -0.03661717 0.03720809 -0.1208918 -0.02090970 -0.06021282 -0.06707516 -0.01326357 -0.03569931
+0.5731844 -0.04684914 0.1477290 -0.001621561 0.2366887 -0.1627582 -0.1456291 -0.005405227 0.01589872
+-0.6946946 0.2120237 0.1343200 0.3356701 -0.1885661 0.01122678 0.008563613 0.001461538 0.01912039
+0.6250085 0.4474569 0.7713248 -0.3674199 0.06804227 0.3033959 0.1048540 -0.008683241 -0.009463257
diff --git a/skbio/stats/ordination/tests/data/example3_site_scaling2_from_vegan b/skbio/stats/ordination/tests/data/example3_site_scaling2_from_vegan
new file mode 100644
index 0000000..9b5ee5a
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example3_site_scaling2_from_vegan
@@ -0,0 +1,10 @@
+0.7105873 -3.081668 0.2196514 -1.245288 -1.072935 -0.5062419 0.2441267 -3.631648 1.163119
+0.5847714 -3.006693 -0.9474487 2.699651 2.136829 0.81352 0.4715303 0.908423 -1.347244
+0.7627343 -3.152586 2.139244 -3.116275 -2.306609 -0.6989299 -1.390626 4.841176 0.562103
+1.112307 1.071506 -1.875277 0.6663702 -1.101532 1.435176 -1.106200 0.01370293 -0.03718039
+-0.9791168 -0.06031443 -0.6962774 -0.6126467 0.9830066 0.3156624 0.5741102 0.3286300 0.8680277
+1.043226 0.459427 -0.6398028 0.2871566 -0.5739354 -1.449806 1.70167 0.3061643 -0.442116
+-0.9544901 -0.08470217 0.1325091 -0.4214334 -0.1115523 -0.3942425 -0.673964 -0.3790186 -1.747250
+0.9472688 -0.1083706 0.5261072 -0.005652824 1.262724 -1.065657 -1.463266 -0.1544592 0.7781397
+-1.148082 0.4904493 0.4783537 1.170159 -1.005992 0.07350714 0.08604627 0.04176476 0.9358196
+1.032916 1.035049 2.746918 -1.280840 0.3630026 1.986480 1.053561 -0.2481314 -0.4631652
diff --git a/skbio/stats/ordination/tests/data/example3_species_scaling1_from_vegan b/skbio/stats/ordination/tests/data/example3_species_scaling1_from_vegan
new file mode 100644
index 0000000..ff11b1d
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example3_species_scaling1_from_vegan
@@ -0,0 +1,9 @@
+0.1823706 0.6532424 -0.7230468 -0.006709309 -0.4387088 0.5613251 -0.1226234 -1.215044 0.2284292
+0.2336159 0.7020405 1.408284 -0.4924596 -0.1434352 0.937947 0.4323209 1.361292 -0.1116756
+-1.678295 0.2216763 -0.7060727 -0.3653404 0.6936791 0.1597882 0.4669268 0.7694895 1.713529
+-1.712479 0.2535825 0.7869925 0.7796179 -1.300374 -0.1696223 -0.5366468 -0.9018874 1.255149
+1.741418 1.242592 -1.560116 0.7790618 -1.728251 0.8161103 -1.198472 1.189485 -1.869424
+1.650260 1.327675 2.421391 -1.359427 1.595568 2.150528 2.131749 -2.371434 -2.151822
+0.4218299 -0.4121355 -0.7269577 1.510841 0.3772344 -1.23206 1.275166 0.1284123 -0.5996156
+0.2422044 -1.983235 -0.05430946 0.1839251 1.891113 -0.2729081 -1.999644 -0.06107332 -0.3832015
+0.6837066 -1.637614 0.7681627 -2.406462 -0.7918731 -2.188509 -0.06317185 -0.1040515 -0.6006456
diff --git a/skbio/stats/ordination/tests/data/example3_species_scaling2_from_vegan b/skbio/stats/ordination/tests/data/example3_species_scaling2_from_vegan
new file mode 100644
index 0000000..a33ff9f
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/example3_species_scaling2_from_vegan
@@ -0,0 +1,9 @@
+0.1103509 0.2824 -0.2030290 -0.001924623 -0.08223286 0.08573143 -0.01220389 -0.04251988 0.004667199
+0.1413590 0.3034956 0.3954412 -0.1412663 -0.02688592 0.1432531 0.04302603 0.04763777 -0.002281724
+-1.015522 0.09583179 -0.1982627 -0.1048010 0.1300252 0.02440453 0.04647012 0.02692792 0.03501032
+-1.036207 0.1096250 0.2209847 0.2236401 -0.2437459 -0.02590649 -0.05340889 -0.03156112 0.02564484
+1.053717 0.5371787 -0.4380751 0.2234806 -0.3239485 0.1246449 -0.1192759 0.04162547 -0.03819552
+0.9985587 0.5739606 0.6799181 -0.3899634 0.2990779 0.328451 0.2121588 -0.08298719 -0.0439654
+0.2552457 -0.1781683 -0.2041272 0.4333976 0.07070992 -0.1881731 0.1269088 0.004493729 -0.01225117
+0.1465559 -0.8573625 -0.01524991 0.0527605 0.3544758 -0.04168137 -0.1990112 -0.002137232 -0.007829461
+0.4137051 -0.707949 0.2156974 -0.6903142 -0.148431 -0.3342519 -0.006287074 -0.003641234 -0.01227222
diff --git a/skbio/stats/ordination/tests/data/exp_PCoAEigenResults_site b/skbio/stats/ordination/tests/data/exp_PCoAEigenResults_site
new file mode 100644
index 0000000..3b61dc3
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/exp_PCoAEigenResults_site
@@ -0,0 +1,9 @@
+-2.584654599999999802e-01 1.739995500000000028e-01 3.828758000000000161e-02 -1.944775099999999923e-01 8.311759999999999982e-02 2.624303299999999894e-01 -2.316363999999999923e-02 -1.847939999999999999e-02 0.000000000000000000e+00
+-2.710011399999999737e-01 -1.859513000000000138e-02 -8.648419000000000245e-02 1.180642499999999956e-01 -1.988083599999999895e-01 -2.117236000000000118e-02 -1.910240300000000113e-01 1.556465900000000013e-01 0.000000000000000000e+00
+2.350779000000000063e-01 9.625192999999999943e-02 -3.457927299999999926e-01 -3.208630000000000076e-03 -9.637777000000000149e-02 4.570253999999999983e-02 1.854728099999999880e-01 4.040939999999999810e-02 0.000000000000000000e+00
+2.614077000000000067e-02 -1.114596999999999984e-02 1.476606000000000030e-01 2.908766099999999799e-01 2.039454699999999898e-01 6.197123999999999688e-02 1.016413300000000020e-01 1.056909999999999933e-01 0.000000000000000000e+00
+2.850075499999999984e-01 -1.925498999999999961e-02 6.232634000000000091e-02 1.381267999999999940e-01 -1.047986000000000056e-01 9.517206999999999750e-02 -1.296361000000000041e-01 -2.206871699999999881e-01 0.000000000000000000e+00
+2.046363300000000052e-01 -1.393611499999999892e-01 2.915138199999999791e-01 -1.815667900000000057e-01 -1.595801299999999867e-01 -2.464121000000000020e-02 8.662524000000000612e-02 9.962215000000000653e-02 0.000000000000000000e+00
+2.334824000000000066e-01 2.252579700000000018e-01 -1.886231000000000019e-02 -1.077299800000000030e-01 1.771089999999999887e-01 -1.929058399999999951e-01 -1.498194700000000101e-01 3.835489999999999733e-02 0.000000000000000000e+00
+-9.496319000000000288e-02 -4.209747999999999823e-01 -1.548694499999999918e-01 -8.984274999999999900e-02 1.526181899999999869e-01 -3.342326999999999798e-02 -2.512247999999999912e-02 -5.089885000000000242e-02 0.000000000000000000e+00
+-3.599151600000000117e-01 1.138225999999999960e-01 6.622034000000000253e-02 2.975799999999999973e-02 -5.722540999999999739e-02 -1.931335100000000082e-01 1.450263300000000088e-01 -1.496586099999999975e-01 0.000000000000000000e+00
diff --git a/skbio/stats/ordination/tests/data/exp_PCoAzeros_site b/skbio/stats/ordination/tests/data/exp_PCoAzeros_site
new file mode 100644
index 0000000..bba4e46
--- /dev/null
+++ b/skbio/stats/ordination/tests/data/exp_PCoAzeros_site
@@ -0,0 +1,14 @@
+2.407881329999999875e-01 2.336771619999999938e-01 2.142755920000000147e-02 2.933648560000000002e-02 2.323964740000000057e-02 7.204402660000000513e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+1.136560330000000035e-01 1.167860260000000011e-01 4.047749600000000170e-02 4.171719389999999777e-02 5.458618300000000340e-02 5.687754019999999994e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+2.393598090000000067e-01 7.600313170000000396e-02 8.108063910000000429e-02 2.004822419999999911e-02 4.581686060000000373e-03 6.111682289999999934e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+2.129341230000000029e-01 6.047901669999999896e-02 1.108399510000000060e-01 1.568068159999999878e-01 4.018964929999999797e-02 1.244166470000000052e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+2.494895480000000054e-01 6.933176589999999473e-02 6.654290229999999806e-02 1.767019729999999914e-02 6.618815479999999585e-02 1.519420850000000045e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+1.487285489999999877e-01 7.783569120000000119e-02 5.321890979999999854e-02 3.189647279999999890e-02 8.112682020000000493e-02 2.872780789999999917e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+5.139397400000000199e-02 1.623059860000000132e-01 2.992259149999999986e-03 1.115089660000000010e-01 3.296634109999999757e-02 2.308574460000000091e-04 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+1.153621120000000028e-02 3.446314899999999848e-01 1.169549180000000084e-02 3.466400010000000170e-02 1.169378239999999992e-02 1.366916789999999930e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+3.932616380000000106e-03 5.908732849999999946e-03 2.629546940000000027e-02 7.368117420000000473e-02 1.939265150000000007e-02 2.257115020000000083e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+3.856929369999999996e-02 8.874156730000000784e-03 8.037018469999999970e-02 1.252655340000000117e-01 6.270869140000000230e-02 9.891935890000000051e-03 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+4.211582109999999907e-02 5.655546539999999761e-02 3.122599229999999948e-01 4.320305709999999744e-02 3.457770230000000149e-03 2.089115019999999989e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+5.158303490000000213e-01 2.909775239999999943e-02 9.484347120000000131e-02 2.340326009999999998e-02 3.819325810000000210e-02 4.608673130000000284e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+3.180272690000000013e-01 1.500964499999999924e-01 6.675638650000000063e-02 3.818534149999999738e-02 1.598195930000000098e-02 3.005567279999999883e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+3.237616709999999731e-01 4.748628319999999986e-02 5.282383000000000217e-02 4.852971259999999676e-02 7.869736770000000659e-02 4.101528460000000015e-02 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
diff --git a/skbio/stats/ordination/tests/test_ordination.py b/skbio/stats/ordination/tests/test_ordination.py
new file mode 100644
index 0000000..d377a56
--- /dev/null
+++ b/skbio/stats/ordination/tests/test_ordination.py
@@ -0,0 +1,896 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from six import binary_type, text_type, StringIO
+
+import warnings
+import unittest
+
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+from IPython.core.display import Image, SVG
+from nose.tools import assert_is_instance, assert_raises_regexp, assert_true
+from scipy.spatial.distance import pdist
+
+from skbio import DistanceMatrix
+from skbio.stats.ordination import (
+    CA, RDA, CCA, PCoA, OrdinationResults, corr, mean_and_std,
+    assert_ordination_results_equal)
+from skbio.util import get_data_path
+
+
+def normalize_signs(arr1, arr2):
+    """Change column signs so that "column" and "-column" compare equal.
+
+    This is needed because results of eigenproblmes can have signs
+    flipped, but they're still right.
+
+    Notes
+    =====
+
+    This function tries hard to make sure that, if you find "column"
+    and "-column" almost equal, calling a function like np.allclose to
+    compare them after calling `normalize_signs` succeeds.
+
+    To do so, it distinguishes two cases for every column:
+
+    - It can be all almost equal to 0 (this includes a column of
+      zeros).
+    - Otherwise, it has a value that isn't close to 0.
+
+    In the first case, no sign needs to be flipped. I.e., for
+    |epsilon| small, np.allclose(-epsilon, 0) is true if and only if
+    np.allclose(epsilon, 0) is.
+
+    In the second case, the function finds the number in the column
+    whose absolute value is largest. Then, it compares its sign with
+    the number found in the same index, but in the other array, and
+    flips the sign of the column as needed.
+    """
+    # Let's convert everyting to floating point numbers (it's
+    # reasonable to assume that eigenvectors will already be floating
+    # point numbers). This is necessary because np.array(1) /
+    # np.array(0) != np.array(1.) / np.array(0.)
+    arr1 = np.asarray(arr1, dtype=np.float64)
+    arr2 = np.asarray(arr2, dtype=np.float64)
+
+    if arr1.shape != arr2.shape:
+        raise ValueError(
+            "Arrays must have the same shape ({0} vs {1}).".format(arr1.shape,
+                                                                   arr2.shape)
+            )
+
+    # To avoid issues around zero, we'll compare signs of the values
+    # with highest absolute value
+    max_idx = np.abs(arr1).argmax(axis=0)
+    max_arr1 = arr1[max_idx, range(arr1.shape[1])]
+    max_arr2 = arr2[max_idx, range(arr2.shape[1])]
+
+    sign_arr1 = np.sign(max_arr1)
+    sign_arr2 = np.sign(max_arr2)
+
+    # Store current warnings, and ignore division by zero (like 1. /
+    # 0.) and invalid operations (like 0. / 0.)
+    wrn = np.seterr(invalid='ignore', divide='ignore')
+    differences = sign_arr1 / sign_arr2
+    # The values in `differences` can be:
+    #    1 -> equal signs
+    #   -1 -> diff signs
+    #   Or nan (0/0), inf (nonzero/0), 0 (0/nonzero)
+    np.seterr(**wrn)
+
+    # Now let's deal with cases where `differences != \pm 1`
+    special_cases = (~np.isfinite(differences)) | (differences == 0)
+    # In any of these cases, the sign of the column doesn't matter, so
+    # let's just keep it
+    differences[special_cases] = 1
+
+    return arr1 * differences, arr2
+
+
+def chi_square_distance(data_table, between_rows=True):
+    """Computes the chi-square distance between two rows or columns of input.
+
+    It is a measure that has no upper limit, and it excludes double-zeros.
+
+    Parameters
+    ----------
+    data_table : 2D array_like
+        An array_like object of shape (n, p). The input must be a
+        frequency table (so that the sum of all cells equals 1, and
+        all values are non-negative).
+    between_rows : bool (defaults to True)
+        Indicates whether distance is computed between rows (default)
+        or columns.
+
+    Returns
+    -------
+    Y : ndarray
+        Returns a condensed distance matrix. For each i and j (where
+        i<j<n), the chi square distance between u=X[i] and v=X[j] is
+        computed and stored in `Y[(n choose 2) - (n - i choose 2) + (j
+        - i - 1)]`.
+
+    See Also
+    --------
+    scipy.spatial.distance.squareform
+
+    References
+    ----------
+    This coefficient appears in Legendre and Legendre (1998) as
+    formula 7.54 (as D_{16}). Another source is
+    http://www.springerreference.com/docs/html/chapterdbid/60817.html
+    """
+    data_table = np.asarray(data_table, dtype=np.float64)
+    if not np.allclose(data_table.sum(), 1):
+        raise ValueError("Input is not a frequency table: if it is an"
+                         " abundance table you could scale it as"
+                         " `data_table / data_table.sum()`.")
+    if np.any(data_table < 0):
+        raise ValueError("A frequency table can't have negative values.")
+
+    # The distances are always computed between the rows of F
+    F = data_table if between_rows else data_table.T
+
+    row_sums = F.sum(axis=1, keepdims=True)
+    column_sums = F.sum(axis=0)
+    scaled_F = F / (row_sums * np.sqrt(column_sums))
+
+    return pdist(scaled_F, 'euclidean')
+
+
+class TestNormalizeSigns(object):
+    def test_shapes_and_nonarray_input(self):
+        with npt.assert_raises(ValueError):
+            normalize_signs([[1, 2], [3, 5]], [[1, 2]])
+
+    def test_works_when_different(self):
+        """Taking abs value of everything would lead to false
+        positives."""
+        a = np.array([[1, -1],
+                      [2, 2]])
+        b = np.array([[-1, -1],
+                      [2, 2]])
+        with npt.assert_raises(AssertionError):
+            npt.assert_equal(*normalize_signs(a, b))
+
+    def test_easy_different(self):
+        a = np.array([[1, 2],
+                      [3, -1]])
+        b = np.array([[-1, 2],
+                      [-3, -1]])
+        npt.assert_equal(*normalize_signs(a, b))
+
+    def test_easy_already_equal(self):
+        a = np.array([[1, -2],
+                      [3, 1]])
+        b = a.copy()
+        npt.assert_equal(*normalize_signs(a, b))
+
+    def test_zeros(self):
+        a = np.array([[0, 3],
+                      [0, -1]])
+        b = np.array([[0, -3],
+                      [0, 1]])
+        npt.assert_equal(*normalize_signs(a, b))
+
+    def test_hard(self):
+        a = np.array([[0, 1],
+                      [1, 2]])
+        b = np.array([[0, 1],
+                      [-1, 2]])
+        npt.assert_equal(*normalize_signs(a, b))
+
+    def test_harder(self):
+        """We don't want a value that might be negative due to
+        floating point inaccuracies to make a call to allclose in the
+        result to be off."""
+        a = np.array([[-1e-15, 1],
+                      [5, 2]])
+        b = np.array([[1e-15, 1],
+                      [5, 2]])
+        # Clearly a and b would refer to the same "column
+        # eigenvectors" but a slopppy implementation of
+        # normalize_signs could change the sign of column 0 and make a
+        # comparison fail
+        npt.assert_almost_equal(*normalize_signs(a, b))
+
+    def test_column_zeros(self):
+        a = np.array([[0, 1],
+                      [0, 2]])
+        b = np.array([[0, -1],
+                      [0, -2]])
+        npt.assert_equal(*normalize_signs(a, b))
+
+    def test_column_almost_zero(self):
+        a = np.array([[1e-15, 3],
+                      [-2e-14, -6]])
+        b = np.array([[0, 3],
+                      [-1e-15, -6]])
+        npt.assert_almost_equal(*normalize_signs(a, b))
+
+
+class TestChiSquareDistance(object):
+    def test_errors(self):
+        a = np.array([[-0.5, 0],
+                      [1, 0.5]])
+        with npt.assert_raises(ValueError):
+            chi_square_distance(a)
+        b = np.array([[0.5, 0],
+                      [0.5, 0.1]])
+        with npt.assert_raises(ValueError):
+            chi_square_distance(b)
+
+    def test_results(self):
+        """Some random numbers."""
+        a = np.array([[0.02808988764,  0.056179775281,  0.084269662921,
+                       0.140449438202],
+                      [0.01404494382,  0.196629213483,  0.109550561798,
+                       0.033707865169],
+                      [0.02808988764,  0.112359550562,  0.056179775281,
+                       0.140449438202]])
+        dist = chi_square_distance(a)
+        expected = [0.91413919964333856,
+                    0.33651110106124049,
+                    0.75656884966269089]
+        npt.assert_almost_equal(dist, expected)
+
+    def test_results2(self):
+        """A tiny example from Legendre & Legendre 1998, p. 285."""
+        a = np.array([[0, 1, 1],
+                      [1, 0, 0],
+                      [0, 4, 4]])
+        dist = chi_square_distance(a / a.sum())
+        # Note L&L used a terrible calculator because they got a wrong
+        # number (says it's 3.477) :(
+        expected = [3.4785054261852175, 0, 3.4785054261852175]
+        npt.assert_almost_equal(dist, expected)
+
+
+class TestUtils(object):
+    def setup(self):
+        self.x = np.array([[1, 2, 3], [4, 5, 6]])
+        self.y = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+
+    def test_mean_and_std_no_mean_no_std(self):
+        with npt.assert_raises(ValueError):
+            mean_and_std(self.x, with_mean=False, with_std=False)
+
+    def test_corr_shape_mismatch(self):
+        with npt.assert_raises(ValueError):
+            corr(self.x, self.y)
+
+    def test_assert_ordination_results_equal(self):
+        minimal1 = OrdinationResults([1, 2])
+
+        # a minimal set of results should be equal to itself
+        assert_ordination_results_equal(minimal1, minimal1)
+
+        # type mismatch
+        with npt.assert_raises(AssertionError):
+            assert_ordination_results_equal(minimal1, 'foo')
+
+        # numeric values should be checked that they're almost equal
+        almost_minimal1 = OrdinationResults([1.0000001, 1.9999999])
+        assert_ordination_results_equal(minimal1, almost_minimal1)
+
+        # species_ids missing in one, present in the other
+        almost_minimal1.species_ids = ['abc', 'def']
+        with npt.assert_raises(AssertionError):
+            assert_ordination_results_equal(minimal1, almost_minimal1)
+        almost_minimal1.species_ids = None
+
+        # site_ids missing in one, present in the other
+        almost_minimal1.site_ids = ['abc', 'def']
+        with npt.assert_raises(AssertionError):
+            assert_ordination_results_equal(minimal1, almost_minimal1)
+        almost_minimal1.site_ids = None
+
+        # test each of the optional numeric attributes
+        for attr in ('species', 'site', 'biplot', 'site_constraints',
+                     'proportion_explained'):
+            # missing optional numeric attribute in one, present in the other
+            setattr(almost_minimal1, attr, [[1, 2], [3, 4]])
+            with npt.assert_raises(AssertionError):
+                assert_ordination_results_equal(minimal1, almost_minimal1)
+            setattr(almost_minimal1, attr, None)
+
+            # optional numeric attributes present in both, but not almost equal
+            setattr(minimal1, attr, [[1, 2], [3, 4]])
+            setattr(almost_minimal1, attr, [[1, 2], [3.00002, 4]])
+            with npt.assert_raises(AssertionError):
+                assert_ordination_results_equal(minimal1, almost_minimal1)
+            setattr(minimal1, attr, None)
+            setattr(almost_minimal1, attr, None)
+
+            # optional numeric attributes present in both, and almost equal
+            setattr(minimal1, attr, [[1, 2], [3, 4]])
+            setattr(almost_minimal1, attr, [[1, 2], [3.00000002, 4]])
+            assert_ordination_results_equal(minimal1, almost_minimal1)
+            setattr(minimal1, attr, None)
+            setattr(almost_minimal1, attr, None)
+
+
+class TestCAResults(object):
+    def setup(self):
+        """Data from table 9.11 in Legendre & Legendre 1998."""
+        self.X = np.loadtxt(get_data_path('L&L_CA_data'))
+        self.ordination = CA(self.X, ['Site1', 'Site2', 'Site3'],
+                             ['Species1', 'Species2', 'Species3'])
+
+    def test_scaling2(self):
+        scores = self.ordination.scores(scaling=2)
+        # p. 460 L&L 1998
+        F_hat = np.array([[0.40887, -0.06955],
+                          [-0.11539,  0.29977],
+                          [-0.30997, -0.18739]])
+        npt.assert_almost_equal(*normalize_signs(F_hat, scores.species),
+                                decimal=5)
+        V_hat = np.array([[-0.84896, -0.88276],
+                          [-0.22046,  1.34482],
+                          [1.66697, -0.47032]])
+        npt.assert_almost_equal(*normalize_signs(V_hat, scores.site),
+                                decimal=5)
+
+    def test_scaling1(self):
+        scores = self.ordination.scores(scaling=1)
+        # p. 458
+        V = np.array([[1.31871, -0.34374],
+                      [-0.37215,  1.48150],
+                      [-0.99972, -0.92612]])
+        npt.assert_almost_equal(*normalize_signs(V, scores.species), decimal=5)
+        F = np.array([[-0.26322, -0.17862],
+                      [-0.06835,  0.27211],
+                      [0.51685, -0.09517]])
+        npt.assert_almost_equal(*normalize_signs(F, scores.site), decimal=5)
+
+    def test_maintain_chi_square_distance_scaling1(self):
+        """In scaling 1, chi^2 distance among rows (sites) is equal to
+        euclidean distance between them in transformed space."""
+        frequencies = self.X / self.X.sum()
+        chi2_distances = chi_square_distance(frequencies)
+        transformed_sites = self.ordination.scores(1).site
+        euclidean_distances = pdist(transformed_sites, 'euclidean')
+        npt.assert_almost_equal(chi2_distances, euclidean_distances)
+
+    def test_maintain_chi_square_distance_scaling2(self):
+        """In scaling 2, chi^2 distance among columns (species) is
+        equal to euclidean distance between them in transformed space."""
+        frequencies = self.X / self.X.sum()
+        chi2_distances = chi_square_distance(frequencies, between_rows=False)
+        transformed_species = self.ordination.scores(2).species
+        euclidean_distances = pdist(transformed_species, 'euclidean')
+        npt.assert_almost_equal(chi2_distances, euclidean_distances)
+
+
+class TestCAErrors(object):
+    def test_negative(self):
+        X = np.array([[1, 2], [-0.1, -2]])
+        with npt.assert_raises(ValueError):
+            CA(X, None, None)
+
+
+class TestRDAErrors(object):
+    def test_shape(self):
+        for n, p, n_, m in [(3, 4, 2, 1), (3, 4, 3, 10)]:
+            Y = np.random.randn(n, p)
+            X = np.random.randn(n_, m)
+            yield npt.assert_raises, ValueError, RDA, Y, X, None, None
+
+
+class TestRDAResults(object):
+    # STATUS: L&L only shows results with scaling 1, and they agree
+    # with vegan's (module multiplying by a constant). I can also
+    # compute scaling 2, agreeing with vegan, but there are no written
+    # results in L&L.
+    def setup(self):
+        """Data from table 11.3 in Legendre & Legendre 1998."""
+        Y = np.loadtxt(get_data_path('example2_Y'))
+        X = np.loadtxt(get_data_path('example2_X'))
+        self.ordination = RDA(Y, X,
+                              ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
+                               'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
+                              ['Species0', 'Species1', 'Species2', 'Species3',
+                               'Species4', 'Species5'])
+
+    def test_scaling1(self):
+        scores = self.ordination.scores(1)
+
+        # Load data as computed with vegan 2.0-8
+        vegan_species = np.loadtxt(get_data_path(
+            'example2_species_scaling1_from_vegan'))
+        npt.assert_almost_equal(scores.species, vegan_species, decimal=6)
+
+        vegan_site = np.loadtxt(get_data_path(
+            'example2_site_scaling1_from_vegan'))
+        npt.assert_almost_equal(scores.site, vegan_site, decimal=6)
+
+    def test_scaling2(self):
+        scores = self.ordination.scores(2)
+
+        # Load data as computed with vegan 2.0-8
+        vegan_species = np.loadtxt(get_data_path(
+            'example2_species_scaling2_from_vegan'))
+        npt.assert_almost_equal(scores.species, vegan_species, decimal=6)
+
+        vegan_site = np.loadtxt(get_data_path(
+            'example2_site_scaling2_from_vegan'))
+        npt.assert_almost_equal(scores.site, vegan_site, decimal=6)
+
+
+class TestCCAErrors(object):
+    def setup(self):
+        """Data from table 11.3 in Legendre & Legendre 1998."""
+        self.Y = np.loadtxt(get_data_path('example3_Y'))
+        self.X = np.loadtxt(get_data_path('example3_X'))
+
+    def test_shape(self):
+        X, Y = self.X, self.Y
+        with npt.assert_raises(ValueError):
+            CCA(Y, X[:-1], None, None)
+
+    def test_Y_values(self):
+        X, Y = self.X, self.Y
+        Y[0, 0] = -1
+        with npt.assert_raises(ValueError):
+            CCA(Y, X, None, None)
+        Y[0] = 0
+        with npt.assert_raises(ValueError):
+            CCA(Y, X, None, None)
+
+
+class TestCCAResults(object):
+    def setup(self):
+        """Data from table 11.3 in Legendre & Legendre 1998
+        (p. 590). Loaded results as computed with vegan 2.0-8 and
+        compared with table 11.5 if also there."""
+        Y = np.loadtxt(get_data_path('example3_Y'))
+        X = np.loadtxt(get_data_path('example3_X'))
+        self.ordination = CCA(Y, X[:, :-1],
+                              ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
+                               'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
+                              ['Species0', 'Species1', 'Species2', 'Species3',
+                               'Species4', 'Species5', 'Species6', 'Species7',
+                               'Species8'])
+
+    def test_scaling1_species(self):
+        scores = self.ordination.scores(1)
+
+        vegan_species = np.loadtxt(get_data_path(
+            'example3_species_scaling1_from_vegan'))
+        npt.assert_almost_equal(scores.species, vegan_species, decimal=6)
+
+    def test_scaling1_site(self):
+        scores = self.ordination.scores(1)
+
+        vegan_site = np.loadtxt(get_data_path(
+            'example3_site_scaling1_from_vegan'))
+        npt.assert_almost_equal(scores.site, vegan_site, decimal=4)
+
+    def test_scaling2_species(self):
+        scores = self.ordination.scores(2)
+
+        vegan_species = np.loadtxt(get_data_path(
+            'example3_species_scaling2_from_vegan'))
+        npt.assert_almost_equal(scores.species, vegan_species, decimal=5)
+
+    def test_scaling2_site(self):
+        scores = self.ordination.scores(2)
+
+        vegan_site = np.loadtxt(get_data_path(
+            'example3_site_scaling2_from_vegan'))
+        npt.assert_almost_equal(scores.site, vegan_site, decimal=4)
+
+
class TestPCoAResults(object):
    """PCoA regression tests against the sample data set from page 111 of
    W.J Krzanowski. Principles of multivariate analysis, 2000, Oxford
    University Press."""

    def setup(self):
        data = np.loadtxt(get_data_path('PCoA_sample_data'))
        self.dist_matrix = DistanceMatrix(data, map(str, range(data.shape[0])))

    def test_negative_eigenvalue_warning(self):
        """Construction warns because the data has small negative
        eigenvalues."""
        npt.assert_warns(RuntimeWarning, PCoA, self.dist_matrix)

    def test_values(self):
        """Adapted from cogent's `test_principal_coordinate_analysis`:
        "I took the example in the book (see intro info), and did the
        principal coordinates analysis, plotted the data and it looked
        right"."""
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=RuntimeWarning)
            ordination = PCoA(self.dist_matrix)
        results = ordination.scores()

        exp_eigvals = np.array([0.73599103, 0.26260032, 0.14926222,
                                0.06990457, 0.02956972, 0.01931184,
                                0., 0., 0., 0., 0., 0., 0., 0.])
        npt.assert_almost_equal(results.eigvals, exp_eigvals)

        exp_site = np.loadtxt(get_data_path('exp_PCoAzeros_site'))
        # Compare magnitudes only: column signs may be swapped.
        npt.assert_almost_equal(np.abs(results.site), exp_site)

        exp_prop_expl = np.array([0.58105792, 0.20732046, 0.1178411,
                                  0.05518899, 0.02334502, 0.01524651,
                                  0., 0., 0., 0., 0., 0., 0., 0.])
        npt.assert_almost_equal(results.proportion_explained, exp_prop_expl)

        exp_site_ids = [str(i) for i in range(14)]
        npt.assert_equal(results.site_ids, exp_site_ids)
+
+
class TestPCoAResultsExtensive(object):
    """PCoA regression tests on a second sample data set."""

    def setup(self):
        data = np.loadtxt(get_data_path('PCoA_sample_data_2'))
        self.ids = [str(i) for i in range(data.shape[0])]
        self.ordination = PCoA(DistanceMatrix(data, self.ids))

    def test_values(self):
        """Coordinates, eigenvalues and proportions match reference values."""
        results = self.ordination.scores()

        # One eigenvalue is reported per ordination axis.
        npt.assert_equal(len(results.eigvals), len(results.site[0]))

        exp_site = np.array([[-0.028597, 0.22903853, 0.07055272,
                              0.26163576, 0.28398669, 0.0],
                             [0.37494056, 0.22334055, -0.20892914,
                              0.05057395, -0.18710366, 0.0],
                             [-0.33517593, -0.23855979, -0.3099887,
                              0.11521787, -0.05021553, 0.0],
                             [0.25412394, -0.4123464, 0.23343642,
                              0.06403168, -0.00482608, 0.0],
                             [-0.28256844, 0.18606911, 0.28875631,
                              -0.06455635, -0.21141632, 0.0],
                             [0.01727687, 0.012458, -0.07382761,
                              -0.42690292, 0.1695749, 0.0]])
        # Axis signs are arbitrary, so normalize before comparing.
        npt.assert_almost_equal(*normalize_signs(exp_site, results.site))

        exp_eigvals = np.array([0.3984635, 0.36405689, 0.28804535, 0.27479983,
                                0.19165361, 0.0])
        npt.assert_almost_equal(results.eigvals, exp_eigvals)

        exp_prop_expl = np.array([0.2626621381, 0.2399817314, 0.1898758748,
                                  0.1811445992, 0.1263356565, 0.0])
        npt.assert_almost_equal(results.proportion_explained, exp_prop_expl)

        npt.assert_equal(results.site_ids, self.ids)
+
+
class TestPCoAEigenResults(object):
    """PCoA regression tests on a distance matrix read from disk."""

    def setup(self):
        dm = DistanceMatrix.read(get_data_path('PCoA_sample_data_3'))
        self.ordination = PCoA(dm)

        self.ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593',
                    'PC.355', 'PC.607', 'PC.634']

    def test_values(self):
        """Coordinates, eigenvalues and proportions match reference values."""
        results = self.ordination.scores()

        # One eigenvalue is reported per ordination axis.
        npt.assert_almost_equal(len(results.eigvals), len(results.site[0]))

        exp_site = np.loadtxt(get_data_path('exp_PCoAEigenResults_site'))
        # Axis signs are arbitrary, so normalize before comparing.
        npt.assert_almost_equal(*normalize_signs(exp_site, results.site))

        exp_eigvals = np.array([0.51236726, 0.30071909, 0.26791207, 0.20898868,
                                0.19169895, 0.16054235,  0.15017696,
                                0.12245775, 0.0])
        npt.assert_almost_equal(results.eigvals, exp_eigvals)

        exp_prop_expl = np.array([0.2675738328, 0.157044696, 0.1399118638,
                                  0.1091402725, 0.1001110485, 0.0838401162,
                                  0.0784269939, 0.0639511764, 0.0])
        npt.assert_almost_equal(results.proportion_explained, exp_prop_expl)

        npt.assert_equal(results.site_ids, self.ids)
+
+
class TestPCoAPrivateMethods(object):
    """Unit tests for PCoA's private matrix helpers."""

    def setup(self):
        # 2x3 and 3x3 matrices of consecutive integers starting at 1.
        self.matrix = np.arange(1, 7).reshape(2, 3)
        self.matrix2 = np.arange(1, 10).reshape(3, 3)

    def test_E_matrix(self):
        observed = PCoA._E_matrix(self.matrix)
        expected = np.array([[-0.5,  -2.,  -4.5],
                             [-8., -12.5, -18.]])
        npt.assert_almost_equal(observed, expected)

    def test_F_matrix(self):
        observed = PCoA._F_matrix(self.matrix2)
        # Note that `test_make_F_matrix` in cogent is wrong
        expected = np.zeros((3, 3))
        npt.assert_almost_equal(observed, expected)
+
+
class TestPCoAErrors(object):
    def test_input(self):
        """A plain nested list (not a DistanceMatrix) must be rejected."""
        bad_input = [[1, 2], [3, 4]]
        with npt.assert_raises(TypeError):
            PCoA(bad_input)
+
+
class TestOrdinationResults(unittest.TestCase):
    """Exercises OrdinationResults: deprecated file I/O round-tripping,
    string representation, the matplotlib-based ``plot`` API, and the
    private plotting helpers it relies on."""

    def setUp(self):
        # Define in-memory CA results to serialize and deserialize.
        eigvals = np.array([0.0961330159181, 0.0409418140138])
        species = np.array([[0.408869425742, 0.0695518116298],
                            [-0.1153860437, -0.299767683538],
                            [-0.309967102571, 0.187391917117]])
        site = np.array([[-0.848956053187, 0.882764759014],
                         [-0.220458650578, -1.34482000302],
                         [1.66697179591, 0.470324389808]])
        biplot = None
        site_constraints = None
        prop_explained = None
        species_ids = ['Species1', 'Species2', 'Species3']
        site_ids = ['Site1', 'Site2', 'Site3']

        self.ordination_results = OrdinationResults(
            eigvals=eigvals, species=species, site=site, biplot=biplot,
            site_constraints=site_constraints,
            proportion_explained=prop_explained, species_ids=species_ids,
            site_ids=site_ids)

        # DataFrame for testing plot method. Has a categorical column with a
        # mix of numbers and strings. Has a numeric column with a mix of ints,
        # floats, and strings that can be converted to floats. Has a numeric
        # column with missing data (np.nan).
        self.df = pd.DataFrame([['foo', '42', 10],
                                [22, 0, 8],
                                [22, -4.2, np.nan],
                                ['foo', '42.19', 11]],
                               index=['A', 'B', 'C', 'D'],
                               columns=['categorical', 'numeric', 'nancolumn'])

        # Minimal ordination results for easier testing of plotting method.
        # Paired with df above.
        eigvals = np.array([0.50, 0.25, 0.25])
        site = np.array([[0.1, 0.2, 0.3],
                         [0.2, 0.3, 0.4],
                         [0.3, 0.4, 0.5],
                         [0.4, 0.5, 0.6]])
        self.min_ord_results = OrdinationResults(eigvals=eigvals, site=site,
                                                 site_ids=['A', 'B', 'C', 'D'])

    def test_deprecated_io(self):
        """to_file/from_file round-trip while emitting DeprecationWarning."""
        fh = StringIO()
        npt.assert_warns(DeprecationWarning, self.ordination_results.to_file,
                         fh)
        fh.seek(0)
        deserialized = npt.assert_warns(DeprecationWarning,
                                        OrdinationResults.from_file, fh)
        assert_ordination_results_equal(deserialized, self.ordination_results)
        # Exact type check (not isinstance): deserialization must rebuild the
        # same class.
        self.assertTrue(type(deserialized) == OrdinationResults)

    def test_str(self):
        """__str__ summarizes each attribute, with N/A for missing ones."""
        exp = ("Ordination results:\n"
               "\tEigvals: 2\n"
               "\tProportion explained: N/A\n"
               "\tSpecies: 3x2\n"
               "\tSite: 3x2\n"
               "\tBiplot: N/A\n"
               "\tSite constraints: N/A\n"
               "\tSpecies IDs: 'Species1', 'Species2', 'Species3'\n"
               "\tSite IDs: 'Site1', 'Site2', 'Site3'")
        obs = str(self.ordination_results)
        self.assertEqual(obs, exp)

        # all optional attributes missing
        exp = ("Ordination results:\n"
               "\tEigvals: 1\n"
               "\tProportion explained: N/A\n"
               "\tSpecies: N/A\n"
               "\tSite: N/A\n"
               "\tBiplot: N/A\n"
               "\tSite constraints: N/A\n"
               "\tSpecies IDs: N/A\n"
               "\tSite IDs: N/A")
        obs = str(OrdinationResults(np.array([4.2])))
        self.assertEqual(obs, exp)

    def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title,
                                  exp_legend_exists, exp_xlabel, exp_ylabel,
                                  exp_zlabel):
        """Shared assertions for figures produced by ``plot``: subplot
        count, title, absence of tick labels, legend presence, and the
        three axis labels."""
        # check type
        assert_is_instance(fig, mpl.figure.Figure)

        # check number of subplots
        axes = fig.get_axes()
        npt.assert_equal(len(axes), exp_num_subplots)

        # check title
        ax = axes[0]
        npt.assert_equal(ax.get_title(), exp_title)

        # shouldn't have tick labels
        for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() +
                           ax.get_zticklabels()):
            npt.assert_equal(tick_label.get_text(), '')

        # check if legend is present
        legend = ax.get_legend()
        if exp_legend_exists:
            assert_true(legend is not None)
        else:
            assert_true(legend is None)

        # check axis labels
        npt.assert_equal(ax.get_xlabel(), exp_xlabel)
        npt.assert_equal(ax.get_ylabel(), exp_ylabel)
        npt.assert_equal(ax.get_zlabel(), exp_zlabel)

    def test_plot_no_metadata(self):
        """Default plot: single subplot, no legend, axes labeled 0/1/2."""
        fig = self.min_ord_results.plot()
        self.check_basic_figure_sanity(fig, 1, '', False, '0', '1', '2')

    def test_plot_with_numeric_metadata_and_plot_options(self):
        """Numeric coloring adds a colorbar (second subplot), no legend."""
        fig = self.min_ord_results.plot(
            self.df, 'numeric', axes=(1, 0, 2),
            axis_labels=['PC 2', 'PC 1', 'PC 3'], title='a title', cmap='Reds')
        self.check_basic_figure_sanity(
            fig, 2, 'a title', False, 'PC 2', 'PC 1', 'PC 3')

    def test_plot_with_categorical_metadata_and_plot_options(self):
        """Categorical coloring adds a legend instead of a colorbar."""
        fig = self.min_ord_results.plot(
            self.df, 'categorical', axes=[2, 0, 1], title='a title',
            cmap='Accent')
        self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1')

    def test_plot_with_invalid_axis_labels(self):
        """axis_labels must have exactly three entries."""
        with assert_raises_regexp(ValueError, 'axis_labels.*4'):
            self.min_ord_results.plot(axes=[2, 0, 1],
                                      axis_labels=('a', 'b', 'c', 'd'))

    def test_validate_plot_axes_valid_input(self):
        # shouldn't raise an error on valid input. nothing is returned, so
        # nothing to check here
        self.min_ord_results._validate_plot_axes(self.min_ord_results.site.T,
                                                 (1, 2, 0))

    def test_validate_plot_axes_invalid_input(self):
        """Each invalid `axes` argument raises ValueError with a message
        identifying the problem."""
        # not enough dimensions
        with assert_raises_regexp(ValueError, '2 dimension\(s\)'):
            self.min_ord_results._validate_plot_axes(
                np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))

        coord_matrix = self.min_ord_results.site.T

        # wrong number of axes
        with assert_raises_regexp(ValueError, 'exactly three.*found 0'):
            self.min_ord_results._validate_plot_axes(coord_matrix, [])
        with assert_raises_regexp(ValueError, 'exactly three.*found 4'):
            self.min_ord_results._validate_plot_axes(coord_matrix,
                                                     (0, 1, 2, 3))

        # duplicate axes
        with assert_raises_regexp(ValueError, 'must be unique'):
            self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))

        # out of range axes
        with assert_raises_regexp(ValueError, 'axes\[1\].*3'):
            self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
        with assert_raises_regexp(ValueError, 'axes\[2\].*3'):
            self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))

    def test_get_plot_point_colors_invalid_input(self):
        """Invalid df/column combinations raise ValueError."""
        # column provided without df
        with npt.assert_raises(ValueError):
            self.min_ord_results._get_plot_point_colors(None, 'numeric',
                                                        ['B', 'C'], 'jet')

        # df provided without column
        with npt.assert_raises(ValueError):
            self.min_ord_results._get_plot_point_colors(self.df, None,
                                                        ['B', 'C'], 'jet')

        # column not in df
        with assert_raises_regexp(ValueError, 'missingcol'):
            self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
                                                        ['B', 'C'], 'jet')

        # id not in df
        with assert_raises_regexp(ValueError, 'numeric'):
            self.min_ord_results._get_plot_point_colors(
                self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')

        # missing data in df
        with assert_raises_regexp(ValueError, 'nancolumn'):
            self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
                                                        ['B', 'C', 'A'], 'jet')

    def test_get_plot_point_colors_no_df_or_column(self):
        """No metadata at all yields (None, None)."""
        obs = self.min_ord_results._get_plot_point_colors(None, None,
                                                          ['B', 'C'], 'jet')
        npt.assert_equal(obs, (None, None))

    def test_get_plot_point_colors_numeric_column(self):
        """Numeric columns map straight to float values (no color dict)."""
        # subset of the ids in df
        exp = [0.0, -4.2, 42.0]
        obs = self.min_ord_results._get_plot_point_colors(
            self.df, 'numeric', ['B', 'C', 'A'], 'jet')
        npt.assert_almost_equal(obs[0], exp)
        assert_true(obs[1] is None)

        # all ids in df
        exp = [0.0, 42.0, 42.19, -4.2]
        obs = self.min_ord_results._get_plot_point_colors(
            self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet')
        npt.assert_almost_equal(obs[0], exp)
        assert_true(obs[1] is None)

    def test_get_plot_point_colors_categorical_column(self):
        """Categorical columns map each group to an RGBA color and also
        return a group-to-color dict."""
        # subset of the ids in df
        exp_colors = [[0., 0., 0.5, 1.], [0., 0., 0.5, 1.], [0.5, 0., 0., 1.]]
        exp_color_dict = {
            'foo': [0.5, 0., 0., 1.],
            22: [0., 0., 0.5, 1.]
        }
        obs = self.min_ord_results._get_plot_point_colors(
            self.df, 'categorical', ['B', 'C', 'A'], 'jet')
        npt.assert_almost_equal(obs[0], exp_colors)
        npt.assert_equal(obs[1], exp_color_dict)

        # all ids in df
        exp_colors = [[0., 0., 0.5, 1.], [0.5, 0., 0., 1.], [0.5, 0., 0., 1.],
                      [0., 0., 0.5, 1.]]
        obs = self.min_ord_results._get_plot_point_colors(
            self.df, 'categorical', ['B', 'A', 'D', 'C'], 'jet')
        npt.assert_almost_equal(obs[0], exp_colors)
        # should get same color dict as before
        npt.assert_equal(obs[1], exp_color_dict)

    def test_plot_categorical_legend(self):
        """_plot_categorical_legend attaches a legend with the given labels
        and colors to an existing 3D axis."""
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        # we shouldn't have a legend yet
        assert_true(ax.get_legend() is None)

        self.min_ord_results._plot_categorical_legend(
            ax, {'foo': 'red', 'bar': 'green'})

        # make sure we have a legend now
        legend = ax.get_legend()
        assert_true(legend is not None)

        # do some light sanity checking to make sure our input labels and
        # colors are present. we're not using nose.tools.assert_items_equal
        # because it isn't available in Python 3.
        labels = [t.get_text() for t in legend.get_texts()]
        npt.assert_equal(sorted(labels), ['bar', 'foo'])

        colors = [l.get_color() for l in legend.get_lines()]
        npt.assert_equal(sorted(colors), ['green', 'red'])

    def test_repr_png(self):
        """IPython PNG repr returns non-empty bytes."""
        obs = self.min_ord_results._repr_png_()
        assert_is_instance(obs, binary_type)
        assert_true(len(obs) > 0)

    def test_repr_svg(self):
        obs = self.min_ord_results._repr_svg_()
        # print_figure(format='svg') can return text or bytes depending on the
        # version of IPython
        assert_true(isinstance(obs, text_type) or isinstance(obs, binary_type))
        assert_true(len(obs) > 0)

    def test_png(self):
        # `png` property wraps the PNG repr in an IPython Image object.
        assert_is_instance(self.min_ord_results.png, Image)

    def test_svg(self):
        # `svg` property wraps the SVG repr in an IPython SVG object.
        assert_is_instance(self.min_ord_results.svg, SVG)
+
+
if __name__ == '__main__':
    # nose is imported lazily so it is only needed when running this file
    # directly. The nose runner is used (rather than unittest.main) because
    # several test classes above are plain-object nose-style tests that use
    # `setup` instead of unittest's `setUp`.
    import nose
    nose.runmodule()
diff --git a/skbio/stats/power.py b/skbio/stats/power.py
new file mode 100644
index 0000000..850d633
--- /dev/null
+++ b/skbio/stats/power.py
@@ -0,0 +1,994 @@
+r"""
+Empirical Power Estimation (:mod:`skbio.stats.power`)
+=====================================================
+
+.. currentmodule:: skbio.stats.power
+
+The purpose of this module is to provide empirical, post-hoc power estimation
+of normally and non-normally distributed data. It also provides support to
+subsample data to facilitate this analysis.
+
+The underlying principle is based on subsampling and Monte Carlo simulation.
+Assume that there is some set of populations, :math:`K_{1}, K_{2}, ... K_{n}`
+which have some property, :math:`\mu` such that :math:`\mu_{1} \neq \mu_{2}
+\neq ... \neq \mu_{n}`. For each of the populations, a sample, :math:`S` can be
+drawn, with a parameter, :math:`x` where :math:`x \approx \mu` and for the
+samples, we can use a test, :math:`f`, to show that :math:`x_{1} \neq x_{2}
+\neq ... \neq x_{n}`.
+
+Since we know that :math:`\mu_{1} \neq \mu_{2} \neq ... \neq \mu_{n}`,
+we know we should reject the null hypothesis. If we fail to reject the null
+hypothesis, we have committed a Type II error and our result is a false
+negative. We can estimate the frequency of Type II errors at various sampling
+depths by repeatedly subsampling the populations and observing how often we
+see a false negative. If we repeat this several times for each subsampling
+depth, and vary the depths we use, we can start to approximate a relationship
+between the number of samples we use and the rate of false negatives, also
+called the statistical power of the test.
+
+To generate complete power curves from data which appears underpowered, the
+`statsmodels.stats.power` package can be used to solve for an effect size. The
+effect size can be used to extrapolate a power curve for the data.
+
+Most functions in this module accept a statistical test function which takes a
+list of samples and returns a p value. The test is then evaluated over a series
+of subsamples.
+
+Sampling may be handled in two ways. For any set of samples, we may simply
+choose to draw :math:`n` observations at random for each sample. Alternatively,
+if metadata is available, samples can be matched based on a set of control
+categories so that paired samples are drawn at random from the set of available
+matches.
+
+Functions
+---------
+
+.. autosummary::
+    :toctree: generated/
+
+    subsample_power
+    subsample_paired_power
+    confidence_bound
+    bootstrap_power_curve
+    paired_subsamples
+
+Examples
+--------
+Suppose we wanted to test that there's a relationship between two random
+variables, `ind` and `dep`. Let's use random subsampling to estimate the
+statistical power of our test with an alpha of 0.1, 0.01, and 0.001.
+
+To control for the pseudo-random number generation, we will use a seed.
+When using these functions with your own data, you don't need to include the
+step.
+
+>>> import numpy as np
+>>> np.random.seed(20)
+>>> ind = np.random.randint(0, 20, 15)
+>>> ind
+array([ 3, 15,  9, 11,  7,  2,  0,  8, 19, 16,  6,  6, 16,  9,  5])
+>>> dep = (3 * ind + 5 + np.random.randn(15) * 5).round(3)
+>>> dep
+array([ 15.617,  47.533,  28.04 ,  33.788,  19.602,  12.229,   4.779,
+        36.838,  67.256,  55.032,  22.157,   7.051,  58.601,  38.664,
+        18.783])
+
+Let's define a test that will draw a list of sample pairs and determine
+if they're correlated. We'll use `scipy.stats.pearsonr` which takes two arrays
+and returns a correlation coefficient and a p-value representing the
+probability the two distributions are correlated.
+
+>>> from scipy.stats import pearsonr
+>>> f = lambda x: pearsonr(x[0], x[1])[1]
+
+Now, let's use random sampling to estimate the power of our test on
+the first distribution.
+
+>>> samples = [ind, dep]
+>>> f(samples)
+3.6459452596563003e-08
+
+In `subsample_power`, we can maintain a paired relationship between samples
+by setting `draw_mode` to "matched". We can also set our critical value, so
+that we estimate power for a critical value of :math:`\alpha = 0.05`, an
+estimate for the critical value of 0.01, and a critical value of 0.001.
+
+>>> from skbio.stats.power import subsample_power
+>>> pwr_100, counts_100 = subsample_power(test=f,
+...                                       samples=samples,
+...                                       min_observations=3,
+...                                       max_counts=10,
+...                                       min_counts=3,
+...                                       counts_interval=1,
+...                                       draw_mode="matched",
+...                                       alpha_pwr=0.1)
+>>> pwr_010, counts_010 = subsample_power(test=f,
+...                                       samples=samples,
+...                                       min_observations=3,
+...                                       max_counts=10,
+...                                       min_counts=3,
+...                                       counts_interval=1,
+...                                       draw_mode="matched",
+...                                       alpha_pwr=0.01)
+>>> pwr_001, counts_001 = subsample_power(test=f,
+...                                       samples=samples,
+...                                       min_observations=3,
+...                                       max_counts=10,
+...                                       min_counts=3,
+...                                       counts_interval=1,
+...                                       draw_mode="matched",
+...                                       alpha_pwr=0.001)
+>>> counts_100
+array([3, 4, 5, 6, 7, 8, 9])
+>>> pwr_100.mean(0)
+array([ 0.466 ,  0.827 ,  0.936 ,  0.9852,  0.998 ,  1.    ,  1.    ])
+>>> pwr_010.mean(0)
+array([ 0.0468,  0.2394,  0.5298,  0.8184,  0.951 ,  0.981 ,  0.9982])
+>>> pwr_001.mean(0)
+array([ 0.003 ,  0.0176,  0.1212,  0.3428,  0.5892,  0.8256,  0.9566])
+
+Based on this power estimate, as we increase our confidence that we have not
+committed a type I error and identified a false positive, the number of samples
+we need to be confident that we have not committed a type II error increases.
+
+"""
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.utils import viewitems
+from future.builtins import range
+
+import numpy as np
+import scipy.stats
+
+
def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05,
                    min_observations=20, max_counts=50, counts_interval=10,
                    min_counts=None, num_iter=500, num_runs=10):
    r"""Subsamples data to iteratively calculate power

    Parameters
    ----------
    test : function
        The statistical test which accepts a list of arrays of values
        (sample ids or numeric values) and returns a p value.
    samples : array_like
        `samples` can be a list of lists or a list of arrays where each
        sublist or row in the array corresponds to a sampled group.
    draw_mode : {"ind", "matched"}, optional
        "matched" samples should be used when observations in
        samples have corresponding observations in other groups. For instance,
        this may be useful when working with regression data where
        :math:`x_{1}, x_{2}, ..., x_{n}` maps to
        :math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
        length in "matched" mode.
        If there is no reciprocal relationship between samples, then
        "ind" mode should be used.
    alpha_pwr : float, optional
        The critical value used to calculate the power.
    min_observations : unsigned int, optional
        The minimum number of observations in any sample to
        perform power analysis. Note that this is not the same as the minimum
        number of samples drawn per group.
    max_counts : unsigned int, optional
        The maximum number of samples per group to draw for
        effect size calculation.
    counts_interval : unsigned int, optional
        The difference between each subsampling count.
    min_counts : unsigned int, optional
        How many samples should be drawn for the smallest
        subsample. If this is None, the `counts_interval` will be used.
    num_iter : unsigned int, optional
        The number of p-values to generate for each point
        on the curve.
    num_runs : unsigned int, optional
        The number of times to calculate each curve.

    Returns
    -------
    power : array
        The power calculated for each subsample at each count.
    sample_counts : array
        The number of samples drawn at each power calculation.

    Raises
    ------
    ValueError
        If `draw_mode` is "matched", an error will occur if the arrays in
        `samples` are not the same length.
    ValueError
        There is a ValueError if there are fewer samples than the minimum
        count.
    ValueError
        If the `counts_interval` is greater than the difference between the
        sample start and the max value, the function raises a ValueError.


    Examples
    --------
    Let's say we wanted to look at the relationship between the presence of a
    specific bacteria and the probability of a pre or post menopausal woman
    experiencing a health outcome. Healthy women were enrolled in the study
    either before or after menopause, and followed for five years. They
    submitted fecal samples at regular intervals during that period, and were
    assessed for a particular irreversible health outcome over that period.

    16S sequencing and available literature suggest a set of candidate taxa
    may be associated with the health outcome. Assume there are 100 samples
    (50 premenopausal samples and 50 postmenopausal samples) where the taxa
    of interest was identified by 16S sequencing and the taxonomic abundance
    was confirmed in a certain fraction of samples at a minimum level.

    We can simulate the probability that a woman positive for this taxa
    experiences the health outcome using a binomial distribution.

    >>> import numpy as np
    >>> np.random.seed(25)
    >>> pre_rate = np.random.binomial(1, 0.75, size=(50,))
    >>> pre_rate
    array([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
           0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0,
           1, 1, 1, 1])
    >>> pos_rate = np.random.binomial(1, 0.25, size=(50,))
    >>> pos_rate
    array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0,
           0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0,
           0, 1, 0, 0])

    Let's set up a test function, so we can test the probability of
    finding a difference in frequency between the two groups. We'll use
    `scipy.stats.chisquare` to look for the difference in frequency between
    groups.

    >>> from scipy.stats import chisquare, nanmean
    >>> test = lambda x: chisquare(np.array([x[i].sum() for i in
    ...     range(len(x))]))[1]

    Let's make sure that our two distributions are different.

    >>> round(test([pre_rate, pos_rate]), 5)
    9e-05

    Since there are an even number of samples, and we don't have enough
    information to try controlling the data, we'll use
    `skbio.stats.power.subsample_power` to compare the two groups. If we had
    metadata about other risk factors, like a family history, BMI, tobacco use,
    we might want to use `skbio.stats.power.subsample_paired_power`.
    We'll also use "ind" `draw_mode`, since there is no linkage between the
    two groups of samples.

    >>> from skbio.stats.power import subsample_power
    >>> pwr_est, counts = subsample_power(test=test,
    ...                                   samples=[pre_rate, pos_rate],
    ...                                   counts_interval=5)
    >>> counts
    array([ 5, 10, 15, 20, 25, 30, 35, 40, 45])
    >>> nanmean(pwr_est, 0)
    array([ 0.1776,  0.3392,  0.658 ,  0.8856,  0.9804,  0.9982,  1.    ,
            1.    ,  1.    ])

    So, we can estimate that we will see a significant difference between
    the two groups (:math:`\alpha \leq 0.05`) at least 80% of the time if we
    use 20 observations per group.

    If we wanted to test the relationship of a second candidate taxa which is
    more rare in the population, but may have a similar effect, based on
    available literature, we might also start by trying to identify 20
    samples per group where the second candidate taxa is present.

    """

    # Determines the minimum number of ids in a category
    num_ids = min(len(id_) for id_ in samples)

    # Checks that "matched" mode is handled appropriately: paired draws are
    # only meaningful when every group has the same number of observations.
    if draw_mode == "matched":
        for id_ in samples:
            if len(id_) != num_ids:
                raise ValueError('Each vector in samples must be the same '
                                 'length in "matched" draw_mode.')

    # Checks there are enough samples to subsample
    if num_ids <= min_observations:
        raise ValueError('There are not enough samples for subsampling.')

    # Calculates the effect size vector
    if min_counts is None:
        min_counts = counts_interval

    if (max_counts - min_counts) < counts_interval:
        raise ValueError("No subsamples of the specified size can be drawn.")

    # Subsampling depths, capped by the smallest group so every depth can
    # actually be drawn.
    sample_counts = np.arange(min_counts,
                              min(max_counts, num_ids),
                              counts_interval)

    # Preallocates the power array
    power = np.zeros((num_runs, len(sample_counts)))

    # Calculates the power instances: one full power curve per run.
    for id1 in range(num_runs):
        power[id1, :] = _calculate_power_curve(test,
                                               samples,
                                               sample_counts,
                                               num_iter=num_iter,
                                               alpha=alpha_pwr,
                                               mode=draw_mode)

    return power, sample_counts
+
+
def subsample_paired_power(test, meta, cat, control_cats, order=None,
                           strict_match=True, alpha_pwr=0.05,
                           min_observations=20, max_counts=50,
                           counts_interval=10, min_counts=None,
                           num_iter=500, num_runs=10):
    r"""Estimates power iteratively using samples with matching metadata

    Parameters
    ----------
    test : function
        The statistical test which accepts a list of arrays sample ids and
        returns a p value.
    meta : pandas.DataFrame
        The metadata associated with the samples.
    cat : str
        The metadata category being varied between samples.
    control_cats : list
        The metadata categories to be used as controls. For example, if
        you wanted to vary age (`cat` = "AGE"), you might want to control
        for gender and health status (i.e. `control_cats` = ["SEX",
        "HEALTHY"]).
    order : list, optional
        The order of groups in the category. This can be used
        to limit the groups selected. For example, if there's a category with
        groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
        would be set to ['A', 'B'].
    strict_match : bool, optional
        This determines how data is grouped using
        `control_cats`. If a sample within `meta` has an undefined value (NaN)
        for any of the columns in `control_cats`, the sample will not be
        considered as having a match and will be ignored when `strict_match`
        is True. If `strict_match` is False, missing values (NaN) in the
        `control_cats` can be considered matches.
    alpha_pwr : float, optional
        The critical value used to calculate the power.
    min_observations : unsigned int, optional
        The minimum number of paired samples which must exist
        for a category and set of control categories to be able to subsample
        and make power calculations. This is not the same as the minimum
        number of observations to draw during subsampling.
    max_counts : unsigned int, optional
        The maximum number of observations per sample to draw
        for effect size calculation.
    counts_interval : unsigned int, optional
        The difference between each subsampling count.
    min_counts : unsigned int, optional
        How many samples should be drawn for the smallest
        subsample. If this is None, the `counts_interval` will be used.
    num_iter : unsigned int, optional
        The number of p-values to generate for each point on the curve.
    num_runs : unsigned int, optional
        The number of times to calculate each curve.

    Returns
    -------
    power : array
        The power calculated for each subsample at each count.
    sample_counts : array
        The number of samples drawn at each power calculation.

    Raises
    ------
    ValueError
        There is a value error if there are fewer samples than the minimum
        count.
    ValueError
        If the `counts_interval` is greater than the difference between the
        sample start and the max value, the function raises a ValueError.

    Examples
    --------
    Assume you are interested in the role of a specific cytokine of protein
    translocation in myeloid-lineage cells. You are able to culture two
    macrophage lineages (bone marrow derived phagocytes and
    peritoneally-derived macrophages). Due to unfortunate circumstances, your
    growth media must be acquired from multiple sources (lab, company A,
    company B). Also unfortunate, you must use labor-intense low throughput
    assays. You have some preliminary measurements, and you'd like to
    predict how many (more) cells you need to analyze for 80% power.

    You have information about 60 cells, which we'll simulate below. Note
    that we are setting a random seed value for consistency.

    >>> import numpy as np
    >>> import pandas as pd
    >>> np.random.seed(25)
    >>> data = pd.DataFrame.from_dict({
    ...     'CELL_LINE': np.random.binomial(1, 0.5, size=(60,)),
    ...     'SOURCE': np.random.binomial(2, 0.33, size=(60,)),
    ...     'TREATMENT': np.hstack((np.zeros((30)), np.ones((30)))),
    ...     'INCUBATOR': np.random.binomial(1, 0.2, size=(60,))})
    >>> data['OUTCOME'] = (0.25 + data.TREATMENT * 0.25) + \
    ...     np.random.randn(60) * (0.1 + data.SOURCE/10 + data.CELL_LINE/5)
    >>> data.loc[data.OUTCOME < 0, 'OUTCOME'] = 0
    >>> data.loc[data.OUTCOME > 1, 'OUTCOME'] = 1

    We will approach this by assuming that the distribution of our outcome is
    not normally distributed, and apply a kruskal-wallis test to compare
    between the cytokine treated and untreated cells.

    >>> from scipy.stats import kruskal
    >>> f = lambda x: kruskal(*[data.loc[i, 'OUTCOME'] for i in x])[1]

    Let's check that cytokine treatment has a significant effect across all
    the cells.

    >>> treatment_stat = [g for g in data.groupby('TREATMENT').groups.values()]
    >>> f(treatment_stat)
    0.0019386336266250209

    Now, let's pick the control categories. It seems reasonable to assume there
    may be an effect of cell line on the treatment outcome, which may be
    attributed to differences in receptor expression. It may also be possible
    that there are differences due to cytokine source. Incubators were
    maintained under the same conditions throughout the experiment, within one
    degree of temperature difference at any given time, and the same level of
    CO2. So, at least initially, let's ignore differences due to the
    incubator.

    It's recommended that as a first pass analysis, control variables be
    selected based on an idea of what may be biologically relevant to the
    system, although further iteration might encourage the consideration of
    variables with effect sizes similar to, or larger than the variable of
    interest.

    >>> control_cats = ['SOURCE', 'CELL_LINE']
    >>> from skbio.stats.power import subsample_paired_power
    >>> pwr, cnt = subsample_paired_power(test=f,
    ...                                   meta=data,
    ...                                   cat='TREATMENT',
    ...                                   control_cats=control_cats,
    ...                                   min_observations=5,
    ...                                   counts_interval=5,
    ...                                   num_iter=100,
    ...                                   num_runs=5)
    >>> cnt
    array([ 5, 10, 15, 20])
    >>> pwr.mean(0)
    array([ 0.15 ,  0.376,  0.614,  0.836])
    >>> pwr.std(0).round(3)
    array([ 0.046,  0.106,  0.176,  0.153])

    Estimating off the power curve, it looks like 20 cells per group may
    provide adequate power for this experiment, although the large variance
    in power might suggest extending the curves or increasing the number of
    samples per group.

    """

    # Draws an initial set of matched samples; this also serves as the
    # sample set for the first power run below.
    sub_ids = paired_subsamples(meta, cat, control_cats, order, strict_match)

    # Determines the number of ids available per group
    num_ids = len(sub_ids[0])

    # Checks there are enough samples to subsample
    if num_ids <= min_observations:
        raise ValueError('There are not enough samples for subsampling.')

    # Defaults the smallest subsample size to the sampling interval
    if min_counts is None:
        min_counts = counts_interval

    if (max_counts - min_counts) < counts_interval:
        raise ValueError("No subsamples of the specified size can be drawn.")

    # Sampling depths are capped by the number of available matched ids
    sample_counts = np.arange(min_counts,
                              min(max_counts, num_ids),
                              counts_interval)

    # Preallocates the power array: one row per run, one column per depth
    power = np.zeros((num_runs, len(sample_counts)))

    for id1 in range(num_runs):
        # Redraws the matched ids for every run after the first, so each
        # power curve is estimated on an independently drawn pairing.
        if id1 > 0:
            sub_ids = paired_subsamples(meta, cat, control_cats, order,
                                        strict_match)
        power[id1, :] = _calculate_power_curve(test,
                                               sub_ids,
                                               sample_counts,
                                               mode="matched",
                                               num_iter=num_iter,
                                               alpha=alpha_pwr)

    return power, sample_counts
+
+
def confidence_bound(vec, alpha=0.05, df=None, axis=None):
    r"""Calculates a confidence bound assuming a normal distribution

    Parameters
    ----------
    vec : array_like
        The array of values to use in the bound calculation.
    alpha : float, optional
        The critical value, used for the confidence bound calculation.
    df : float, optional
        The degrees of freedom associated with the
        distribution. If None is given, df is assumed to be the number of
        elements in specified axis.
    axis : unsigned int, optional
        The axis over which to take the deviation. When axis
        is None, a single value will be calculated for the whole matrix.

    Returns
    -------
    bound : float
        The confidence bound around the mean. The confidence interval is
        [mean - bound, mean + bound].

    """

    # Determines the number of non-nan counts
    vec = np.asarray(vec)
    vec_shape = vec.shape
    if axis is None and len(vec_shape) == 1:
        num_counts = vec_shape[0] - np.isnan(vec).sum()
    elif axis is None:
        num_counts = vec_shape[0] * vec_shape[1] - np.isnan(vec).sum()
    else:
        # NOTE(review): this spreads the total nan count evenly over all
        # cells rather than counting nans per-slice along `axis`; it can
        # yield a fractional count. Kept as-is to preserve behavior, but
        # worth confirming against the intended statistics.
        num_counts = vec_shape[axis] - np.isnan(vec).sum() / \
            (vec_shape[0] * vec_shape[1])

    # Gets the df if not supplied
    if df is None:
        df = num_counts - 1

    # Calculates the bound. np.nanstd with ddof=1 replaces the removed
    # scipy.stats.nanstd (which defaulted to bias=False, i.e. ddof=1).
    # NOTE(review): the standard error here divides by sqrt(num_counts - 1)
    # rather than sqrt(num_counts); kept to preserve existing results.
    bound = np.nanstd(vec, axis=axis, ddof=1) / np.sqrt(num_counts - 1) * \
        scipy.stats.t.ppf(1 - alpha / 2, df)

    return bound
+
+
def bootstrap_power_curve(test, samples, sample_counts, ratio=None,
                          alpha=0.05, mode='ind', num_iter=500, num_runs=10):
    r"""Repeatedly calculates the power curve for a specified alpha level

    Parameters
    ----------
    test : function
        The statistical test which accepts an array_like of sample ids
        (list of lists or arrays) and returns a p-value.
    samples : array_like
        A list of lists or an array where each sublist or row corresponds
        to a sampled group.
    sample_counts : 1-D array_like
        A vector of the number of samples which should be drawn at each
        point on the curve.
    ratio : 1-D array_like, optional
        The fraction of the sample counts assigned to each group. This must
        be None or have the same length as `samples`. When None, the same
        number of observations is drawn from each sample.
    alpha : float, optional
        The critical value for calculating power. The default is 0.05.
    mode : {"ind", "matched"}, optional
        "matched" samples should be used when observations in
        samples have corresponding observations in other groups. For instance,
        this may be useful when working with regression data where
        :math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
        y_{n}`.
    num_iter : unsigned int, optional
        The number of p-values to generate for each point on the curve.
    num_runs : unsigned int, optional
        The number of times to calculate each curve.

    Returns
    -------
    power_mean : 1-D array
        The mean power at each sampling depth, across runs.
    power_bound : 1-D array
        The confidence bound around the mean power.

    Examples
    --------
    Suppose we have 100 samples randomly drawn from two normal distributions,
    the first with mean 0 and standard deviation 1, and the second with mean 3
    and standard deviation 1.5

    >>> import numpy as np
    >>> np.random.seed(20)
    >>> samples_1 = np.random.randn(100)
    >>> samples_2 = 1.5 * np.random.randn(100) + 1

    We want to test the statistical power of a independent two sample t-test
    comparing the two populations. We can define an anonymous function, `f`,
    to wrap the scipy function for independent t tests,
    `scipy.stats.ttest_ind`. The test function will take a list of value
    vectors and return a p value.

    >>> from scipy.stats import ttest_ind
    >>> f = lambda x: ttest_ind(x[0], x[1])[1]

    Now, we can determine the statistical power, or the probability that we do
    not have a false negative given that we do not have a false positive, by
    varying a number of subsamples.

    >>> from skbio.stats.power import bootstrap_power_curve
    >>> sample_counts = np.arange(5, 80, 5)
    >>> power_mean, power_bound = bootstrap_power_curve(f,
    ...                                                 [samples_1, samples_2],
    ...                                                 sample_counts)
    >>> sample_counts[power_mean - power_bound.round(3) > .80].min()
    20

    Based on this analysis, it looks like we need at least 20 observations
    from each distribution to avoid committing a type II error more than 20%
    of the time.

    """

    # _calculate_power_curve treats a vector-valued alpha as a request for
    # one independently drawn curve per entry, so replicating the scalar
    # alpha num_runs times yields num_runs bootstrap replicates.
    alpha = np.full(num_runs, alpha)

    power = _calculate_power_curve(test=test,
                                   samples=samples,
                                   sample_counts=sample_counts,
                                   ratio=ratio,
                                   num_iter=num_iter,
                                   alpha=alpha,
                                   mode=mode)

    # Summarizes the replicate curves as a mean and a confidence bound
    power_mean = power.mean(0)
    power_bound = confidence_bound(power, alpha=alpha[0], axis=0)

    return power_mean, power_bound
+
+
def paired_subsamples(meta, cat, control_cats, order=None, strict_match=True):
    r"""Gets a set of samples to serve as controls

    This function is designed to provide controlled samples, based on a
    metadata category. For example, one could control for age, sex, education
    level, and diet type while measuring exercise frequency. No outcome
    value is considered in this subsampling process.

    Parameters
    ----------
    meta : pandas.DataFrame
        The metadata associated with the samples.
    cat : str, list
        The metadata category (or a list of categories) for comparison.
    control_cats : list
        The metadata categories to be used as controls. For example, if you
        wanted to vary age (`cat` = "AGE"), you might want to control for
        gender and health status (i.e. `control_cats` = ["SEX", "HEALTHY"])
    order : list, optional
        The order of groups in the category. This can be used
        to limit the groups selected. For example, if there's a category with
        groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
        would be set to ['A', 'B'].
    strict_match: bool, optional
        This determines how data is grouped using
        `control_cats`. If a sample within `meta` has an undefined value (NaN)
        for any of the columns in `control_cats`, the sample will not be
        considered as having a match and will be ignored when `strict_match`
        is True. If `strict_match` is False, missing values (NaN) in the
        `control_cats` can be considered matches.

    Returns
    -------
    ids : array
        a set of ids which satisfy the criteria. These are not grouped by
        `cat`. An empty array indicates there are no sample ids which satisfy
        the requirements.

    Examples
    --------
    If we have a mapping file for a set of random individuals looking at
    housing, sex, age and antibiotic use.

    >>> import pandas as pd
    >>> import numpy as np
    >>> meta = {'SW': {'HOUSING': '2', 'SEX': 'M', 'AGE': np.nan, 'ABX': 'Y'},
    ...         'TS': {'HOUSING': '2', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'},
    ...         'CB': {'HOUSING': '3', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'},
    ...         'BB': {'HOUSING': '1', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'}}
    >>> meta = pd.DataFrame.from_dict(meta, orient="index")
    >>> meta #doctest: +SKIP
       ABX HOUSING  AGE SEX
    BB   Y       1  40s   M
    CB   Y       3  40s   M
    SW   Y       2  NaN   M
    TS   Y       2  40s   M

    We may want to vary an individual's housing situation, while holding
    constant their age, sex and antibiotic use so we can estimate the effect
    size for housing, and later compare it to the effects of other variables.

    >>> from skbio.stats.power import paired_subsamples
    >>> ids = paired_subsamples(meta, 'HOUSING', ['SEX', 'AGE', 'ABX'])
    >>> np.hstack(ids) #doctest: +ELLIPSIS
    array(['BB', 'TS', 'CB']...

    So, for this set of data, we can match TS, CB, and BB based on their age,
    sex, and antibiotic use. SW cannot be matched in either group because
    `strict_match` was true, and there is missing AGE data for this sample.

    """

    # Sets the index data
    # Groups meta by category
    cat_groups = meta.groupby(cat).groups

    # Handles the order argument
    if order is None:
        order = sorted(cat_groups.keys())
    order = np.array(order)
    num_groups = len(order)

    # Determines the number of samples, and the experimental and control group
    # The smallest group in `cat` is taken as the control; ties are broken
    # by the first position in `order`.
    group_size = np.array([len(cat_groups[o]) for o in order])
    ctrl_name = order[group_size == group_size.min()][0]
    order = order[order != ctrl_name]

    # Gets a control group table
    # ctrl_match_groups: all samples keyed by their control-category values;
    # ctrl_group: only the control group's samples, keyed the same way.
    ctrl_match_groups = meta.groupby(control_cats).groups
    ctrl_group = meta.loc[cat_groups[ctrl_name]
                          ].groupby(list(control_cats)).groups

    # One (initially empty) id array per group in the category
    ids = [np.array([])] * num_groups
    # Loops through samples in the experimental group to match for controls
    # NOTE(review): `viewitems` is presumably the py2/3 compatibility helper
    # imported at the top of this module — confirm against the file header.
    for check_group, ctrl_ids in viewitems(ctrl_group):
        # Checks the categories have been defined
        undefed_check = np.array([_check_strs(p) for p in check_group])
        if not undefed_check.all() and strict_match:
            continue
        # Removes the matched ids from order
        # NOTE(review): this mutates the list cached in ctrl_match_groups in
        # place, so the control samples cannot be re-matched later in the
        # loop — appears intentional; confirm before restructuring.
        matched_ids = ctrl_match_groups[check_group]
        for id_ in ctrl_ids:
            matched_ids.remove(id_)
        pos_ids = []
        num_ids = [len(ctrl_ids)]
        # Gets the matrix of the matched ids and groups them
        exp_group = meta.loc[matched_ids].groupby(cat).groups
        for grp in order:
            # Checks group to be considered is included in the grouping
            if grp not in exp_group:
                break
            # Gets the id associated with the group
            pos_ids.append(exp_group[grp])
            num_ids.append(len(exp_group[grp]))
        # Determines the minimum number of samples
        num_draw = np.array(num_ids).min()
        # Draws samples from possible ids
        exp_ids = [np.random.choice(ctrl_ids, num_draw, replace=False)]
        exp_ids.extend([np.random.choice(id_, num_draw, replace=False)
                        for id_ in pos_ids])

        # Only keeps the draw when every group in the category was matched
        # (the inner loop above breaks early on a missing group).
        if len(exp_ids) == num_groups:
            for idx in range(num_groups):
                ids[idx] = np.hstack((ids[idx], exp_ids[idx]))

    return ids
+
+
+def _check_strs(x):
+    r"""Returns False if x is a nan and True is x is a string or number"""
+
+    if isinstance(x, str):
+        return True
+    elif isinstance(x, (float, int)):
+        return not np.isnan(x)
+    else:
+        raise TypeError('input must be a string, float or a nan')
+
+
+def _calculate_power(p_values, alpha=0.05):
+    r"""Calculates statical power empirically
+
+    Parameters
+    ----------
+    p_values : 1-D array
+        A 1-D numpy array with the test results.
+
+    alpha : float
+        The critical value for the power calculation.
+
+    Returns
+    -------
+    power : float
+        The emperical power, or the fraction of observed p values below the
+        critical value.
+
+    """
+
+    w = (p_values < float(alpha)).sum()/float(p_values.shape[0])
+
+    return w
+
+
+def _compare_distributions(test, samples, counts=5, mode="ind", num_iter=1000):
+    r"""Compares two distribution arrays iteratively
+
+    Parameters
+    ----------
+    test : function
+        The statistical test which accepts an array_like of sample ids
+        (list of lists) and returns a p-value.
+    samples : list of arrays
+        A list where each 1-d array represents a sample. If `mode` is
+        "matched", there must be an equal number of observations in each
+        sample.
+    counts : unsigned int or 1-D array, optional
+        The number of samples to draw from each distribution.
+        If this is a 1-D array, the length must correspond to the number of
+        samples. The function will not draw more observations than are in a
+        sample. In "matched" `mode`, the same number of observations will be
+        drawn from each group.
+    mode : {"ind", "matched"}, optional
+        "matched" samples should be used when observations in
+        samples have corresponding observations in other groups. For instance,
+        this may be useful when working with regression data where
+        :math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
+        y_{n}`.
+    num_iter : int, optional
+        Default 1000. The number of p-values to generate for each point on the
+        curve.
+
+    Returns
+    -------
+    p_values : array
+        The p-values for `n_iter` subsampled tests.
+
+    Raises
+    ------
+    ValueError
+        If mode is not "ind" or "matched".
+    ValueError
+        If the arrays in samples are not the same length in "matched" mode.
+    ValueError
+        If counts is a 1-D array and counts and samples are different lengths.
+
+    """
+
+    # Determines the number of groups
+    num_groups = len(samples)
+
+    # Checks the mode
+    if mode not in {'ind', 'matched'}:
+        raise ValueError('Supported sample modes are "ind" and "matched".')
+
+    # Handles the number of samples for later instances
+    if isinstance(counts, int):
+        counts = np.array([counts] * num_groups)
+
+    if not len(counts) == num_groups:
+        raise ValueError('If counts is a 1-D array, there must be a count to'
+                         ' draw for each group.')
+
+    # Checks the group length
+    samp_lens = [len(sample) for sample in samples]
+    # Checks the group length
+    if mode == 'matched' and np.array([samp_lens[i] != samp_lens[i+1] for i in
+                                       range(num_groups-1)]).all():
+        raise ValueError('In "matched" mode, each sample must have the same'
+                         ' number of observations.')
+    if np.array([samp_lens[i] < counts[i] for i in range(num_groups)]).any():
+        raise ValueError('You cannot choose more observations that exist '
+                         'in a sample.')
+
+    # Prealocates the pvalue matrix
+    p_values = np.zeros((num_iter))
+
+    for idx in range(num_iter):
+        if mode == "matched":
+            pos = np.random.choice(np.arange(0, samp_lens[0]), counts[0],
+                                   replace=False)
+            subs = [sample[pos] for sample in samples]
+        else:
+            subs = [np.random.choice(np.array(pop), counts[i], replace=False)
+                    for i, pop in enumerate(samples)]
+
+        p_values[idx] = test(subs)
+
+    return p_values
+
+
def _calculate_power_curve(test, samples, sample_counts, ratio=None,
                           mode='ind', num_iter=1000, alpha=0.05):
    r"""Generates an empirical power curve for the samples.

    Parameters
    ----------
    test : function
        The statistical test which accepts an list of arrays of values and
        returns a p value.
    samples : array_like
        `samples` can be a list of lists or an array where each sublist or row
        in the array corresponds to a sampled group.
    sample_counts : 1-D array
        A vector of the number of samples which should be sampled in each
        curve.
    mode : {"ind", "matched"}, optional
        "matched" samples should be used when observations in
        samples have corresponding observations in other groups. For instance,
        this may be useful when working with regression data where
        :math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
        y_{n}`.
    ratio : 1-D array, optional
        The fraction of the sample counts which should be
        assigned to each group. If this is a 1-D array, it must be the same
        length as `samples`. If no value is supplied (`ratio` is None),
        then an equal number of observations will be drawn for each sample.
    num_iter : int
        The default is 1000. The number of p-values to generate for each point
        on the curve.
    alpha : float or 1-D array, optional
        The critical value(s) for the power calculation. A scalar yields a
        1-D power vector; an array yields one independently drawn power
        curve per entry (one row each).

    Returns
    -------
    p_values : array
        The p-values associated with the input sample counts.

    Raises
    ------
    ValueError
        If ratio is an array and ratio is not the same length as samples

    """

    # Casts array-likes to arrays
    sample_counts = np.asarray(sample_counts)

    # Determines the number of groups
    num_groups = len(samples)
    num_samps = len(sample_counts)
    # A scalar alpha produces a 1-D power vector; a vector alpha produces
    # one row per critical value.
    if isinstance(alpha, float):
        vec = True
        pwr = np.zeros((num_samps))
        alpha = np.array([alpha])
    else:
        vec = False
        num_crit = alpha.shape[0]
        pwr = np.zeros((num_crit, num_samps))

    # Checks the ratio argument
    if ratio is None:
        ratio = np.ones((num_groups))
    ratio = np.asarray(ratio)
    if not ratio.shape == (num_groups,):
        raise ValueError('There must be a ratio for each group.')

    # Loops through the sample sizes
    for id2, s in enumerate(sample_counts):
        # Per-group draw counts, scaled by the requested ratio
        count = np.round(s * ratio, 0).astype(int)
        for id1, a in enumerate(alpha):
            # NOTE: the p-values are deliberately re-drawn for every alpha
            # entry, even though they do not depend on `a`.
            # bootstrap_power_curve passes a vector of identical alphas
            # (np.ones((num_runs)) * alpha) precisely to obtain independent
            # replicate curves, so hoisting this call out of the inner loop
            # would collapse all replicates into one.
            ps = _compare_distributions(test=test,
                                        samples=samples,
                                        counts=count,
                                        num_iter=num_iter,
                                        mode=mode)
            if vec:
                pwr[id2] = _calculate_power(ps, a)
            else:
                pwr[id1, id2] = _calculate_power(ps, a)

    return pwr
diff --git a/skbio/stats/spatial.py b/skbio/stats/spatial.py
new file mode 100644
index 0000000..903847e
--- /dev/null
+++ b/skbio/stats/spatial.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+r"""
+Spatial Statistics (:mod:`skbio.stats.spatial`)
+===============================================
+
+.. currentmodule:: skbio.stats.spatial
+
+This module provides functions for spatial analysis.
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   procrustes
+
+"""
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import numpy as np
+
+
def procrustes(data1, data2):
    r"""Procrustes analysis, a similarity test for two data sets

    Each input matrix is a set of points or vectors (the rows of the matrix).
    The dimension of the space is the number of columns of each matrix. Given
    two identically sized matrices, procrustes standardizes both such that:

    - trace(AA') = 1  (A' is the transpose, and the product is a standard
      matrix product).
    - Both sets of points are centered around the origin.

    Procrustes ([1]_, [2]_) then applies the optimal transform to the second
    matrix (including scaling/dilation, rotations, and reflections) to minimize
    M^2 = sum(square(mtx1 - mtx2)), or the sum of the squares of the pointwise
    differences between the two input datasets.

    If two data sets have different dimensionality (different number of
    columns), simply add columns of zeros to the smaller of the two.

    This function was not designed to handle datasets with different numbers of
    datapoints (rows).

    Parameters
    ----------
    data1 : array_like
        matrix, n rows represent points in k (columns) space data1 is the
        reference data, after it is standardised, the data from data2 will
        be transformed to fit the pattern in data1 (must have >1 unique
        points).

    data2 : array_like
        n rows of data in k space to be fit to data1.  Must be the same
        shape (numrows, numcols) as data1 (must have >1 unique points).


    Returns
    -------
    mtx1 : array_like
        a standardized version of data1
    mtx2 : array_like
        the orientation of data2 that best fits data1. Centered, but not
        necessarily trace(mtx2*mtx2') = 1
    disparity : array_like
        M^2 defined above


    Notes
    -----

    - The disparity should not depend on the order of the input matrices, but
      the output matrices will, as only the first output matrix is guaranteed
      to be scaled such that ``trace(AA') = 1``.

    - Duplicate datapoints are generally ok, duplicating a data point will
      increase its effect on the procrustes fit.

    - The disparity scales as the number of points per input matrix.

    References
    ----------

    .. [1] Krzanowski, W. J. (2000). "Principles of Multivariate analysis".
    .. [2] Gower, J. C. (1975). "Generalized procrustes analysis".

    Examples
    --------

    >>> import numpy as np
    >>> from skbio.stats.spatial import procrustes
    >>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
    >>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
    >>> mtx1, mtx2, disparity = procrustes(a, b)
    >>> print(round(disparity, 5))
    0.0

    """
    # Validates that both inputs are non-empty and identically shaped
    num_rows, num_cols = np.shape(data1)
    if (num_rows, num_cols) != np.shape(data2):
        raise ValueError("input matrices must be of same shape")
    if num_rows == 0 or num_cols == 0:
        raise ValueError("input matrices must be >0 rows, >0 cols")

    # standardize each matrix
    mtx1 = _center(data1)
    mtx2 = _center(data2)

    # An all-zero centered matrix means every point was identical
    if (not np.any(mtx1)) or (not np.any(mtx2)):
        raise ValueError("input matrices must contain >1 unique points")

    mtx1 = _normalize(mtx1)
    mtx2 = _normalize(mtx2)

    # transform mtx2 to minimize disparity (sum( (mtx1[i,j] - mtx2[i,j])^2) )
    mtx2 = _match_points(mtx1, mtx2)

    disparity = _get_disparity(mtx1, mtx2)

    return mtx1, mtx2, disparity
+
+
+def _center(mtx):
+    """Translate all data (rows of the matrix) to center on the origin
+
+    Parameters
+    ----------
+    mtx : array_like
+        Matrix to translate the data for.
+
+    Returns
+    -------
+    result : array_like ('d') array
+        Shifted version of the input data.  The new matrix is such that the
+        center of mass of the row vectors is centered at the origin.
+
+    """
+    result = np.array(mtx, 'd')
+    result -= np.mean(result, 0)
+    # subtract each column's mean from each element in that column
+    return result
+
+
+def _normalize(mtx):
+    """change scaling of data (in rows) such that trace(mtx*mtx') = 1
+
+    Parameters
+    ----------
+    mtx : array_like
+        Matrix to scale the data for.
+
+    Notes
+    -----
+    mtx' denotes the transpose of mtx
+
+    """
+    mtx = np.asarray(mtx, dtype=float)
+    return mtx / np.linalg.norm(mtx)
+
+
+def _match_points(mtx1, mtx2):
+    """Returns a transformed mtx2 that matches mtx1.
+
+    Returns
+    -------
+
+    A new matrix which is a transform of mtx2.  Scales and rotates a copy of
+    mtx 2.  See procrustes docs for details.
+
+    """
+    u, s, vh = np.linalg.svd(np.dot(np.transpose(mtx1), mtx2))
+    q = np.dot(np.transpose(vh), np.transpose(u))
+    new_mtx2 = np.dot(mtx2, q)
+    new_mtx2 *= np.sum(s)
+
+    return new_mtx2
+
+
+def _get_disparity(mtx1, mtx2):
+    """Measures the dissimilarity between two data sets
+
+    Returns
+    -------
+
+    M^2 = sum(square(mtx1 - mtx2)), the pointwise sum of squared differences
+
+    """
+    return(np.sum(np.square(mtx1 - mtx2)))
diff --git a/skbio/stats/tests/__init__.py b/skbio/stats/tests/__init__.py
new file mode 100644
index 0000000..c99682c
--- /dev/null
+++ b/skbio/stats/tests/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/stats/tests/data/cr_data_out b/skbio/stats/tests/data/cr_data_out
new file mode 100644
index 0000000..344fd19
--- /dev/null
+++ b/skbio/stats/tests/data/cr_data_out
@@ -0,0 +1,6 @@
+Grouped by "bar", probability: 0.011000
+For group "Foo", the group means is: -1.439800
+The info is: [('mean', -1.4398), ('std', 1.3184)]
+For group "Bar", the group means is: 5.958900
+Cannot calculate the first difference with a window of size (3).
+The info is: [('mean', 5.9589), ('std', 2.7942)]
diff --git a/skbio/stats/tests/data/cr_data_raw b/skbio/stats/tests/data/cr_data_raw
new file mode 100644
index 0000000..8a4fd82
--- /dev/null
+++ b/skbio/stats/tests/data/cr_data_raw
@@ -0,0 +1,8 @@
+Grouped by "bar"
+For group "Foo":
+The trajectory is:
+[-2.675, -0.251, -2.8322, 0.0]
+For group "Bar":
+Cannot calculate the first difference with a window of size (3).
+The trajectory is:
+[9.6823, 2.9511, 5.2434]
diff --git a/skbio/stats/tests/data/cr_no_data_out b/skbio/stats/tests/data/cr_no_data_out
new file mode 100644
index 0000000..9b09df2
--- /dev/null
+++ b/skbio/stats/tests/data/cr_no_data_out
@@ -0,0 +1 @@
+Grouped by "foo": This group can not be used. All groups should have more than 1 element.
diff --git a/skbio/stats/tests/data/cr_no_data_raw b/skbio/stats/tests/data/cr_no_data_raw
new file mode 100644
index 0000000..e69de29
diff --git a/skbio/stats/tests/data/gr_w_msg_out b/skbio/stats/tests/data/gr_w_msg_out
new file mode 100644
index 0000000..909c164
--- /dev/null
+++ b/skbio/stats/tests/data/gr_w_msg_out
@@ -0,0 +1,3 @@
+For group "Bar", the group means is: 5.958900
+Cannot calculate the first difference with a window of size (3).
+The info is: [('mean', 5.9589), ('std', 2.7942)]
diff --git a/skbio/stats/tests/data/gr_w_msg_raw b/skbio/stats/tests/data/gr_w_msg_raw
new file mode 100644
index 0000000..43bdd49
--- /dev/null
+++ b/skbio/stats/tests/data/gr_w_msg_raw
@@ -0,0 +1,4 @@
+For group "Bar":
+Cannot calculate the first difference with a window of size (3).
+The trajectory is:
+[9.6823, 2.9511, 5.2434]
diff --git a/skbio/stats/tests/data/gr_wo_msg_out b/skbio/stats/tests/data/gr_wo_msg_out
new file mode 100644
index 0000000..3b8fa55
--- /dev/null
+++ b/skbio/stats/tests/data/gr_wo_msg_out
@@ -0,0 +1,2 @@
+For group "Foo", the group means is: -1.439800
+The info is: [('mean', -1.4398), ('std', 1.3184)]
diff --git a/skbio/stats/tests/data/gr_wo_msg_raw b/skbio/stats/tests/data/gr_wo_msg_raw
new file mode 100644
index 0000000..d56bfbd
--- /dev/null
+++ b/skbio/stats/tests/data/gr_wo_msg_raw
@@ -0,0 +1,3 @@
+For group "Foo":
+The trajectory is:
+[-2.675, -0.251, -2.8322, 0.0]
diff --git a/skbio/stats/tests/data/vr_out b/skbio/stats/tests/data/vr_out
new file mode 100644
index 0000000..91b54eb
--- /dev/null
+++ b/skbio/stats/tests/data/vr_out
@@ -0,0 +1,12 @@
+Trajectory algorithm: wdiff
+** This output is weighted **
+
+Grouped by "foo": This group can not be used. All groups should have more than 1 element.
+
+Grouped by "bar", probability: 0.011000
+For group "Foo", the group means is: -1.439800
+The info is: [('mean', -1.4398), ('std', 1.3184)]
+For group "Bar", the group means is: 5.958900
+Cannot calculate the first difference with a window of size (3).
+The info is: [('mean', 5.9589), ('std', 2.7942)]
+
diff --git a/skbio/stats/tests/data/vr_raw b/skbio/stats/tests/data/vr_raw
new file mode 100644
index 0000000..24b5317
--- /dev/null
+++ b/skbio/stats/tests/data/vr_raw
@@ -0,0 +1,13 @@
+Trajectory algorithm: wdiff
+** This output is weighted **
+
+
+Grouped by "bar"
+For group "Foo":
+The trajectory is:
+[-2.675, -0.251, -2.8322, 0.0]
+For group "Bar":
+Cannot calculate the first difference with a window of size (3).
+The trajectory is:
+[9.6823, 2.9511, 5.2434]
+
diff --git a/skbio/stats/tests/data/vr_real_out b/skbio/stats/tests/data/vr_real_out
new file mode 100644
index 0000000..31e9538
--- /dev/null
+++ b/skbio/stats/tests/data/vr_real_out
@@ -0,0 +1,14 @@
+Trajectory algorithm: avg
+
+Grouped by "Description": This group can not be used. All groups should have more than 1 element.
+
+Grouped by "DOB": This group can not be used. All groups should have more than 1 element.
+
+Grouped by "Weight": This group can not be used. All groups should have more than 1 element.
+
+Grouped by "Treatment", probability: 0.933100
+For group "Control", the group means is: 4.050800
+The info is: [('avg', 4.0508)]
+For group "Fast", the group means is: 4.159600
+The info is: [('avg', 4.1596)]
+
diff --git a/skbio/stats/tests/data/vr_real_raw b/skbio/stats/tests/data/vr_real_raw
new file mode 100644
index 0000000..a27f7d7
--- /dev/null
+++ b/skbio/stats/tests/data/vr_real_raw
@@ -0,0 +1,13 @@
+Trajectory algorithm: avg
+
+
+
+
+Grouped by "Treatment"
+For group "Control":
+The trajectory is:
+[2.3694, 3.3716, 5.4452, 4.5704, 4.4972]
+For group "Fast":
+The trajectory is:
+[7.2220, 4.2726, 1.1169, 4.0271]
+
diff --git a/skbio/stats/tests/test_gradient.py b/skbio/stats/tests/test_gradient.py
new file mode 100644
index 0000000..3e142c1
--- /dev/null
+++ b/skbio/stats/tests/test_gradient.py
@@ -0,0 +1,1033 @@
+#!/usr/bin/env python
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+from six import StringIO
+from future.builtins import zip
+
+from operator import attrgetter
+from unittest import TestCase, main
+
+import numpy as np
+import pandas as pd
+import numpy.testing as npt
+import pandas.util.testing as pdt
+
+from skbio.util import get_data_path
+from skbio.stats.gradient import (GradientANOVA, AverageGradientANOVA,
+                                  TrajectoryGradientANOVA,
+                                  FirstDifferenceGradientANOVA,
+                                  WindowDifferenceGradientANOVA, GroupResults,
+                                  CategoryResults, GradientANOVAResults,
+                                  _weight_by_vector, _ANOVA_trajectories)
+
+
+class BaseTests(TestCase):
+    """Shared fixtures for the gradient test cases below.
+
+    setUp builds ordination coordinates, sample metadata and pre-computed
+    GroupResults/CategoryResults/GradientANOVAResults objects; the
+    assert_* helpers compare those result objects field by field.
+    """
+
+    def setUp(self):
+        """Initializes some data for testing"""
+        # Full 9-axis ordination coordinates keyed by sample id.
+        coord_data = {
+            'PC.636': np.array([-0.212230626531, 0.216034194368, 0.03532727349,
+                                -0.254450494129, -0.0687468542543,
+                                0.231895596562, 0.00496549154314,
+                                -0.0026246871695, 9.73837390723e-10]),
+            'PC.635': np.array([-0.277487312135, -0.0295483215975,
+                                -0.0744173437992, 0.0957182357964,
+                                0.204714844022, -0.0055407341857,
+                                -0.190287966833, 0.16307126638,
+                                9.73837390723e-10]),
+            'PC.356': np.array([0.220886492631, 0.0874848360559,
+                                -0.351990132198, -0.00316535032886,
+                                0.114635191853, -0.00019194106125,
+                                0.188557853937, 0.030002427212,
+                                9.73837390723e-10]),
+            'PC.481': np.array([0.0308923744062, -0.0446295973489,
+                                0.133996451689, 0.29318228566, -0.167812539312,
+                                0.130996149793, 0.113551017379, 0.109987942454,
+                                9.73837390723e-10]),
+            'PC.354': np.array([0.27616778138, -0.0341866951102,
+                                0.0633000238256, 0.100446653327,
+                                0.123802521199, 0.1285839664, -0.132852841046,
+                                -0.217514322505, 9.73837390723e-10]),
+            'PC.593': np.array([0.202458130052, -0.115216120518,
+                                0.301820871723, -0.18300251046, 0.136208248567,
+                                -0.0989435556722, 0.0927738484879,
+                                0.0909429797672, 9.73837390723e-10]),
+            'PC.355': np.array([0.236467470907, 0.21863434374,
+                                -0.0301637746424, -0.0225473129718,
+                                -0.205287183891, -0.180224615141,
+                                -0.165277751908, 0.0411933458557,
+                                9.73837390723e-10]),
+            'PC.607': np.array([-0.105517545144, -0.41405687433,
+                                -0.150073017617, -0.116066751485,
+                                -0.158763393475, -0.0223918378516,
+                                -0.0263068046112, -0.0501209518091,
+                                9.73837390723e-10]),
+            'PC.634': np.array([-0.371636765565, 0.115484234741,
+                                0.0721996475289, 0.0898852445906,
+                                0.0212491652909, -0.184183028843,
+                                0.114877153051, -0.164938000185,
+                                9.73837390723e-10])
+            }
+        self.coords = pd.DataFrame.from_dict(coord_data, orient='index')
+
+        # Same samples restricted to the first three axes.
+        coord_data = {
+            'PC.636': np.array([-0.212230626531, 0.216034194368,
+                                0.03532727349]),
+            'PC.635': np.array([-0.277487312135, -0.0295483215975,
+                                -0.0744173437992]),
+            'PC.356': np.array([0.220886492631, 0.0874848360559,
+                                -0.351990132198]),
+            'PC.481': np.array([0.0308923744062, -0.0446295973489,
+                                0.133996451689]),
+            'PC.354': np.array([0.27616778138, -0.0341866951102,
+                                0.0633000238256]),
+            'PC.593': np.array([0.202458130052, -0.115216120518,
+                                0.301820871723]),
+            'PC.355': np.array([0.236467470907, 0.21863434374,
+                                -0.0301637746424]),
+            'PC.607': np.array([-0.105517545144, -0.41405687433,
+                                -0.150073017617]),
+            'PC.634': np.array([-0.371636765565, 0.115484234741,
+                                0.0721996475289])
+            }
+        self.coords_3axes = pd.DataFrame.from_dict(coord_data, orient='index')
+
+        # Per-sample metadata; 'Weight' is numeric-as-string and is used
+        # as the sort/weighting category in several tests.
+        metadata_map = {'PC.354': {'Treatment': 'Control',
+                                   'DOB': '20061218',
+                                   'Weight': '60',
+                                   'Description': 'Control_mouse_I.D._354'},
+                        'PC.355': {'Treatment': 'Control',
+                                   'DOB': '20061218',
+                                   'Weight': '55',
+                                   'Description': 'Control_mouse_I.D._355'},
+                        'PC.356': {'Treatment': 'Control',
+                                   'DOB': '20061126',
+                                   'Weight': '50',
+                                   'Description': 'Control_mouse_I.D._356'},
+                        'PC.481': {'Treatment': 'Control',
+                                   'DOB': '20070314',
+                                   'Weight': '52',
+                                   'Description': 'Control_mouse_I.D._481'},
+                        'PC.593': {'Treatment': 'Control',
+                                   'DOB': '20071210',
+                                   'Weight': '57',
+                                   'Description': 'Control_mouse_I.D._593'},
+                        'PC.607': {'Treatment': 'Fast',
+                                   'DOB': '20071112',
+                                   'Weight': '65',
+                                   'Description': 'Fasting_mouse_I.D._607'},
+                        'PC.634': {'Treatment': 'Fast',
+                                   'DOB': '20080116',
+                                   'Weight': '68',
+                                   'Description': 'Fasting_mouse_I.D._634'},
+                        'PC.635': {'Treatment': 'Fast',
+                                   'DOB': '20080116',
+                                   'Weight': '70',
+                                   'Description': 'Fasting_mouse_I.D._635'},
+                        'PC.636': {'Treatment': 'Fast',
+                                   'DOB': '20080116',
+                                   'Weight': '72',
+                                   'Description': 'Fasting_mouse_I.D._636'}}
+        self.metadata_map = pd.DataFrame.from_dict(metadata_map,
+                                                   orient='index')
+
+        # Proportion explained by each of the 9 axes (percentages).
+        self.prop_expl = np.array([25.6216900347, 15.7715955926,
+                                   14.1215046787, 11.6913885817, 9.83044890697,
+                                   8.51253468595, 7.88775505332, 6.56308246609,
+                                   4.42499350906e-16])
+
+        # Pre-built result objects; their serialized forms are checked
+        # against the data/gr_*, data/cr_* and data/vr_* fixture files.
+        gr_wo_msg = GroupResults('Foo', np.array([-2.6750, -0.2510,
+                                                  -2.8322, 0.]),
+                                 -1.4398, {'mean': -1.4398, 'std': 1.3184},
+                                 None)
+        gr_w_msg = GroupResults('Bar', np.array([9.6823, 2.9511, 5.2434]),
+                                5.9589, {'mean': 5.9589, 'std': 2.7942},
+                                "Cannot calculate the first difference "
+                                "with a window of size (3).")
+        self.groups = [gr_wo_msg, gr_w_msg]
+
+        cr_no_data = CategoryResults('foo', None, None,
+                                     'This group can not be used. All groups '
+                                     'should have more than 1 element.')
+        cr_data = CategoryResults('bar', 0.0110, self.groups, None)
+        self.categories = [cr_no_data, cr_data]
+
+        vr = GradientANOVAResults('wdiff', True, self.categories)
+
+        description = CategoryResults('Description', None, None,
+                                      'This group can not be used. All groups '
+                                      'should have more than 1 element.')
+        weight = CategoryResults('Weight', None, None,
+                                 'This group can not be used. All groups '
+                                 'should have more than 1 element.')
+        dob = CategoryResults('DOB', None, None,
+                              'This group can not be used. All groups '
+                              'should have more than 1 element.')
+        control_group = GroupResults('Control', np.array([2.3694, 3.3716,
+                                                          5.4452, 4.5704,
+                                                          4.4972]),
+                                     4.0508, {'avg': 4.0508}, None)
+        fast_group = GroupResults('Fast', np.array([7.2220, 4.2726, 1.1169,
+                                                    4.0271]),
+                                  4.1596, {'avg': 4.1596}, None)
+        treatment = CategoryResults('Treatment', 0.9331,
+                                    [control_group, fast_group], None)
+        vr_real = GradientANOVAResults('avg', False, [description, weight, dob,
+                                                      treatment])
+
+        self.vec_results = [vr, vr_real]
+
+    # This function makes the comparisons between the results classes easier
+    def assert_group_results_almost_equal(self, obs, exp):
+        """Tests that obs and exp are almost equal"""
+        self.assertEqual(obs.name, exp.name)
+        npt.assert_almost_equal(obs.trajectory, exp.trajectory)
+        npt.assert_almost_equal(obs.mean, exp.mean)
+        self.assertEqual(obs.info.keys(), exp.info.keys())
+        for key in obs.info:
+            npt.assert_almost_equal(obs.info[key], exp.info[key])
+        self.assertEqual(obs.message, exp.message)
+
+    def assert_category_results_almost_equal(self, obs, exp):
+        """Tests that obs and exp are almost equal"""
+        self.assertEqual(obs.category, exp.category)
+
+        # A None probability marks a category that could not be tested;
+        # in that case the groups must be None as well.
+        if exp.probability is None:
+            self.assertTrue(obs.probability is None)
+            self.assertTrue(obs.groups is None)
+        else:
+            npt.assert_almost_equal(obs.probability, exp.probability)
+            # Sort by group name so ordering differences don't fail the test.
+            for o, e in zip(sorted(obs.groups, key=attrgetter('name')),
+                            sorted(exp.groups, key=attrgetter('name'))):
+                self.assert_group_results_almost_equal(o, e)
+
+    def assert_gradientANOVA_results_almost_equal(self, obs, exp):
+        """Tests that obs and exp are almost equal"""
+        self.assertEqual(obs.algorithm, exp.algorithm)
+        self.assertEqual(obs.weighted, exp.weighted)
+
+        # Sort by category name so ordering differences don't fail the test.
+        for o, e in zip(sorted(obs.categories, key=attrgetter('category')),
+                        sorted(exp.categories, key=attrgetter('category'))):
+            self.assert_category_results_almost_equal(o, e)
+
+
+class GradientTests(BaseTests):
+    """Tests for the module-level helpers of skbio.stats.gradient."""
+
+    def test_weight_by_vector(self):
+        """Correctly weights the vectors"""
+        # NOTE(review): DataFrame.sort(columns=...)/DataFrame.sort(axis=...)
+        # and the .ix indexer used below are long-removed pandas APIs; this
+        # test only runs on pandas versions contemporary with this release.
+
+        # Unevenly spaced gradient: each sample gets a distinct weight.
+        trajectory = pd.DataFrame.from_dict({'s1': np.array([1]),
+                                             's2': np.array([2]),
+                                             's3': np.array([3]),
+                                             's4': np.array([4]),
+                                             's5': np.array([5]),
+                                             's6': np.array([6]),
+                                             's7': np.array([7]),
+                                             's8': np.array([8])},
+                                            orient='index')
+        trajectory.sort(columns=0, inplace=True)
+        w_vector = pd.Series(np.array([1, 5, 8, 12, 45, 80, 85, 90]),
+                             ['s1', 's2', 's3', 's4',
+                              's5', 's6', 's7', 's8']).astype(np.float64)
+        exp = pd.DataFrame.from_dict({'s1': np.array([1]),
+                                      's2': np.array([6.3571428571]),
+                                      's3': np.array([12.7142857142]),
+                                      's4': np.array([12.7142857142]),
+                                      's5': np.array([1.9264069264]),
+                                      's6': np.array([2.1795918367]),
+                                      's7': np.array([17.8]),
+                                      's8': np.array([20.3428571428])},
+                                     orient='index')
+        obs = _weight_by_vector(trajectory, w_vector)
+        pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
+
+        # Evenly spaced gradient (step 1): weighting is a no-op.
+        trajectory = pd.DataFrame.from_dict({'s1': np.array([1]),
+                                             's2': np.array([2]),
+                                             's3': np.array([3]),
+                                             's4': np.array([4]),
+                                             's5': np.array([5]),
+                                             's6': np.array([6]),
+                                             's7': np.array([7]),
+                                             's8': np.array([8])},
+                                            orient='index')
+        trajectory.sort(columns=0, inplace=True)
+        w_vector = pd.Series(np.array([1, 2, 3, 4, 5, 6, 7, 8]),
+                             ['s1', 's2', 's3', 's4',
+                              's5', 's6', 's7', 's8']).astype(np.float64)
+        exp = pd.DataFrame.from_dict({'s1': np.array([1]), 's2': np.array([2]),
+                                      's3': np.array([3]), 's4': np.array([4]),
+                                      's5': np.array([5]), 's6': np.array([6]),
+                                      's7': np.array([7]), 's8': np.array([8])
+                                      },
+                                     orient='index')
+        obs = _weight_by_vector(trajectory, w_vector)
+        pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
+
+        # Evenly spaced gradient with a non-unit step (5): still a no-op.
+        trajectory = pd.DataFrame.from_dict({'s2': np.array([2]),
+                                             's3': np.array([3]),
+                                             's4': np.array([4]),
+                                             's5': np.array([5]),
+                                             's6': np.array([6])},
+                                            orient='index')
+        trajectory.sort(columns=0, inplace=True)
+        w_vector = pd.Series(np.array([25, 30, 35, 40, 45]),
+                             ['s2', 's3', 's4', 's5', 's6']).astype(np.float64)
+        exp = pd.DataFrame.from_dict({'s2': np.array([2]), 's3': np.array([3]),
+                                      's4': np.array([4]), 's5': np.array([5]),
+                                      's6': np.array([6])}, orient='index')
+        obs = _weight_by_vector(trajectory, w_vector)
+        pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
+
+        # Multi-column trajectories with an evenly spaced gradient.
+        trajectory = pd.DataFrame.from_dict({'s1': np.array([1, 2, 3]),
+                                             's2': np.array([2, 3, 4]),
+                                             's3': np.array([5, 6, 7]),
+                                             's4': np.array([8, 9, 10])},
+                                            orient='index')
+        trajectory.sort(columns=0, inplace=True)
+        w_vector = pd.Series(np.array([1, 2, 3, 4]),
+                             ['s1', 's2', 's3', 's4']).astype(np.float64)
+        exp = pd.DataFrame.from_dict({'s1': np.array([1, 2, 3]),
+                                      's2': np.array([2, 3, 4]),
+                                      's3': np.array([5, 6, 7]),
+                                      's4': np.array([8, 9, 10])},
+                                     orient='index')
+        obs = _weight_by_vector(trajectory, w_vector)
+        pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
+
+        # Realistic coordinates weighted by the samples' Weight values.
+        sample_ids = ['PC.356', 'PC.481', 'PC.355', 'PC.593', 'PC.354']
+        trajectory = pd.DataFrame.from_dict({'PC.356': np.array([5.65948525,
+                                                                 1.37977545,
+                                                                 -4.9706303]),
+                                             'PC.481': np.array([0.79151484,
+                                                                 -0.70387996,
+                                                                 1.89223152]),
+                                             'PC.355': np.array([6.05869624,
+                                                                 3.44821245,
+                                                                 -0.42595788]),
+                                             'PC.593': np.array([5.18731945,
+                                                                 -1.81714206,
+                                                                 4.26216485]),
+                                             'PC.354': np.array([7.07588529,
+                                                                 -0.53917873,
+                                                                 0.89389158])
+                                             }, orient='index')
+        w_vector = pd.Series(np.array([50, 52, 55, 57, 60]),
+                             sample_ids).astype(np.float64)
+        exp = pd.DataFrame.from_dict({'PC.356': np.array([5.65948525,
+                                                          1.37977545,
+                                                          -4.9706303]),
+                                      'PC.481': np.array([0.98939355,
+                                                          -0.87984995,
+                                                          2.3652894]),
+                                      'PC.355': np.array([5.04891353,
+                                                          2.87351038,
+                                                          -0.3549649]),
+                                      'PC.593': np.array([6.48414931,
+                                                          -2.27142757,
+                                                          5.32770606]),
+                                      'PC.354': np.array([5.89657108,
+                                                          -0.44931561,
+                                                          0.74490965])
+                                      }, orient='index')
+        obs = _weight_by_vector(trajectory.ix[sample_ids],
+                                w_vector[sample_ids])
+        pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
+
+    def test_weight_by_vector_single_element(self):
+        """A single-sample trajectory is returned unchanged."""
+        trajectory = pd.DataFrame.from_dict({'s1': np.array([42])},
+                                            orient='index')
+        w_vector = pd.Series(np.array([5]), ['s1']).astype(np.float64)
+
+        obs = _weight_by_vector(trajectory, w_vector)
+        pdt.assert_frame_equal(obs, trajectory)
+
+    def test_weight_by_vector_error(self):
+        """Raises an error with erroneous inputs"""
+        # Different vector lengths
+        with self.assertRaises(ValueError):
+            _weight_by_vector([1, 2, 3, 4], [1, 2, 3])
+
+        # Inputs are not iterables
+        with self.assertRaises(TypeError):
+            _weight_by_vector(9, 1)
+
+        # Weighting vector is not a gradient (contains a repeated value)
+        with self.assertRaises(ValueError):
+            _weight_by_vector([1, 2, 3, 4], [1, 2, 3, 3])
+
+    def test_ANOVA_trajectories(self):
+        """Correctly performs the check before running ANOVA"""
+        # Only one group in a given category
+        group = GroupResults('Bar', np.array([2.3694943596755276,
+                                              3.3716388181385781,
+                                              5.4452089176253367,
+                                              4.5704258453173559,
+                                              4.4972603724478377]),
+                             4.05080566264, {'avg': 4.0508056626409275}, None)
+        obs = _ANOVA_trajectories('Foo', [group])
+        exp = CategoryResults('Foo', None, None,
+                              'Only one value in the group.')
+        self.assert_category_results_almost_equal(obs, exp)
+
+        # One group has only one element
+        group2 = GroupResults('FooBar', np.array([4.05080566264]),
+                              4.05080566264, {'avg': 4.05080566264}, None)
+        obs = _ANOVA_trajectories('Foo', [group, group2])
+        exp = CategoryResults('Foo', None, None,
+                              'This group can not be used. All groups '
+                              'should have more than 1 element.')
+        self.assert_category_results_almost_equal(obs, exp)
+
+        # Three valid groups: ANOVA runs and yields a probability.
+        gr1 = GroupResults('Foo', np.array([-0.219044992, 0.079674486,
+                                            0.09233683]),
+                           -0.015677892, {'avg': -0.015677892}, None)
+        gr2 = GroupResults('Bar', np.array([-0.042258081, 0.000204041,
+                                            0.024837603]),
+                           -0.0732878716, {'avg': -0.0732878716}, None)
+        gr3 = GroupResults('FBF', np.array([0.080504323, -0.212014503,
+                                            -0.088353435]),
+                           -0.0057388123, {'avg': -0.0057388123}, None)
+        obs = _ANOVA_trajectories('Cat', [gr1, gr2, gr3])
+        exp = CategoryResults('Cat', 0.8067456876, [gr1, gr2, gr3], None)
+        self.assert_category_results_almost_equal(obs, exp)
+
+
+class GroupResultsTests(BaseTests):
+    def test_to_file(self):
+        """GroupResults.to_files output matches the stored fixture files."""
+        out_paths = ['gr_wo_msg_out', 'gr_w_msg_out']
+        raw_paths = ['gr_wo_msg_raw', 'gr_w_msg_raw']
+
+        for gr, out_fp, raw_fp in zip(self.groups, out_paths, raw_paths):
+            obs_out_f = StringIO()
+            obs_raw_f = StringIO()
+            # First handle receives the formatted summary, second the raw
+            # trajectory values.
+            gr.to_files(obs_out_f, obs_raw_f)
+            obs_out = obs_out_f.getvalue()
+            obs_raw = obs_raw_f.getvalue()
+            obs_out_f.close()
+            obs_raw_f.close()
+
+            # 'U' = universal-newlines mode, keeping the comparison stable
+            # across platforms (deprecated in Python 3, valid here).
+            with open(get_data_path(out_fp), 'U') as f:
+                exp_out = f.read()
+
+            with open(get_data_path(raw_fp), 'U') as f:
+                exp_raw = f.read()
+
+            self.assertEqual(obs_out, exp_out)
+            self.assertEqual(obs_raw, exp_raw)
+
+
+class CategoryResultsTests(BaseTests):
+    def test_to_file(self):
+        """CategoryResults.to_files output matches the stored fixtures."""
+        # One category without usable data and one with results; see the
+        # matching cr_* files under data/.
+        out_paths = ['cr_no_data_out', 'cr_data_out']
+        raw_paths = ['cr_no_data_raw', 'cr_data_raw']
+
+        for cat, out_fp, raw_fp in zip(self.categories, out_paths, raw_paths):
+            obs_out_f = StringIO()
+            obs_raw_f = StringIO()
+            cat.to_files(obs_out_f, obs_raw_f)
+            obs_out = obs_out_f.getvalue()
+            obs_raw = obs_raw_f.getvalue()
+            obs_out_f.close()
+            obs_raw_f.close()
+
+            # 'U' = universal-newlines mode (deprecated in Python 3).
+            with open(get_data_path(out_fp), 'U') as f:
+                exp_out = f.read()
+
+            with open(get_data_path(raw_fp), 'U') as f:
+                exp_raw = f.read()
+
+            self.assertEqual(obs_out, exp_out)
+            self.assertEqual(obs_raw, exp_raw)
+
+
+class GradientANOVAResultsTests(BaseTests):
+    def test_to_file(self):
+        out_paths = ['vr_out']
+        raw_paths = ['vr_raw']
+
+        for vr, out_fp, raw_fp in zip(self.vec_results, out_paths, raw_paths):
+            obs_out_f = StringIO()
+            obs_raw_f = StringIO()
+            vr.to_files(obs_out_f, obs_raw_f)
+            obs_out = obs_out_f.getvalue()
+            obs_raw = obs_raw_f.getvalue()
+            obs_out_f.close()
+            obs_raw_f.close()
+
+            with open(get_data_path(out_fp), 'U') as f:
+                exp_out = f.read()
+
+            with open(get_data_path(raw_fp), 'U') as f:
+                exp_raw = f.read()
+
+            self.assertEqual(obs_out, exp_out)
+            self.assertEqual(obs_raw, exp_raw)
+
+
class GradientANOVATests(BaseTests):
    """Tests for the GradientANOVA base class (shared machinery only)."""

    def test_init(self):
        """Correctly initializes the class attributes"""
        # Note self._groups is tested on test_make_groups
        # so we are not testing it here

        # Test with weighted = False
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)

        pdt.assert_frame_equal(bv._coords, self.coords_3axes)
        exp_prop_expl = np.array([25.6216900347, 15.7715955926,
                                  14.1215046787])
        npt.assert_equal(bv._prop_expl, exp_prop_expl)
        pdt.assert_frame_equal(bv._metadata_map, self.metadata_map)
        self.assertIsNone(bv._weighting_vector)
        self.assertFalse(bv._weighted)

        # Test with weighted = True
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                           sort_category='Weight', weighted=True)

        pdt.assert_frame_equal(bv._coords, self.coords_3axes)
        npt.assert_equal(bv._prop_expl, exp_prop_expl)
        pdt.assert_frame_equal(bv._metadata_map, self.metadata_map)
        exp_weighting_vector = pd.Series(
            np.array([60, 55, 50, 52, 57, 65, 68, 70, 72]),
            ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593', 'PC.607',
             'PC.634', 'PC.635', 'PC.636']
            ).astype(np.float64)
        pdt.assert_series_equal(bv._weighting_vector, exp_weighting_vector)
        self.assertTrue(bv._weighted)

    def test_init_error(self):
        """Raises an error with erroneous inputs"""
        # Raises ValueError if any category in trajectory_categories is not
        # present in metadata_map
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          trajectory_categories=['foo'])
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          trajectory_categories=['Weight', 'Treatment', 'foo'])

        # Raises ValueError if sort_category is not present in metadata_map
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          sort_category='foo')

        # Raises ValueError if weighted == True and sort_category == None
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          weighted=True)

        # Raises ValueError if weighted == True and the values under
        # sort_category are not numerical
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          sort_category='Treatment', weighted=True)

        # Raises ValueError if axes > len(prop_expl)
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          axes=10)

        # Raises ValueError if axes < 0
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          axes=-1)

    def test_normalize_samples(self):
        """Correctly normalizes the samples between coords and metadata_map"""
        # NOTE: DataFrame.sort(axis=0) is the pandas API of this era (later
        # renamed sort_index); it is used here only to make row order
        # irrelevant to the frame comparisons.
        coord_data = {
            'PC.636': np.array([-0.212230626531, 0.216034194368,
                                0.03532727349]),
            'PC.635': np.array([-0.277487312135, -0.0295483215975,
                                -0.0744173437992]),
            'PC.355': np.array([0.236467470907, 0.21863434374,
                                -0.0301637746424]),
            'PC.607': np.array([-0.105517545144, -0.41405687433,
                                -0.150073017617]),
            'PC.634': np.array([-0.371636765565, 0.115484234741,
                                0.0721996475289])
            }
        subset_coords = pd.DataFrame.from_dict(coord_data, orient='index')

        metadata_map = {'PC.355': {'Treatment': 'Control',
                                   'DOB': '20061218',
                                   'Weight': '55',
                                   'Description': 'Control_mouse_I.D._355'},
                        'PC.607': {'Treatment': 'Fast',
                                   'DOB': '20071112',
                                   'Weight': '65',
                                   'Description': 'Fasting_mouse_I.D._607'},
                        'PC.634': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '68',
                                   'Description': 'Fasting_mouse_I.D._634'},
                        'PC.635': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '70',
                                   'Description': 'Fasting_mouse_I.D._635'},
                        'PC.636': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '72',
                                   'Description': 'Fasting_mouse_I.D._636'}}
        subset_metadata_map = pd.DataFrame.from_dict(metadata_map,
                                                     orient='index')

        # Takes a subset from metadata_map
        bv = GradientANOVA(subset_coords, self.prop_expl, self.metadata_map)
        pdt.assert_frame_equal(bv._coords.sort(axis=0),
                               subset_coords.sort(axis=0))
        pdt.assert_frame_equal(bv._metadata_map.sort(axis=0),
                               subset_metadata_map.sort(axis=0))

        # Takes a subset from coords
        bv = GradientANOVA(self.coords, self.prop_expl, subset_metadata_map)
        pdt.assert_frame_equal(bv._coords.sort(axis=0),
                               subset_coords.sort(axis=0))
        pdt.assert_frame_equal(bv._metadata_map.sort(axis=0),
                               subset_metadata_map.sort(axis=0))

        # Takes a subset from metadata_map and coords at the same time
        coord_data = {
            'PC.636': np.array([-0.212230626531, 0.216034194368,
                                0.03532727349]),
            'PC.635': np.array([-0.277487312135, -0.0295483215975,
                                -0.0744173437992]),
            'PC.355': np.array([0.236467470907, 0.21863434374,
                                -0.0301637746424])
            }
        subset_coords = pd.DataFrame.from_dict(coord_data, orient='index')

        metadata_map = {'PC.355': {'Treatment': 'Control',
                                   'DOB': '20061218',
                                   'Weight': '55',
                                   'Description': 'Control_mouse_I.D._355'},
                        'PC.607': {'Treatment': 'Fast',
                                   'DOB': '20071112',
                                   'Weight': '65',
                                   'Description': 'Fasting_mouse_I.D._607'},
                        'PC.634': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '68',
                                   'Description': 'Fasting_mouse_I.D._634'}}
        subset_metadata_map = pd.DataFrame.from_dict(metadata_map,
                                                     orient='index')

        bv = GradientANOVA(subset_coords, self.prop_expl, subset_metadata_map)
        # Only PC.355 is common to both inputs, so it is the sole survivor.
        exp_coords = pd.DataFrame.from_dict(
            {'PC.355': np.array([0.236467470907, 0.21863434374,
                                 -0.0301637746424])},
            orient='index')
        pdt.assert_frame_equal(bv._coords.sort(axis=0),
                               exp_coords.sort(axis=0))
        exp_metadata_map = pd.DataFrame.from_dict(
            {'PC.355': {'Treatment': 'Control',
                        'DOB': '20061218',
                        'Weight': '55',
                        'Description': 'Control_mouse_I.D._355'}},
            orient='index')
        pdt.assert_frame_equal(bv._metadata_map.sort(axis=0),
                               exp_metadata_map.sort(axis=0))

    def test_normalize_samples_error(self):
        """Raises an error if coords and metadata_map does not have samples in
        common"""
        error_metadata_map = pd.DataFrame.from_dict(
            {'Foo': {'Treatment': 'Control',
                     'DOB': '20061218',
                     'Weight': '55',
                     'Description': 'Control_mouse_I.D._355'},
             'Bar': {'Treatment': 'Fast',
                     'DOB': '20071112',
                     'Weight': '65',
                     'Description': 'Fasting_mouse_I.D._607'}},
            orient='index')
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, error_metadata_map)

    def test_make_groups(self):
        """Correctly generates the groups for trajectory_categories"""
        # Test with all categories
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        exp_groups = {'Treatment': {'Control': ['PC.354', 'PC.355', 'PC.356',
                                                'PC.481', 'PC.593'],
                                    'Fast': ['PC.607', 'PC.634',
                                             'PC.635', 'PC.636']},
                      'DOB': {'20061218': ['PC.354', 'PC.355'],
                              '20061126': ['PC.356'],
                              '20070314': ['PC.481'],
                              '20071210': ['PC.593'],
                              '20071112': ['PC.607'],
                              '20080116': ['PC.634', 'PC.635', 'PC.636']},
                      'Weight': {'60': ['PC.354'],
                                 '55': ['PC.355'],
                                 '50': ['PC.356'],
                                 '52': ['PC.481'],
                                 '57': ['PC.593'],
                                 '65': ['PC.607'],
                                 '68': ['PC.634'],
                                 '70': ['PC.635'],
                                 '72': ['PC.636']},
                      'Description': {'Control_mouse_I.D._354': ['PC.354'],
                                      'Control_mouse_I.D._355': ['PC.355'],
                                      'Control_mouse_I.D._356': ['PC.356'],
                                      'Control_mouse_I.D._481': ['PC.481'],
                                      'Control_mouse_I.D._593': ['PC.593'],
                                      'Fasting_mouse_I.D._607': ['PC.607'],
                                      'Fasting_mouse_I.D._634': ['PC.634'],
                                      'Fasting_mouse_I.D._635': ['PC.635'],
                                      'Fasting_mouse_I.D._636': ['PC.636']}}
        self.assertEqual(bv._groups, exp_groups)

        # Test with user-defined categories
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                           trajectory_categories=['Treatment', 'DOB'])
        exp_groups = {'Treatment': {'Control': ['PC.354', 'PC.355', 'PC.356',
                                                'PC.481', 'PC.593'],
                                    'Fast': ['PC.607', 'PC.634',
                                             'PC.635', 'PC.636']},
                      'DOB': {'20061218': ['PC.354', 'PC.355'],
                              '20061126': ['PC.356'],
                              '20070314': ['PC.481'],
                              '20071210': ['PC.593'],
                              '20071112': ['PC.607'],
                              '20080116': ['PC.634', 'PC.635', 'PC.636']}}
        self.assertEqual(bv._groups, exp_groups)

    def test_make_groups_natural_sorting(self):
        # Ensure sample IDs are sorted using a natural sorting algorithm.
        df = pd.DataFrame.from_dict({
            'a2': {'Col1': 'foo', 'Col2': '1.0'},
            'a1': {'Col1': 'bar', 'Col2': '-42.0'},
            'a11.0': {'Col1': 'foo', 'Col2': '2e-5'},
            'a-10': {'Col1': 'foo', 'Col2': '5'},
            'a10': {'Col1': 'bar', 'Col2': '5'}},
            orient='index')

        coords = pd.DataFrame.from_dict({
            'a10': np.array([-0.212230626531, 0.216034194368, 0.03532727349]),
            'a11.0': np.array([-0.277487312135, -0.0295483215975,
                               -0.0744173437992]),
            'a1': np.array([0.220886492631, 0.0874848360559,
                            -0.351990132198]),
            'a2': np.array([0.0308923744062, -0.0446295973489,
                            0.133996451689]),
            'a-10': np.array([0.27616778138, -0.0341866951102,
                              0.0633000238256])},
            orient='index')

        prop_expl = np.array([25.6216900347, 15.7715955926, 14.1215046787,
                              11.6913885817, 9.83044890697])

        # Sort by sample IDs.
        ga = GradientANOVA(coords, prop_expl, df)

        exp_groups = {
            'Col1': {
                'foo': ['a-10', 'a2', 'a11.0'],
                'bar': ['a1', 'a10']
            },
            'Col2': {
                '1.0': ['a2'],
                '-42.0': ['a1'],
                '2e-5': ['a11.0'],
                '5': ['a-10', 'a10']
            }
        }

        self.assertEqual(ga._groups, exp_groups)

        # Sort sample IDs by Col2.
        ga = GradientANOVA(coords, prop_expl, df,
                           trajectory_categories=['Col1'],
                           sort_category='Col2')

        exp_groups = {
            'Col1': {
                'foo': ['a11.0', 'a2', 'a-10'],
                'bar': ['a1', 'a10']
            }
        }

        self.assertEqual(ga._groups, exp_groups)

    def test_get_trajectories(self):
        """Should raise a NotImplementedError as this is a base class"""
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        with self.assertRaises(NotImplementedError):
            bv.get_trajectories()

    def test_get_group_trajectories(self):
        """Should raise a NotImplementedError in usual execution as this is
        a base class"""
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        # Call the private helper directly with valid sample ids so execution
        # reaches _compute_trajectories_results, which the base class leaves
        # unimplemented. (The previous version of this test duplicated
        # test_get_trajectories by calling get_trajectories() instead, so the
        # path named in the docstring was never exercised.)
        with self.assertRaises(NotImplementedError):
            bv._get_group_trajectories('Control', ['PC.354', 'PC.355'])

    def test_get_group_trajectories_error(self):
        """Should raise a RuntimeError if the user call _get_group_trajectories
        with erroneous inputs"""
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        with self.assertRaises(RuntimeError):
            bv._get_group_trajectories("foo", ['foo'])
        with self.assertRaises(RuntimeError):
            bv._get_group_trajectories("bar", [])

    def test_compute_trajectories_results(self):
        """Should raise a NotImplementedError as this is a base class"""
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        with self.assertRaises(NotImplementedError):
            bv._compute_trajectories_results("foo", [])
+
+
class AverageGradientANOVATests(BaseTests):
    """Tests for the 'avg' trajectory algorithm."""

    def test_get_trajectories_all(self):
        """get_trajectories analyzes every category when none are given."""
        aga = AverageGradientANOVA(self.coords, self.prop_expl,
                                   self.metadata_map)
        observed = aga.get_trajectories()

        # Categories whose groups all have a single member cannot be used;
        # they should come back with this message and no results.
        cannot_use = ('This group can not be used. All groups should have '
                      'more than 1 element.')
        expected_description = CategoryResults('Description', None, None,
                                               cannot_use)
        expected_weight = CategoryResults('Weight', None, None, cannot_use)
        expected_dob = CategoryResults('DOB', None, None, cannot_use)
        control = GroupResults('Control',
                               np.array([2.3694943596755276,
                                         3.3716388181385781,
                                         5.4452089176253367,
                                         4.5704258453173559,
                                         4.4972603724478377]),
                               4.05080566264,
                               {'avg': 4.0508056626409275}, None)
        fast = GroupResults('Fast',
                            np.array([7.2220488239279126,
                                      4.2726021564374372,
                                      1.1169097274372082,
                                      4.02717600030876]),
                            4.15968417703,
                            {'avg': 4.1596841770278292}, None)
        expected_treatment = CategoryResults('Treatment', 0.93311555,
                                             [control, fast], None)
        expected = GradientANOVAResults('avg', False,
                                        [expected_description,
                                         expected_weight,
                                         expected_dob,
                                         expected_treatment])
        self.assert_gradientANOVA_results_almost_equal(observed, expected)

    def test_get_trajectories_single(self):
        """get_trajectories analyzes only the requested category."""
        aga = AverageGradientANOVA(self.coords, self.prop_expl,
                                   self.metadata_map,
                                   trajectory_categories=['Treatment'])
        observed = aga.get_trajectories()

        control = GroupResults('Control',
                               np.array([2.3694943596755276,
                                         3.3716388181385781,
                                         5.4452089176253367,
                                         4.5704258453173559,
                                         4.4972603724478377]),
                               4.05080566264,
                               {'avg': 4.0508056626409275}, None)
        fast = GroupResults('Fast',
                            np.array([7.2220488239279126,
                                      4.2726021564374372,
                                      1.1169097274372082,
                                      4.02717600030876]),
                            4.15968417703,
                            {'avg': 4.1596841770278292}, None)
        expected_treatment = CategoryResults('Treatment', 0.93311555,
                                             [control, fast], None)
        expected = GradientANOVAResults('avg', False, [expected_treatment])

        self.assert_gradientANOVA_results_almost_equal(observed, expected)

    def test_get_trajectories_weighted(self):
        """Weighting by sort_category changes the computed trajectories."""
        aga = AverageGradientANOVA(self.coords, self.prop_expl,
                                   self.metadata_map,
                                   trajectory_categories=['Treatment'],
                                   sort_category='Weight', weighted=True)
        observed = aga.get_trajectories()

        control = GroupResults('Control',
                               np.array([5.7926887872, 4.3242308936,
                                         2.9212403501, 5.5400792151,
                                         1.2326804315]),
                               3.9621839355,
                               {'avg': 3.9621839355}, None)
        fast = GroupResults('Fast',
                            np.array([7.2187223286, 2.5522161282,
                                      2.2349795861, 4.5278215248]),
                            4.1334348919,
                            {'avg': 4.1334348919}, None)
        expected_treatment = CategoryResults('Treatment', 0.9057666800,
                                             [control, fast], None)
        expected = GradientANOVAResults('avg', True, [expected_treatment])
        self.assert_gradientANOVA_results_almost_equal(observed, expected)
+
+
class TrajectoryGradientANOVATests(BaseTests):
    """Tests for the 'trajectory' algorithm."""

    def test_get_trajectories(self):
        """Unweighted trajectories for a single sorted category."""
        tga = TrajectoryGradientANOVA(self.coords, self.prop_expl,
                                      self.metadata_map,
                                      trajectory_categories=['Treatment'],
                                      sort_category='Weight')
        observed = tga.get_trajectories()

        control = GroupResults('Control',
                               np.array([8.6681963576, 7.0962717982,
                                         7.1036434615, 4.0675712674]),
                               6.73392072123,
                               {'2-norm': 13.874494152}, None)
        fast = GroupResults('Fast',
                            np.array([11.2291654905, 3.9163741156,
                                      4.4943507388]),
                            6.5466301150,
                            {'2-norm': 12.713431181}, None)
        expected_treatment = CategoryResults('Treatment', 0.9374500147,
                                             [control, fast], None)
        expected = GradientANOVAResults('trajectory', False,
                                        [expected_treatment])
        self.assert_gradientANOVA_results_almost_equal(observed, expected)

    def test_get_trajectories_weighted(self):
        """Weighted trajectories for a single sorted category."""
        tga = TrajectoryGradientANOVA(self.coords, self.prop_expl,
                                      self.metadata_map,
                                      trajectory_categories=['Treatment'],
                                      sort_category='Weight', weighted=True)
        observed = tga.get_trajectories()

        control = GroupResults('Control',
                               np.array([8.9850643421, 6.1617529749,
                                         7.7989125908, 4.9666249268]),
                               6.9780887086,
                               {'2-norm': 14.2894710091}, None)
        fast = GroupResults('Fast',
                            np.array([9.6823682852, 2.9511115209,
                                      5.2434091953]),
                            5.9589630005,
                            {'2-norm': 11.3995901159}, None)
        expected_treatment = CategoryResults('Treatment', 0.6248157720,
                                             [control, fast], None)
        expected = GradientANOVAResults('trajectory', True,
                                        [expected_treatment])
        self.assert_gradientANOVA_results_almost_equal(observed, expected)
+
+
class FirstDifferenceGradientANOVATests(BaseTests):
    """Tests for the 'diff' (first difference) algorithm."""

    def test_get_trajectories(self):
        """Unweighted first differences for a single sorted category."""
        fdga = FirstDifferenceGradientANOVA(
            self.coords, self.prop_expl, self.metadata_map,
            trajectory_categories=['Treatment'], sort_category='Weight')
        observed = fdga.get_trajectories()

        control = GroupResults('Control',
                               np.array([-1.5719245594, 0.0073716633,
                                         -3.0360721941]),
                               -1.5335416967,
                               {'mean': -1.5335416967,
                                'std': 1.2427771485}, None)
        fast = GroupResults('Fast',
                            np.array([-7.3127913749, 0.5779766231]),
                            -3.3674073758,
                            {'mean': -3.3674073758,
                             'std': 3.9453839990}, None)
        expected_treatment = CategoryResults('Treatment', 0.6015260608,
                                             [control, fast], None)
        expected = GradientANOVAResults('diff', False, [expected_treatment])
        self.assert_gradientANOVA_results_almost_equal(observed, expected)

    def test_get_trajectories_weighted(self):
        """Weighted first differences for a single sorted category."""
        fdga = FirstDifferenceGradientANOVA(
            self.coords, self.prop_expl, self.metadata_map,
            trajectory_categories=['Treatment'], sort_category='Weight',
            weighted=True)
        observed = fdga.get_trajectories()

        control = GroupResults('Control',
                               np.array([-2.8233113671, 1.6371596158,
                                         -2.8322876639]),
                               -1.3394798050,
                               {'mean': -1.3394798050,
                                'std': 2.1048051097}, None)
        fast = GroupResults('Fast',
                            np.array([-6.7312567642, 2.2922976743]),
                            -2.2194795449,
                            {'mean': -2.2194795449,
                             'std': 4.5117772193}, None)
        expected_treatment = CategoryResults('Treatment', 0.8348644420,
                                             [control, fast], None)
        expected = GradientANOVAResults('diff', True, [expected_treatment])
        self.assert_gradientANOVA_results_almost_equal(observed, expected)
+
+
class WindowDifferenceGradientANOVATests(BaseTests):
    """Tests for the 'wdiff' (windowed difference) algorithm."""

    def test_get_trajectories(self):
        """Unweighted window differences with a window of size 3."""
        wdga = WindowDifferenceGradientANOVA(
            self.coords, self.prop_expl, self.metadata_map, 3,
            trajectory_categories=['Treatment'], sort_category='Weight')
        observed = wdga.get_trajectories()

        # The 'Fast' group has fewer samples than the window, so its results
        # carry a warning message (reproduced verbatim from the source).
        window_warning = ("Cannot calculate the first difference "
                          "with a window of size (3).")
        control = GroupResults('Control',
                               np.array([-2.5790341819, -2.0166764661,
                                         -3.0360721941, 0.]),
                               -1.9079457105,
                               {'mean': -1.9079457105,
                                'std': 1.1592139913}, None)
        fast = GroupResults('Fast',
                            np.array([11.2291654905, 3.9163741156,
                                      4.4943507388]),
                            6.5466301150,
                            {'mean': 6.5466301150,
                             'std': 3.3194494926},
                            window_warning)
        expected_treatment = CategoryResults('Treatment', 0.0103976830,
                                             [control, fast], None)
        expected = GradientANOVAResults('wdiff', False, [expected_treatment])
        self.assert_gradientANOVA_results_almost_equal(observed, expected)

    def test_get_trajectories_weighted(self):
        """Weighted window differences with a window of size 3."""
        wdga = WindowDifferenceGradientANOVA(
            self.coords, self.prop_expl, self.metadata_map, 3,
            trajectory_categories=['Treatment'], sort_category='Weight',
            weighted=True)
        observed = wdga.get_trajectories()

        window_warning = ("Cannot calculate the first difference "
                          "with a window of size (3).")
        control = GroupResults('Control',
                               np.array([-2.6759675112, -0.2510321601,
                                         -2.8322876639, 0.]),
                               -1.4398218338,
                               {'mean': -1.4398218338,
                                'std': 1.31845790844}, None)
        fast = GroupResults('Fast',
                            np.array([9.6823682852, 2.9511115209,
                                      5.2434091953]),
                            5.9589630005,
                            {'mean': 5.9589630005,
                             'std': 2.7942163293},
                            window_warning)
        expected_treatment = CategoryResults('Treatment', 0.0110675605,
                                             [control, fast], None)
        expected = GradientANOVAResults('wdiff', True, [expected_treatment])
        self.assert_gradientANOVA_results_almost_equal(observed, expected)
+
+
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    main()
diff --git a/skbio/stats/tests/test_misc.py b/skbio/stats/tests/test_misc.py
new file mode 100644
index 0000000..e00f24f
--- /dev/null
+++ b/skbio/stats/tests/test_misc.py
@@ -0,0 +1,99 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from unittest import TestCase, main
+
+import numpy as np
+
+from skbio.stats import p_value_to_str
+from skbio.stats._misc import _pprint_strs
+
+
class PValueToStrTests(TestCase):
    """Tests for formatting p-values as strings."""

    def setUp(self):
        # Shared p-value used by most of the tests below.
        self.p_value = 0.119123123123

    def test_valid_input(self):
        """Formatted precision tracks the number of permutations."""
        for permutations, expected in ((100, '0.12'), (250, '0.12'),
                                       (1000, '0.119')):
            self.assertEqual(p_value_to_str(self.p_value, permutations),
                             expected)

        self.assertEqual(p_value_to_str(0.0055623489, 999), '0.006')

    def test_too_few_permutations(self):
        """Fewer than 10 permutations yields an explanatory message."""
        for permutations in (9, 1, 0):
            observed = p_value_to_str(self.p_value, permutations)
            self.assertEqual(observed,
                             'Too few permutations to compute p-value '
                             '(permutations = %d)' % permutations)

    def test_missing_or_invalid_p_value(self):
        """None and NaN p-values are rendered as 'N/A'."""
        for bad_value in (None, np.nan):
            self.assertEqual(p_value_to_str(bad_value, 0), 'N/A')
+
+
class PPrintStrsTests(TestCase):
    """Tests for pretty-printing lists of strings."""

    def test_truncation(self):
        """Output longer than max_chars is cut and suffixed with '...'."""
        strs = ['a', 'b', 'c']
        # max_chars -> expected rendering; the cut can land between items
        # (on the comma or the space), on an item, or before any item fits.
        for max_chars, expected in ((4, "'a', ..."), (5, "'a', ..."),
                                    (6, "'a', ..."), (2, "...")):
            self.assertEqual(_pprint_strs(strs, max_chars=max_chars),
                             expected)

    def test_no_truncation(self):
        """Output within max_chars is returned whole."""
        self.assertEqual(_pprint_strs(['a'], max_chars=3), "'a'")
        self.assertEqual(_pprint_strs(['a', 'b', 'c']), "'a', 'b', 'c'")
        self.assertEqual(_pprint_strs(['a', 'b', 'c'], max_chars=13),
                         "'a', 'b', 'c'")

    def test_non_default_delimiter_and_suffix(self):
        """Custom delimiter and suffix are honored when truncating."""
        observed = _pprint_strs(['abc', 'defg', 'hi', 'jklmno'],
                                max_chars=14, delimiter=',', suffix='....')
        self.assertEqual(observed, "'abc','defg',....")
+
+
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    main()
diff --git a/skbio/stats/tests/test_power.py b/skbio/stats/tests/test_power.py
new file mode 100644
index 0000000..a332d79
--- /dev/null
+++ b/skbio/stats/tests/test_power.py
@@ -0,0 +1,398 @@
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+from __future__ import absolute_import, division, print_function
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+import pandas as pd
+from scipy.stats import kruskal
+
+from skbio.stats.power import (subsample_power,
+                               subsample_paired_power,
+                               _check_strs,
+                               confidence_bound,
+                               _calculate_power,
+                               _compare_distributions,
+                               _calculate_power_curve,
+                               bootstrap_power_curve,
+                               paired_subsamples)
+
+
+class PowerAnalysisTest(TestCase):
+
+    def setUp(self):
+        # Defines a testing function
+        def test_meta(ids, meta, cat, div):
+            """Checks the div metric with a Kruskal-Wallis test."""
+            out = [meta.loc[id_, div] for id_ in ids]
+            return kruskal(*out)[1]  # p-value of the Kruskal-Wallis H-test
+        self.test_meta = test_meta
+        # Sets the random seed
+        np.random.seed(5)
+        # Sets up the distributions of data for use
+        self.s1 = np.arange(0, 10, 1)
+        # Sets up two distributions which will never be equal by a rank-sum
+        # test.
+        self.samps = [np.ones((10))/10., np.ones((10))]
+        self.pop = [np.arange(0, 10, 0.1), np.arange(0, 20, 0.2)]
+        # Sets up a vector of alpha values
+        self.alpha = np.power(10, np.array([-1, -1.301, -2, -3])).round(3)
+        # Sets up a vector of samples
+        self.num_samps = np.arange(10, 100, 10)
+        # Sets up the test function, a rank-sum test
+        self.f = lambda x: kruskal(*x)[1]
+        # Sets up a mapping file
+        meta = {'GW': {'INT': 'N', 'ABX': np.nan, 'DIV': 19.5, 'AGE': '30s',
+                       'SEX': 'M'},
+                'CB': {'INT': 'Y', 'ABX': np.nan, 'DIV': 42.7, 'AGE': '30s',
+                       'SEX': 'M'},
+                'WM': {'INT': 'N', 'ABX': 'N', 'DIV': 27.5, 'AGE': '20s',
+                       'SEX': 'F'},
+                'MH': {'INT': 'Y', 'ABX': 'N', 'DIV': 62.3, 'AGE': '30s',
+                       'SEX': 'F'},
+                'CD': {'INT': 'Y', 'ABX': 'Y', 'DIV': 36.4, 'AGE': '40s',
+                       'SEX': 'F'},
+                'LF': {'INT': 'Y', 'ABX': 'N', 'DIV': 50.2, 'AGE': '20s',
+                       'SEX': 'M'},
+                'PP': {'INT': 'N', 'ABX': 'Y', 'DIV': 10.8, 'AGE': '30s',
+                       'SEX': 'F'},
+                'MM': {'INT': 'N', 'ABX': 'N', 'DIV': 55.6, 'AGE': '40s',
+                       'SEX': 'F'},
+                'SR': {'INT': 'N', 'ABX': 'Y', 'DIV': 2.2, 'AGE': '20s',
+                       'SEX': 'M'},
+                'TS': {'INT': 'N', 'ABX': 'Y', 'DIV': 16.1, 'AGE': '40s',
+                       'SEX': 'M'},
+                'PC': {'INT': 'Y', 'ABX': 'N', 'DIV': 82.6, 'AGE': '40s',
+                       'SEX': 'M'},
+                'NR': {'INT': 'Y', 'ABX': 'Y', 'DIV': 15.7, 'AGE': '20s',
+                       'SEX': 'F'}}
+        self.meta = pd.DataFrame.from_dict(meta, orient='index')
+        self.meta_f = lambda x: test_meta(x, self.meta, 'INT', 'DIV')
+        self.counts = np.array([5, 15, 25, 35, 45])
+        self.powers = [np.array([[0.105, 0.137, 0.174, 0.208, 0.280],
+                                 [0.115, 0.135, 0.196, 0.204, 0.281],
+                                 [0.096, 0.170, 0.165, 0.232, 0.256],
+                                 [0.122, 0.157, 0.202, 0.250, 0.279],
+                                 [0.132, 0.135, 0.173, 0.203, 0.279]]),
+                       np.array([[0.157, 0.345, 0.522, 0.639, 0.739],
+                                 [0.159, 0.374, 0.519, 0.646, 0.757],
+                                 [0.161, 0.339, 0.532, 0.634, 0.745],
+                                 [0.169, 0.372, 0.541, 0.646, 0.762],
+                                 [0.163, 0.371, 0.522, 0.648, 0.746]]),
+                       np.array([[0.276, 0.626, 0.865, 0.927, 0.992],
+                                 [0.267, 0.667, 0.848, 0.937, 0.978],
+                                 [0.236, 0.642, 0.850, 0.935, 0.977],
+                                 [0.249, 0.633, 0.828, 0.955, 0.986],
+                                 [0.249, 0.663, 0.869, 0.951, 0.985]])]
+        self.power_alpha = 0.1
+        self.effects = np.array([0.15245, 0.34877, 0.55830])
+        self.bounds = np.array([0.01049, 0.00299, 0.007492])
+        self.labels = np.array(['Age', 'Intervenption', 'Antibiotics'])
+        self.cats = np.array(['AGE', 'INT', 'ABX'])
+        self.cat = "AGE"
+        self.control_cats = ['INT', 'ABX']
+
+    def test_subsample_power_matched_relationship_error(self):
+        with self.assertRaises(ValueError):
+            subsample_power(self.f,
+                            samples=[np.ones((2)), np.ones((5))],
+                            draw_mode="matched")
+
+    def test_subsample_power_min_observations_error(self):
+        with self.assertRaises(ValueError):
+            subsample_power(self.f,
+                            samples=[np.ones((2)), np.ones((5))])
+
+    def test_subsample_power_interval_error(self):
+        with self.assertRaises(ValueError):
+            subsample_power(self.f,
+                            samples=[np.ones((3)), np.ones((5))],
+                            min_observations=2,
+                            min_counts=5,
+                            counts_interval=1000,
+                            max_counts=7)
+
+    def test_subsample_power_defaults(self):
+        test_p, test_c = subsample_power(self.f, self.pop,
+                                         num_iter=10, num_runs=5)
+        self.assertEqual(test_p.shape, (5, 4))
+        npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
+
+    def test_subsample_power(self):
+        test_p, test_c = subsample_power(self.f,
+                                         samples=self.pop,
+                                         num_iter=10,
+                                         num_runs=2,
+                                         min_counts=5)
+        self.assertEqual(test_p.shape, (2, 5))
+        npt.assert_array_equal(np.arange(5, 50, 10), test_c)
+
+    def test_subsample_paired_power_min_observations_error(self):
+        with self.assertRaises(ValueError):
+            subsample_paired_power(self.f,
+                                   self.meta,
+                                   cat=self.cat,
+                                   control_cats=self.control_cats)
+
+    def test_subsample_paired_power_interval_error(self):
+        with self.assertRaises(ValueError):
+            subsample_paired_power(self.f,
+                                   self.meta,
+                                   cat='INT',
+                                   control_cats=['SEX', 'AGE'],
+                                   min_observations=2,
+                                   counts_interval=12,
+                                   min_counts=5,
+                                   max_counts=7)
+
+    def test_subsample_paired_power(self):
+        known_c = np.array([1, 2, 3, 4, 5])
+        # Sets up the handling values
+        cat = 'INT'
+        control_cats = ['SEX']
+        # Tests for the control cats
+        test_p, test_c = subsample_paired_power(self.meta_f,
+                                                meta=self.meta,
+                                                cat=cat,
+                                                control_cats=control_cats,
+                                                min_observations=1,
+                                                counts_interval=1,
+                                                num_iter=10,
+                                                num_runs=2)
+        # Test the output shapes are sane
+        npt.assert_array_equal(test_p.shape, (2, 5))
+        npt.assert_array_equal(known_c, test_c)
+
+    def test__check_strs_str(self):
+        self.assertTrue(_check_strs('string'))
+
+    def test__check_strs_num(self):
+        self.assertTrue(_check_strs(4.2))
+
+    def test__check_str_nan(self):
+        self.assertFalse(_check_strs(np.nan))
+
+    def test__check_str_error(self):
+        with self.assertRaises(TypeError):
+            _check_strs(self.f)
+
+    def test_confidence_bound_default(self):
+        # Sets the known confidence bound
+        known = 2.2830070
+        test = confidence_bound(self.s1)
+        npt.assert_almost_equal(test, known, 3)
+
+    def test_confidence_bound_df(self):
+        known = 2.15109
+        test = confidence_bound(self.s1, df=15)
+        npt.assert_almost_equal(known, test, 3)
+
+    def test_confidence_bound_alpha(self):
+        known = 3.2797886
+        test = confidence_bound(self.s1, alpha=0.01)
+        npt.assert_almost_equal(known, test, 3)
+
+    def test_confidence_bound_nan(self):
+        # Sets the value to test
+        samples = np.array([[4, 3.2, 3.05],
+                            [2, 2.8, 2.95],
+                            [5, 2.9, 3.07],
+                            [1, 3.1, 2.93],
+                            [3, np.nan, 3.00]])
+        # Sets the known value
+        known = np.array([2.2284, 0.2573, 0.08573])
+        # Tests the function
+        test = confidence_bound(samples, axis=0)
+        npt.assert_almost_equal(known, test, 3)
+
+    def test_confidence_bound_axis_none(self):
+        # Sets the value to test
+        samples = np.array([[4, 3.2, 3.05],
+                            [2, 2.8, 2.95],
+                            [5, 2.9, 3.07],
+                            [1, 3.1, 2.93],
+                            [3, np.nan, 3.00]])
+        # Sets the known value
+        known = 0.52852
+        # Tests the output
+        test = confidence_bound(samples, axis=None)
+        npt.assert_almost_equal(known, test, 3)
+
+    def test__calculate_power(self):
+        # Sets up the values to test
+        crit = 0.025
+        # Sets the known value
+        known = 0.5
+        # Calculates the test value
+        test = _calculate_power(self.alpha, crit)
+        # Checks the test value
+        npt.assert_almost_equal(known, test)
+
+    def test__compare_distributions_mode_error(self):
+        with self.assertRaises(ValueError):
+            _compare_distributions(self.f, self.samps, mode='fig')
+
+    def test__compare_distributions_count_error(self):
+        with self.assertRaises(ValueError):
+            _compare_distributions(self.f, self.samps, counts=[1, 2, 3],
+                                   num_iter=100)
+
+    def test__compare_distributions_matched_length_error(self):
+        with self.assertRaises(ValueError):
+            _compare_distributions(self.f, [np.ones((5)), np.zeros((6))],
+                                   mode="matched")
+
+    def test__compare_distributions_sample_counts_error(self):
+        with self.assertRaises(ValueError):
+            _compare_distributions(self.f, [self.pop[0][:5], self.pop[1]],
+                                   25)
+
+    def test__compare_distributions_all_mode(self):
+        known = np.ones((100))*0.0026998
+        test = _compare_distributions(self.f, self.samps, num_iter=100)
+        npt.assert_allclose(known, test, 5)  # NOTE(review): 3rd positional arg is rtol, so rtol=5 is near-vacuous; decimal-style precision was likely intended -- confirm
+
+    def test__compare_distributions_matched_mode(self):
+        # Sets the known value
+        known_mean = 0.162195
+        known_std = 0.121887
+        known_shape = (100,)
+        # Sets the sample value
+        # Tests the sample value
+        test = _compare_distributions(self.f, self.pop, mode='matched',
+                                      num_iter=100)
+        npt.assert_allclose(known_mean, test.mean(), rtol=0.1, atol=0.02)
+        npt.assert_allclose(known_std, test.std(), rtol=0.1, atol=0.02)
+        self.assertEqual(known_shape, test.shape)
+
+    def test__calculate_power_curve_ratio_error(self):
+        with self.assertRaises(ValueError):
+            _calculate_power_curve(self.f, self.pop, self.num_samps,
+                                   ratio=np.array([0.1, 0.2, 0.3]),
+                                   num_iter=100)
+
+    def test__calculate_power_curve_default(self):
+        # Sets the known output
+        known = np.array([0.509, 0.822, 0.962, 0.997, 1.000, 1.000, 1.000,
+                          1.000,  1.000])
+
+        # Generates the test values.
+        test = _calculate_power_curve(self.f,
+                                      self.pop,
+                                      self.num_samps,
+                                      num_iter=100)
+        # Checks the samples returned sanely
+        npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
+
+    def test__calculate_power_curve_alpha(self):
+        # Sets the known output
+        known = np.array([0.31, 0.568, 0.842, 0.954, 0.995, 1.000, 1.000,
+                          1.000, 1.000])
+
+        # Generates the test values
+        test = _calculate_power_curve(self.f,
+                                      self.pop,
+                                      self.num_samps,
+                                      alpha=0.01,
+                                      num_iter=100)
+
+        # Checks the samples returned sanely
+        npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
+
+    def test__calculate_power_curve_ratio(self):
+        # Sets the known output
+        known = np.array([0.096, 0.333, 0.493, 0.743, 0.824, 0.937, 0.969,
+                          0.996, 0.998])
+
+        # Generates the test values
+        test = _calculate_power_curve(self.f,
+                                      self.pop,
+                                      self.num_samps,
+                                      ratio=np.array([0.25, 0.75]),
+                                      num_iter=100)
+
+        # Checks the samples returned sanely
+        npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
+
+    def test_bootstrap_power_curve(self):
+        # Sets the known values
+        known_mean = np.array([0.500, 0.82, 0.965, 0.995, 1.000, 1.000,
+                               1.000, 1.000,  1.000])
+        known_bound = np.array([0.03, 0.02, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00,
+                                0.00])
+        # Generates the test values
+        test_mean, test_bound = bootstrap_power_curve(self.f,
+                                                      self.pop,
+                                                      self.num_samps,
+                                                      num_iter=100)
+        # Checks the function returned sanely
+        npt.assert_allclose(test_mean, known_mean, rtol=0.05, atol=0.05)
+        npt.assert_allclose(test_bound, known_bound, rtol=0.1, atol=0.01)
+
+    def test_paired_subsamples_default(self):
+        # Sets the known np.array set
+        known_array = [sorted(['MM', 'SR', 'TS', 'GW', 'PP', 'WM']),
+                       sorted(['CD', 'LF', 'PC', 'CB', 'MH', 'NR'])]
+
+        # Gets the test value
+        cat = 'INT'
+        control_cats = ['SEX', 'AGE']
+        test_array = paired_subsamples(self.meta, cat, control_cats)
+        test_array[0] = sorted(test_array[0])
+        test_array[1] = sorted(test_array[1])
+        npt.assert_array_equal(known_array, test_array)
+
+    def test_paired_subsamples_break(self):
+        # Sets known np.array set
+        known_array = [np.array([]), np.array([])]
+        # Gets the test value
+        cat = 'ABX'
+        control_cats = ['SEX', 'AGE', 'INT']
+        test_array = paired_subsamples(self.meta, cat, control_cats)
+        npt.assert_array_equal(known_array, test_array)
+
+    def test_paired_subsample_undefined(self):
+        known_array = np.zeros((2, 0))
+        cat = 'INT'
+        order = ['Y', 'N']
+        control_cats = ['AGE', 'ABX', 'SEX']
+        test_array = paired_subsamples(self.meta, cat, control_cats,
+                                       order=order)
+        npt.assert_array_equal(test_array, known_array)
+
+    def test_paired_subsample_fewer(self):
+        # Set known value
+        known_array = {'PP', 'MH', 'CD', 'PC', 'TS', 'MM'}
+        # Sets up test values
+        cat = 'AGE'
+        order = ['30s', '40s']
+        control_cats = ['ABX']
+        test_array = paired_subsamples(self.meta, cat, control_cats,
+                                       order=order)
+        for v in test_array[1]:
+            self.assertTrue(v in known_array)
+        for v in test_array[1]:
+            self.assertTrue(v in known_array)
+
+    def test_paired_subsamples_not_strict(self):
+        known_array = [sorted(['WM', 'MM', 'GW', 'SR', 'TS']),
+                       sorted(['LF', 'PC', 'CB', 'NR', 'CD'])]
+
+        # Gets the test values
+        cat = 'INT'
+        control_cats = ['ABX', 'AGE']
+        test_array = paired_subsamples(self.meta, cat, control_cats,
+                                       strict_match=False)
+        test_array[0] = sorted(test_array[0])
+        test_array[1] = sorted(test_array[1])
+        npt.assert_array_equal(known_array, test_array)
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/stats/tests/test_spatial.py b/skbio/stats/tests/test_spatial.py
new file mode 100644
index 0000000..eb5c342
--- /dev/null
+++ b/skbio/stats/tests/test_spatial.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+import numpy as np
+
+from skbio.stats.spatial import (procrustes, _get_disparity, _center,
+                                 _normalize)
+
+
+class ProcrustesTests(TestCase):
+
+    """test the procrustes module, using floating point numpy arrays
+    """
+
+    def setUp(self):
+        """creates inputs"""
+        # an L
+        self.data1 = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
+
+        # a larger, shifted, mirrored L
+        self.data2 = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
+
+        # an L shifted up 1, right 1, and with point 4 shifted an extra .5
+        # to the right
+        # pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2)
+        self.data3 = np.array([[2, 4], [2, 3], [2, 2], [3, 2.5]], 'd')
+
+        # data4, data5 are standardized (trace(A*A') = 1).
+        # procrustes should return an identical copy if they are used
+        # as the first matrix argument.
+        shiftangle = np.pi / 8
+        self.data4 = np.array([[1, 0], [0, 1], [-1, 0],
+                              [0, -1]], 'd') / np.sqrt(4)
+        self.data5 = np.array([[np.cos(shiftangle), np.sin(shiftangle)],
+                              [np.cos(np.pi / 2 - shiftangle),
+                               np.sin(np.pi / 2 - shiftangle)],
+                              [-np.cos(shiftangle),
+                               -np.sin(shiftangle)],
+                              [-np.cos(np.pi / 2 - shiftangle),
+                               -np.sin(np.pi / 2 - shiftangle)]],
+                              'd') / np.sqrt(4)
+
+    def test_procrustes(self):
+        """tests procrustes' ability to match two matrices.
+
+        the second matrix is a rotated, shifted, scaled, and mirrored version
+        of the first, in two dimensions only
+        """
+        # can shift, mirror, and scale an 'L'?
+        a, b, disparity = procrustes(self.data1, self.data2)
+        np.testing.assert_allclose(b, a)
+        np.testing.assert_almost_equal(disparity, 0.)
+
+        # if first mtx is standardized, leaves first mtx unchanged?
+        m4, m5, disp45 = procrustes(self.data4, self.data5)
+        np.testing.assert_equal(m4, self.data4)
+
+        # at worst, data3 is an 'L' with one point off by .5
+        m1, m3, disp13 = procrustes(self.data1, self.data3)
+        self.assertTrue(disp13 < 0.5 ** 2)
+
+    def test_procrustes2(self):
+        """procrustes disparity should not depend on order of matrices"""
+        m1, m3, disp13 = procrustes(self.data1, self.data3)
+        m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
+        np.testing.assert_almost_equal(disp13, disp31)
+
+        # try with 3d, 8 pts per
+        rand1 = np.array([[2.61955202,  0.30522265,  0.55515826],
+                         [0.41124708, -0.03966978, -0.31854548],
+                         [0.91910318,  1.39451809, -0.15295084],
+                         [2.00452023,  0.50150048,  0.29485268],
+                         [0.09453595,  0.67528885,  0.03283872],
+                         [0.07015232,  2.18892599, -1.67266852],
+                         [0.65029688,  1.60551637,  0.80013549],
+                         [-0.6607528,  0.53644208,  0.17033891]])
+
+        rand3 = np.array([[0.0809969,  0.09731461, -0.173442],
+                         [-1.84888465, -0.92589646, -1.29335743],
+                         [0.67031855, -1.35957463,  0.41938621],
+                         [0.73967209, -0.20230757,  0.52418027],
+                         [0.17752796,  0.09065607,  0.29827466],
+                         [0.47999368, -0.88455717, -0.57547934],
+                         [-0.11486344, -0.12608506, -0.3395779],
+                         [-0.86106154, -0.28687488,  0.9644429]])
+        res1, res3, disp13 = procrustes(rand1, rand3)
+        res3_2, res1_2, disp31 = procrustes(rand3, rand1)
+        np.testing.assert_almost_equal(disp13, disp31)
+
+    def test_procrustes_shape_mismatch(self):
+        with self.assertRaises(ValueError):
+            procrustes(np.array([[1, 2], [3, 4]]),
+                       np.array([[5, 6, 7], [8, 9, 10]]))
+
+    def test_procrustes_empty_rows_or_cols(self):
+        empty = np.array([[]])
+        with self.assertRaises(ValueError):
+            procrustes(empty, empty)
+
+    def test_procrustes_no_variation(self):
+        with self.assertRaises(ValueError):
+            procrustes(np.array([[42, 42], [42, 42]]),
+                       np.array([[45, 45], [45, 45]]))
+
+    def test_get_disparity(self):
+        """tests get_disparity"""
+        disp = _get_disparity(self.data1, self.data3)
+        disp2 = _get_disparity(self.data3, self.data1)
+        np.testing.assert_equal(disp, disp2)
+        np.testing.assert_equal(disp, (3. * 2. + (1. + 1.5 ** 2)))
+
+        d1 = np.append(self.data1, self.data1, 0)
+        d3 = np.append(self.data3, self.data3, 0)
+
+        disp3 = _get_disparity(d1, d3)
+        disp4 = _get_disparity(d3, d1)
+        np.testing.assert_equal(disp3, disp4)
+        # 2x points in same configuration should give 2x disparity
+        np.testing.assert_equal(disp3, 2. * disp)
+
+    def test_center(self):
+        centered_mtx = _center(self.data1)
+        column_means = centered_mtx.mean(0)
+        for col_mean in column_means:
+            np.testing.assert_equal(col_mean, 0.)
+
+    def test_normalize(self):
+        norm_mtx = _normalize(self.data1)
+        np.testing.assert_equal(np.trace(np.dot(norm_mtx,
+                                                np.transpose(norm_mtx))), 1.)
+
+    # match_points isn't yet tested, as it's almost a private function
+    # and test_procrustes() tests it implicitly.
+
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/stats/tests/test_subsample.py b/skbio/stats/tests/test_subsample.py
new file mode 100644
index 0000000..24ba16c
--- /dev/null
+++ b/skbio/stats/tests/test_subsample.py
@@ -0,0 +1,262 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+try:
+    # future >= 0.12
+    from future.backports.test.support import import_fresh_module
+except ImportError:
+    from future.standard_library.test.support import import_fresh_module
+
+import unittest
+import warnings
+
+import numpy as np
+import numpy.testing as npt
+
+from skbio.stats import isubsample, subsample
+
+
+cy_subsample = import_fresh_module('skbio.stats._subsample',
+                                   fresh=['skbio.stats.__subsample'])
+py_subsample = import_fresh_module('skbio.stats._subsample',
+                                   blocked=['skbio.stats.__subsample'])
+
+
+def setup():
+    """Ignore warnings during tests."""
+    warnings.simplefilter("ignore")
+
+
+def teardown():
+    """Clear the list of warning filters, so that no filters are active."""
+    warnings.resetwarnings()
+
+
+class SubsampleCountsTests(object):
+    def test_subsample_counts_nonrandom(self):
+        a = np.array([0, 5, 0])
+
+        # Subsample same number of items that are in input (without
+        # replacement).
+        npt.assert_equal(self.module.subsample_counts(a, 5), a)
+
+        # Can only choose from one bin.
+        exp = np.array([0, 2, 0])
+        npt.assert_equal(self.module.subsample_counts(a, 2), exp)
+        npt.assert_equal(
+            self.module.subsample_counts(a, 2, replace=True), exp)
+
+        # Subsample zero items.
+        a = [3, 0, 1]
+        exp = np.array([0, 0, 0])
+        npt.assert_equal(self.module.subsample_counts(a, 0), exp)
+        npt.assert_equal(self.module.subsample_counts(a, 0, replace=True), exp)
+
+    def test_subsample_counts_without_replacement(self):
+        # Selecting 2 counts from the vector 1000 times yields each of the two
+        # possible results at least once each.
+        a = np.array([2, 0, 1])
+        actual = set()
+        for i in range(1000):
+            obs = self.module.subsample_counts(a, 2)
+            actual.add(tuple(obs))
+        self.assertEqual(actual, {(1, 0, 1), (2, 0, 0)})
+
+        obs = self.module.subsample_counts(a, 2)
+        self.assertTrue(np.array_equal(obs, np.array([1, 0, 1])) or
+                        np.array_equal(obs, np.array([2, 0, 0])))
+
+    def test_subsample_counts_with_replacement(self):
+        # Can choose from all in first bin, all in last bin (since we're
+        # sampling with replacement), or split across bins.
+        a = np.array([2, 0, 1])
+        actual = set()
+        for i in range(1000):
+            obs = self.module.subsample_counts(a, 2, replace=True)
+            actual.add(tuple(obs))
+        self.assertEqual(actual, {(1, 0, 1), (2, 0, 0), (0, 0, 2)})
+
+        # Test that selecting 35 counts from a 36-count vector 1000 times
+        # yields more than 10 different subsamples. If we were subsampling
+        # *without* replacement, there would be only 10 possible subsamples
+        # because there are 10 nonzero bins in array a. However, there are more
+        # than 10 possibilities when sampling *with* replacement.
+        a = np.array([2, 0, 1, 2, 1, 8, 6, 0, 3, 3, 5, 0, 0, 0, 5])
+        actual = set()
+        for i in range(1000):
+            obs = self.module.subsample_counts(a, 35, replace=True)
+            self.assertEqual(obs.sum(), 35)
+            actual.add(tuple(obs))
+        self.assertTrue(len(actual) > 10)
+
+    def test_subsample_counts_with_replacement_equal_n(self):
+        # test when n == counts.sum()
+        a = np.array([0, 0, 3, 4, 2, 1])
+        actual = set()
+        for i in range(1000):
+            obs = self.module.subsample_counts(a, 10, replace=True)
+            self.assertEqual(obs.sum(), 10)
+            actual.add(tuple(obs))
+        self.assertTrue(len(actual) > 1)
+
+    def test_subsample_counts_invalid_input(self):
+        # Negative n.
+        with self.assertRaises(ValueError):
+            self.module.subsample_counts([1, 2, 3], -1)
+
+        # Floats.
+        with self.assertRaises(TypeError):
+            self.module.subsample_counts([1, 2.3, 3], 2)
+
+        # Wrong number of dimensions.
+        with self.assertRaises(ValueError):
+            self.module.subsample_counts([[1, 2, 3], [4, 5, 6]], 2)
+
+        # Input has too few counts.
+        with self.assertRaises(ValueError):
+            self.module.subsample_counts([0, 5, 0], 6)
+
+
+class PySubsampleCountsTests(SubsampleCountsTests, unittest.TestCase):
+    module = py_subsample
+
+
+ at unittest.skipIf(cy_subsample is None,
+                 "Accelerated subsample module unavailable.")
+class CySubsampleCountsTests(SubsampleCountsTests, unittest.TestCase):
+    module = cy_subsample
+
+
+class SubsampleTests(unittest.TestCase):
+    def test_deprecated_api(self):
+        # light test to make sure deprecated API exists; subsample_counts is
+        # more thoroughly tested
+        obs = npt.assert_warns(DeprecationWarning, subsample, [0, 5, 0], 5)
+        npt.assert_equal(obs, [0, 5, 0])
+
+        # replace=True
+        a = np.array([0, 0, 3, 4, 2, 1])
+        actual = set()
+        for i in range(1000):
+            obs = npt.assert_warns(DeprecationWarning, subsample, a, 10,
+                                   replace=True)
+            self.assertEqual(obs.sum(), 10)
+            actual.add(tuple(obs))
+        self.assertTrue(len(actual) > 1)
+
+
+class ISubsampleTests(unittest.TestCase):
+    def setUp(self):
+        np.random.seed(123)
+
+        # comment indicates the expected random value
+        self.sequences = [
+            ('a_1', 'AATTGGCC-a1'),  # 2, 3624216819017203053
+            ('a_2', 'AATTGGCC-a2'),  # 5, 5278339153051796802
+            ('b_1', 'AATTGGCC-b1'),  # 4, 4184670734919783522
+            ('b_2', 'AATTGGCC-b2'),  # 0, 946590342492863505
+            ('a_4', 'AATTGGCC-a4'),  # 3, 4048487933969823850
+            ('a_3', 'AATTGGCC-a3'),  # 7, 7804936597957240377
+            ('c_1', 'AATTGGCC-c1'),  # 8, 8868534167180302049
+            ('a_5', 'AATTGGCC-a5'),  # 1, 3409506807702804593
+            ('c_2', 'AATTGGCC-c2'),  # 9, 8871627813779918895
+            ('c_3', 'AATTGGCC-c3')   # 6, 7233291490207274528
+        ]
+
+    def mock_sequence_iter(self, items):
+        return ({'SequenceID': sid, 'Sequence': seq} for sid, seq in items)
+
+    def test_isubsample_simple(self):
+        maximum = 10
+
+        def bin_f(x):
+            return x['SequenceID'].rsplit('_', 1)[0]
+
+        # note, the result here is sorted by sequence_id but is in heap order
+        # by the random values associated to each sequence
+        exp = sorted([('a', {'SequenceID': 'a_5', 'Sequence': 'AATTGGCC-a5'}),
+                      ('a', {'SequenceID': 'a_1', 'Sequence': 'AATTGGCC-a1'}),
+                      ('a', {'SequenceID': 'a_4', 'Sequence': 'AATTGGCC-a4'}),
+                      ('a', {'SequenceID': 'a_3', 'Sequence': 'AATTGGCC-a3'}),
+                      ('a', {'SequenceID': 'a_2', 'Sequence': 'AATTGGCC-a2'}),
+                      ('b', {'SequenceID': 'b_2', 'Sequence': 'AATTGGCC-b2'}),
+                      ('b', {'SequenceID': 'b_1', 'Sequence': 'AATTGGCC-b1'}),
+                      ('c', {'SequenceID': 'c_3', 'Sequence': 'AATTGGCC-c3'}),
+                      ('c', {'SequenceID': 'c_2', 'Sequence': 'AATTGGCC-c2'}),
+                      ('c', {'SequenceID': 'c_1', 'Sequence': 'AATTGGCC-c1'})],
+                     key=lambda x: x[0])
+        obs = isubsample(self.mock_sequence_iter(self.sequences), maximum,
+                         bin_f=bin_f)
+        self.assertEqual(sorted(obs, key=lambda x: x[0]), exp)
+
+    def test_per_sample_sequences_min_seqs(self):
+        maximum = 10
+        minimum = 3
+
+        def bin_f(x):
+            return x['SequenceID'].rsplit('_', 1)[0]
+
+        # note, the result here is sorted by sequence_id but is in heap order
+        # by the random values associated to each sequence
+        exp = sorted([('a', {'SequenceID': 'a_5', 'Sequence': 'AATTGGCC-a5'}),
+                      ('a', {'SequenceID': 'a_1', 'Sequence': 'AATTGGCC-a1'}),
+                      ('a', {'SequenceID': 'a_4', 'Sequence': 'AATTGGCC-a4'}),
+                      ('a', {'SequenceID': 'a_3', 'Sequence': 'AATTGGCC-a3'}),
+                      ('a', {'SequenceID': 'a_2', 'Sequence': 'AATTGGCC-a2'}),
+                      ('c', {'SequenceID': 'c_3', 'Sequence': 'AATTGGCC-c3'}),
+                      ('c', {'SequenceID': 'c_2', 'Sequence': 'AATTGGCC-c2'}),
+                      ('c', {'SequenceID': 'c_1', 'Sequence': 'AATTGGCC-c1'})],
+                     key=lambda x: x[0])
+        obs = isubsample(self.mock_sequence_iter(self.sequences), maximum,
+                         minimum, bin_f=bin_f)
+        self.assertEqual(sorted(obs, key=lambda x: x[0]), exp)
+
+    def test_per_sample_sequences_complex(self):
+        maximum = 2
+
+        def bin_f(x):
+            return x['SequenceID'].rsplit('_', 1)[0]
+
+        exp = sorted([('a', {'SequenceID': 'a_2', 'Sequence': 'AATTGGCC-a2'}),
+                      ('a', {'SequenceID': 'a_3', 'Sequence': 'AATTGGCC-a3'}),
+                      ('b', {'SequenceID': 'b_2', 'Sequence': 'AATTGGCC-b2'}),
+                      ('b', {'SequenceID': 'b_1', 'Sequence': 'AATTGGCC-b1'}),
+                      ('c', {'SequenceID': 'c_1', 'Sequence': 'AATTGGCC-c1'}),
+                      ('c', {'SequenceID': 'c_2', 'Sequence': 'AATTGGCC-c2'})],
+                     key=lambda x: x[0])
+        obs = isubsample(self.mock_sequence_iter(self.sequences), maximum,
+                         bin_f=bin_f, buf_size=1)
+        self.assertEqual(sorted(obs, key=lambda x: x[0]), exp)
+
+    def test_min_gt_max(self):
+        gen = isubsample([1, 2, 3], maximum=2, minimum=10)
+        with self.assertRaises(ValueError):
+            next(gen)
+
+    def test_min_lt_zero(self):
+        gen = isubsample([1, 2, 3], maximum=0, minimum=-10)
+        with self.assertRaises(ValueError):
+            next(gen)
+
+    def test_max_lt_zero(self):
+        gen = isubsample([1, 2, 3], maximum=-10)
+        with self.assertRaises(ValueError):
+            next(gen)
+
+    def test_binf_is_none(self):
+        maximum = 2
+        items = [1, 2]
+        exp = [(True, 1), (True, 2)]
+        obs = isubsample(items, maximum)
+        self.assertEqual(list(obs), exp)
+
+
+if __name__ == '__main__':
+    import nose
+    nose.runmodule()
diff --git a/skbio/tests/__init__.py b/skbio/tests/__init__.py
new file mode 100644
index 0000000..c99682c
--- /dev/null
+++ b/skbio/tests/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/tests/test_base.py b/skbio/tests/test_base.py
new file mode 100644
index 0000000..eb28cc6
--- /dev/null
+++ b/skbio/tests/test_base.py
@@ -0,0 +1,26 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import unittest
+
+from skbio._base import SkbioObject
+
+
class TestSkbioObject(unittest.TestCase):
    """Ensure the abstract base class refuses direct instantiation."""

    def test_no_instantiation(self):
        class Stub(SkbioObject):
            pass

        # Stub does not implement the abstract interface, so constructing
        # it must fail.
        self.assertRaises(TypeError, Stub)
+
+
if __name__ == '__main__':
    # Standard unittest entry point for direct execution.
    unittest.main()
diff --git a/skbio/tests/test_workflow.py b/skbio/tests/test_workflow.py
new file mode 100644
index 0000000..5982b46
--- /dev/null
+++ b/skbio/tests/test_workflow.py
@@ -0,0 +1,407 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from future.builtins import zip
+from collections import defaultdict
+from skbio.workflow import (Exists, NotExecuted, NotNone, Workflow, not_none,
+                            requires, method)
+from unittest import TestCase, main
+
+
def construct_iterator(**kwargs):
    """Build a test iterator from keyword arguments named ``iter*``.

    A single matching argument yields an iterator over its items; multiple
    matching arguments (taken in sorted key order) are zipped together.
    """
    streams = [kwargs[key] for key in sorted(kwargs)
               if key.startswith('iter')]
    if len(streams) == 1:
        return iter(streams[0])
    return zip(*streams)
+
+
class MockWorkflow(Workflow):
    """Workflow fixture with three option-gated method groups.

    Group priorities: A=90, C=10, B=0 (default), so execution order is
    A, C, B.  Each leaf method records itself in ``self.stats``, flips
    ``self.failed`` when the current item equals ``'fail <name>'``, and
    rewrites ``self.state`` to ``[name, item]``.
    """

    def initialize_state(self, item):
        # Reset per-item state: [last-method-name, current-item].
        self.state[0] = None
        self.state[1] = item

    @method(priority=90)
    @requires(option='A', values=True)
    def wf_groupA(self):
        self.methodA1()
        self.methodA2()

    @method()
    @requires(option='B', values=True)
    def wf_groupB(self):
        self.methodB1()
        self.methodB2()

    @method(priority=10)
    @requires(option='C', values=True)
    def wf_groupC(self):
        self.methodC1()
        self.methodC2()

    def methodA1(self):
        name = 'A1'
        self.stats[name] += 1
        if self.state[-1] == 'fail %s' % name:
            self.failed = True
        self.state = [name, self.state[-1]]

    def methodA2(self):
        name = 'A2'
        self.stats[name] += 1
        if self.state[-1] == 'fail %s' % name:
            self.failed = True
        self.state = [name, self.state[-1]]

    def methodB1(self):
        # Unlike the A/C methods, B methods replace state with the string
        # 'failed' on failure instead of recording [name, item].
        name = 'B1'
        self.stats[name] += 1
        if self.state[-1] == 'fail %s' % name:
            self.failed = True
            self.state = 'failed'
        else:
            self.state = [name, self.state[-1]]

    @requires(option='foo', values=[1, 2, 3])
    def methodB2(self):
        name = 'B2'
        self.stats[name] += 1
        if self.state[-1] == 'fail %s' % name:
            self.failed = True
            self.state = 'failed'
        else:
            self.state = [name, self.state[-1]]

    def methodC1(self):
        name = 'C1'
        self.stats[name] += 1
        if self.state[-1] == 'fail %s' % name:
            self.failed = True
        self.state = [name, self.state[-1]]

    @requires(option='C2', values=[1, 2, 3])
    def methodC2(self):
        name = 'C2'
        self.stats[name] += 1
        if self.state[-1] == 'fail %s' % name:
            self.failed = True
        self.state = [name, self.state[-1]]
+
+
class WorkflowTests(TestCase):
    """Exercise Workflow execution, debugging, and failure handling."""

    def setUp(self):
        # Three workflows over the same options: default (short circuits on
        # failure), debug-enabled, and with short circuiting disabled.
        opts = {'A': True, 'C': True}
        self.obj_short = MockWorkflow([None, None], options=opts,
                                      stats=defaultdict(int))
        self.obj_debug = MockWorkflow([None, None], debug=True, options=opts,
                                      stats=defaultdict(int))
        self.obj_noshort = MockWorkflow([None, None], short_circuit=False,
                                        options=opts,
                                        stats=defaultdict(int))

    def test_debug_trace(self):
        # In debug mode the workflow records each (method, order) call plus
        # the state before and after each call.
        gen = construct_iterator(**{'iter_x': [1, 2, 3, 4, 5]})
        obj = self.obj_debug(gen)

        exp = ['C1', 1]
        obs = next(obj)
        self.assertEqual(obs, exp)

        exp_trace = set([('wf_groupA', 0),
                         ('methodA1', 1),
                         ('methodA2', 2),
                         ('wf_groupC', 3),
                         ('methodC1', 4)])

        exp_pre_state = {('wf_groupA', 0): [None, 1],
                         ('methodA1', 1): [None, 1],
                         ('methodA2', 2): ['A1', 1],
                         ('wf_groupC', 3): ['A2', 1],
                         ('methodC1', 4): ['A2', 1]}

        exp_post_state = {('wf_groupA', 0): ['A2', 1],
                          ('methodA1', 1): ['A1', 1],
                          ('methodA2', 2): ['A2', 1],
                          ('wf_groupC', 3): ['C1', 1],
                          ('methodC1', 4): ['C1', 1]}

        obs_trace = self.obj_debug.debug_trace
        obs_pre_state = self.obj_debug.debug_pre_state
        obs_post_state = self.obj_debug.debug_post_state

        self.assertEqual(obs_trace, exp_trace)
        self.assertEqual(obs_pre_state, exp_pre_state)
        self.assertEqual(obs_post_state, exp_post_state)

    def test_init(self):
        self.assertEqual(self.obj_short.options, {'A': True, 'C': True})
        self.assertEqual(self.obj_short.stats, {})
        self.assertTrue(self.obj_short.short_circuit)
        self.assertEqual(self.obj_noshort.options, {'A': True, 'C': True})
        self.assertEqual(self.obj_noshort.stats, {})
        self.assertFalse(self.obj_noshort.short_circuit)

    def test_init_reserved_attributes(self):
        # 'failed' is managed by Workflow itself and may not be supplied.
        with self.assertRaises(AttributeError):
            Workflow('foo', failed=True)

    def test_all_wf_methods(self):
        # note on priority: groupA:90, groupC:10, groupB:0 (default)
        exp = [self.obj_short.wf_groupA, self.obj_short.wf_groupC,
               self.obj_short.wf_groupB]
        obs = self.obj_short._all_wf_methods()
        self.assertEqual(obs, exp)

    def test_call_AC_no_fail(self):
        iter_ = construct_iterator(**{'iter_x': [1, 2, 3, 4, 5]})

        # success function
        def sf(x):
            return x.state[:]

        exp_stats = {'A1': 5, 'A2': 5, 'C1': 5}
        # C2 isn't executed as its requirements aren't met in the options
        exp_result = [['C1', 1], ['C1', 2], ['C1', 3], ['C1', 4], ['C1', 5]]

        obs_result = list(self.obj_short(iter_, sf, None))

        self.assertEqual(obs_result, exp_result)
        self.assertEqual(self.obj_short.stats, exp_stats)

    def test_call_AC_fail(self):
        # With short circuiting, item 'fail A2' stops after group A.
        iter_ = construct_iterator(**{'iter_x': [1, 2, 'fail A2', 4, 5]})

        # success function
        def sf(x):
            return x.state[:]

        ff = sf  # failed function

        exp_stats = {'A1': 5, 'A2': 5, 'C1': 4, 'C2': 4}

        self.obj_short.options['C2'] = 1
        # pass in a failed callback to capture the result, and pause execution
        gen = self.obj_short(iter_, sf, ff)

        r1 = next(gen)
        self.assertEqual(r1, ['C2', 1])
        self.assertFalse(self.obj_short.failed)

        r2 = next(gen)
        self.assertEqual(r2, ['C2', 2])
        self.assertFalse(self.obj_short.failed)

        r3 = next(gen)
        self.assertEqual(self.obj_short.state, ['A2', 'fail A2'])
        self.assertTrue(self.obj_short.failed)
        self.assertEqual(r3, ['A2', 'fail A2'])

        r4 = next(gen)
        self.assertEqual(r4, ['C2', 4])
        self.assertFalse(self.obj_short.failed)

        r5 = next(gen)
        self.assertEqual(r5, ['C2', 5])
        self.assertFalse(self.obj_short.failed)

        self.assertEqual(self.obj_short.stats, exp_stats)

    def test_call_AC_fail_noshort(self):
        # Without short circuiting, group C still runs after a failure in A.
        iter_ = construct_iterator(**{'iter_x': [1, 2, 'fail A2', 4, 5]})

        # success function
        def sf(x):
            return x.state[:]

        ff = sf  # failed function

        exp_stats = {'A1': 5, 'A2': 5, 'C1': 5}

        # pass in a failed callback to capture the result, and pause execution
        gen = self.obj_noshort(iter_, sf, ff)

        r1 = next(gen)
        self.assertEqual(r1, ['C1', 1])
        self.assertFalse(self.obj_noshort.failed)

        r2 = next(gen)
        self.assertEqual(r2, ['C1', 2])
        self.assertFalse(self.obj_noshort.failed)

        next(gen)
        self.assertEqual(self.obj_noshort.state, ['C1', 'fail A2'])
        self.assertTrue(self.obj_noshort.failed)

        r4 = next(gen)
        self.assertEqual(r4, ['C1', 4])
        self.assertFalse(self.obj_noshort.failed)

        r5 = next(gen)
        self.assertEqual(r5, ['C1', 5])
        self.assertFalse(self.obj_noshort.failed)

        self.assertEqual(self.obj_noshort.stats, exp_stats)
+
+
class MockWorkflowReqTest(Workflow):
    """Workflow fixture for requirement checks on state and options.

    Execution order by priority: run_if_not_none (20), always_run (10),
    needs_data (5).
    """

    def _allocate_state(self):
        # Override the base allocation: state is rebuilt per item.
        self.state = None

    def initialize_state(self, item):
        self.state = [None, item]

    @method(priority=5)
    @requires(state=lambda x: x[-1] < 3)
    def wf_needs_data(self):
        name = 'needs_data'
        self.stats[name] += 1
        if self.state[-1] == 'fail %s' % name:
            self.failed = True
        self.state = [name, self.state[-1]]

    @method(priority=10)
    def wf_always_run(self):
        name = 'always_run'
        self.stats[name] += 1
        if self.state[-1] == 'fail %s' % name:
            self.failed = True
        self.state = [name, self.state[-1]]

    @method(priority=20)
    @requires(option='cannot_be_none', values=not_none)
    def wf_run_if_not_none(self):
        name = 'run_if_not_none'
        self.stats[name] += 1
        if self.state[-1] == 'fail %s' % name:
            self.failed = True
        self.state = [name, self.state[-1]]
+
+
class RequiresTests(TestCase):
    """Exercise the requires decorator's option and state gating."""

    def test_validdata(self):
        obj = MockWorkflowReqTest([None, None], stats=defaultdict(int))
        single_iter = construct_iterator(**{'iter_x': [1, 2, 3, 4, 5]})

        # needs_data only fires while the item is < 3.
        exp_stats = {'needs_data': 2, 'always_run': 5}
        exp_result = [['needs_data', 1], ['needs_data', 2], ['always_run', 3],
                      ['always_run', 4], ['always_run', 5]]

        obs_result = list(obj(single_iter))
        self.assertEqual(obs_result, exp_result)
        self.assertEqual(obj.stats, exp_stats)

    def test_not_none_avoid(self):
        # 'cannot_be_none' is None, so run_if_not_none never executes.
        obj = MockWorkflowReqTest([None, None], {'cannot_be_none': None},
                                  stats=defaultdict(int))
        single_iter = construct_iterator(**{'iter_x': [1, 2, 3, 4, 5]})

        exp_stats = {'needs_data': 2, 'always_run': 5}
        exp_result = [['needs_data', 1], ['needs_data', 2], ['always_run', 3],
                      ['always_run', 4], ['always_run', 5]]

        obs_result = list(obj(single_iter))

        self.assertEqual(obs_result, exp_stats) if False else None
        self.assertEqual(obs_result, exp_result)
        self.assertEqual(obj.stats, exp_stats)

    def test_not_none_execute(self):
        # 'cannot_be_none' is truthy, so run_if_not_none executes each item.
        obj = MockWorkflowReqTest([None, None],
                                  options={'cannot_be_none': True}, debug=True,
                                  stats=defaultdict(int))
        single_iter = construct_iterator(**{'iter_x': [1, 2, 3, 4, 5]})

        exp_stats = {'needs_data': 2, 'always_run': 5, 'run_if_not_none': 5}
        exp_result = [['needs_data', 1], ['needs_data', 2], ['always_run', 3],
                      ['always_run', 4], ['always_run', 5]]

        obs_result = list(obj(single_iter))
        self.assertEqual(obs_result, exp_result)
        self.assertEqual(obj.stats, exp_stats)

    def test_methodb1(self):
        obj = MockWorkflow([None, None], stats=defaultdict(int))
        obj.initialize_state('test')
        obj.methodB1()
        self.assertEqual(obj.state, ['B1', 'test'])
        self.assertFalse(obj.failed)

        # methodb1 executes regardless of if self.failed
        obj.failed = True
        obj.initialize_state('test 2')
        obj.methodB1()
        self.assertEqual(obj.state, ['B1', 'test 2'])

        obj.failed = False
        obj.state = [None, 'fail B1']
        obj.methodB1()
        self.assertEqual(obj.state, 'failed')

        self.assertEqual(obj.stats, {'B1': 3})

    def test_methodb2_accept(self):
        # methodb2 is setup to be valid when foo is in [1,2,3], make sure we
        # can execute
        obj = MockWorkflow([None, None], options={'foo': 1},
                           stats=defaultdict(int))
        obj.initialize_state('test')
        obj.methodB2()
        self.assertEqual(obj.state, ['B2', 'test'])
        self.assertEqual(obj.stats, {'B2': 1})

    def test_methodb2_ignore(self):
        # methodb2 is setup to be valid when foo is in [1, 2, 3], make sure
        # we do not execute
        obj = MockWorkflow([None, None], options={'foo': 'bar'},
                           stats=defaultdict(int))
        obj.methodB2()
        self.assertEqual(obj.state, [None, None])
        self.assertEqual(obj.stats, {})
+
+
class PriorityTests(TestCase):
    """The method decorator must attach priority and preserve metadata."""

    def test_dec(self):
        @method(priority=10)
        def foo(x, y, z):
            """doc check"""
            return x + y + z

        # Priority is stored on the wrapper; __name__/__doc__ pass through.
        self.assertEqual(foo.priority, 10)
        self.assertEqual(foo.__name__, 'foo')
        self.assertEqual(foo.__doc__, 'doc check')
+
+
class NotExecutedTests(TestCase):
    """Calling a NotExecuted marker stores the message and returns itself."""

    def test_call(self):
        marker = NotExecuted()
        result = marker('foo')
        self.assertIs(result, marker)
        self.assertEqual(result.msg, 'foo')
+
+
class ExistsTests(TestCase):
    """An Exists sentinel reports membership for any value."""

    def test_contains(self):
        container = Exists()
        self.assertIn('foo', container)
        self.assertIn(None, container)
+
+
class NotNoneTests(TestCase):
    """A NotNone sentinel admits every value except None."""

    def test_contains(self):
        container = NotNone()
        self.assertIn('foo', container)
        self.assertNotIn(None, container)
+
+
if __name__ == '__main__':
    # Standard unittest entry point for direct execution.
    main()
diff --git a/skbio/tree/__init__.py b/skbio/tree/__init__.py
new file mode 100644
index 0000000..3afe453
--- /dev/null
+++ b/skbio/tree/__init__.py
@@ -0,0 +1,260 @@
+r"""
+Tree representations (:mod:`skbio.tree`)
+========================================
+
+.. currentmodule:: skbio.tree
+
+This module provides functionality for working with trees, including
+phylogenetic trees and hierarchies, and prefix trees (i.e., tries).
+Functionality is provided for constructing trees, for traversing in multiple
+ways, comparisons, fetching subtrees, and more. This module supports trees that
+are multifurcating and nodes that have single descendants.
+
+Classes
+-------
+
+.. autosummary::
+   :toctree: generated/
+
+    TreeNode
+    CompressedTrie
+
+Phylogenetic Reconstruction
+---------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+    nj
+
+Utility Functions
+-----------------
+
+.. autosummary::
+   :toctree: generated/
+
+    fasta_to_pairlist
+    majority_rule
+
+Exceptions
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   TreeError
+   NoLengthError
+   DuplicateNodeError
+   MissingNodeError
+   NoParentError
+
+Examples
+--------
+
+>>> from skbio import TreeNode
+
+A new tree can be constructed from a Newick string. Newick is a common format
+used to represent tree objects within a file. Newick was part of the original
+PHYLIP package from Joseph Felsenstein's group (defined `here
+<http://goo.gl/fIY1Iq>`_), and is based around representing nesting with
+parentheses. For instance, the following string describes a 3 taxon tree, with
+one internal node:
+
+    ((A, B)C, D)root;
+
+Where A, B, and D are tips of the tree, and C is an internal node that covers
+tips A and B.
+
+Now let's construct a simple tree and dump an ASCII representation:
+
+>>> tree = TreeNode.from_newick("((A, B)C, D)root;")
+>>> print tree.is_root()  # is this the root of the tree?
+True
+>>> print tree.is_tip()  # is this node a tip?
+False
+>>> print tree.ascii_art()
+                    /-A
+          /C-------|
+-root----|          \-B
+         |
+          \-D
+
+There are a few common ways to traverse a tree, and depending on your use,
+some methods are more appropriate than others. Wikipedia has a well written
+page on tree `traversal methods <http://goo.gl/K4Ufl>`_, and will go into
+further depth than what we'll cover here. We're only going to cover two of the
+commonly used traversals here, preorder and postorder, but we will show
+examples of two other common helper traversal methods to gather tips or
+internal nodes.
+
+The first traversal we'll cover is a preorder traversal in which you evaluate
+from root to tips, looking at the left most child first. For instance:
+
+>>> for node in tree.preorder():
+...    print node.name
+root
+C
+A
+B
+D
+
+The next method we'll look at is a postorder traversal which will evaluate
+the left subtree tips first before walking back up the tree:
+
+>>> for node in tree.postorder():
+...    print node.name
+A
+B
+C
+D
+root
+
+`TreeNode` provides two helper methods as well for iterating over just the tips
+or for iterating over just the internal nodes.
+
+>>> for node in tree.tips():
+...    print "Node name: %s, Is a tip: %s" % (node.name, node.is_tip())
+Node name: A, Is a tip: True
+Node name: B, Is a tip: True
+Node name: D, Is a tip: True
+
+>>> for node in tree.non_tips():
+...    print "Node name: %s, Is a tip: %s" % (node.name, node.is_tip())
+Node name: C, Is a tip: False
+
+Note, by default, `non_tips` will ignore `self` (which is the root in this
+case).  You can pass the `include_self` flag to `non_tips` if you wish to
+include `self`.
+
+The `TreeNode` provides a few ways to compare trees. First, let's create two
+similar trees and compare their topologies using `compare_subsets`. This
+distance is the fraction of common clades present in the two trees, where a
+distance of 0 means the trees contain identical clades, and a distance of 1
+indicates the trees do not share any common clades:
+
+>>> tree1 = TreeNode.from_newick("((A, B)C, (D, E)F, (G, H)I)root;")
+>>> tree2 = TreeNode.from_newick("((G, H)C, (D, E)F, (B, A)I)root;")
+>>> tree3 = TreeNode.from_newick("((D, B)C, (A, E)F, (G, H)I)root;")
+>>> print tree1.compare_subsets(tree1)  # identity case
+0.0
+>>> print tree1.compare_subsets(tree2)  # same tree but different clade order
+0.0
+>>> print tree1.compare_subsets(tree3)  # only 1 of 3 common subsets
+0.666666666667
+
+We can additionally take into account branch length when computing distances
+between trees. First, we're going to construct two new trees with described
+branch length, note the difference in the Newick strings:
+
+>>> tree1 = TreeNode.from_newick("((A:0.1, B:0.2)C:0.3, D:0.4, E:0.5)root;")
+>>> tree2 = TreeNode.from_newick("((A:0.4, B:0.8)C:0.3, D:0.1, E:0.5)root;")
+
+In these two trees, we've added on a description of length from the node to
+its parent, so for instance:
+
+>>> for node in tree1.postorder():
+...     print node.name, node.length
+A 0.1
+B 0.2
+C 0.3
+D 0.4
+E 0.5
+root None
+
+Now let's compare two trees using the distances computed pairwise between tips
+in the trees. The distance computed, by default, is the correlation of all
+pairwise tip-to-tip distances between trees:
+
+>>> print tree1.compare_tip_distances(tree1)  # identity case
+0.0
+>>> print tree1.compare_tip_distances(tree2)
+0.120492524415
+
+Prefix trees (i.e., tries) examples
+-----------------------------------
+
+Construct a Trie from a (key, value) list
+
+>>> from skbio.tree import CompressedTrie
+>>> pair_list = [("ab",  "0"),
+...              ("abababa", "1"),
+...              ("abab", "2"),
+...              ("baba", "3"),
+...              ("ababaa", "4"),
+...              ("a", "5"),
+...              ("abababa", "6"),
+...              ("bab", "7"),
+...              ("babba", "8")]
+>>> t = CompressedTrie(pair_list)
+
+Get the number of keys stored in the trie
+
+>>> len(t)
+9
+
+Get the number of nodes in the trie
+
+>>> t.size
+10
+
+Get the trie's prefix map
+
+>>> t.prefix_map
+{'1': ['6', '2', '0', '5'], '8': ['7'], '3': [], '4': []}
+
+Find the value attached to a given key
+
+>>> t.find("ababaa")
+['4']
+
+Add a new (key, value) pair to the Trie
+
+>>> t.insert("bac", "9")
+>>> t.find("bac")
+['9']
+>>> t.prefix_map
+{'1': ['6', '2', '0', '5'], '9': [], '3': [], '4': [], '8': ['7']}
+
+Create a new trie with a list of sequences
+
+>>> from skbio.tree import fasta_to_pairlist
+
+>>> seqs = [("s0", "ACA"),
+...         ("s1", "ACAGTC"),
+...         ("s2", "ACTA"),
+...         ("s3", "CAGT"),
+...         ("s4", "CATGAA"),
+...         ("s5", "A"),
+...         ("s6", "CATGTA"),
+...         ("s7", "CACCA")]
+
+>>> t = CompressedTrie(fasta_to_pairlist(seqs))
+
+>>> t.prefix_map
+{'s3': [], 's2': [], 's1': ['s0', 's5'], 's7': [], 's6': [], 's4': []}
+
+"""
+
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2014--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from ._tree import TreeNode
+from ._trie import CompressedTrie, fasta_to_pairlist
+from ._nj import nj
+from ._majority_rule import majority_rule
+from ._exception import (TreeError, NoLengthError, DuplicateNodeError,
+                         MissingNodeError, NoParentError)
+
# Public API of the skbio.tree subpackage.
__all__ = ['TreeNode', 'CompressedTrie', 'fasta_to_pairlist', 'nj',
           'majority_rule', 'TreeError', 'NoLengthError', 'DuplicateNodeError',
           'MissingNodeError', 'NoParentError']

# Expose `skbio.tree.test()` to run this subpackage's test suite through
# numpy's test runner.
test = Tester().test
diff --git a/skbio/tree/_exception.py b/skbio/tree/_exception.py
new file mode 100644
index 0000000..e8828e2
--- /dev/null
+++ b/skbio/tree/_exception.py
@@ -0,0 +1,34 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+
class TreeError(Exception):
    """Base class for errors raised by tree operations."""
+
+
class NoLengthError(TreeError):
    """Raised when a branch length is required but absent."""
+
+
class DuplicateNodeError(TreeError):
    """Raised when multiple nodes share an identical name."""
+
+
class MissingNodeError(TreeError):
    """Raised when an expected node cannot be found."""
+
+
class NoParentError(MissingNodeError):
    """Raised when a node lacks a parent where one is required."""
diff --git a/skbio/tree/_majority_rule.py b/skbio/tree/_majority_rule.py
new file mode 100644
index 0000000..81e185d
--- /dev/null
+++ b/skbio/tree/_majority_rule.py
@@ -0,0 +1,309 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from collections import defaultdict
+from future.builtins import zip
+
+import numpy as np
+
+from skbio.tree import TreeNode
+
+
def _walk_clades(trees, weights):
    """Walk all the clades of all the trees

    Parameters
    ----------
    trees : list of TreeNode
        The trees to walk
    weights : np.array
        Tree weights

    Returns
    -------
    list of tuple
        The clades and support values sorted by clade size (largest first).
        The tuples are of the form: (frozenset, float).
    defaultdict(float)
        The edge lengths, keyed by frozenset of the clade, and valued by the
        weighted average length of the clade by the trees the clade was
        observed in.

    """
    clade_counts = defaultdict(float)
    edge_lengths = defaultdict(float)
    total = weights.sum()

    # get clade counts
    def tipnames_f(n):
        return [n.name] if n.is_tip() else []

    for tree, weight in zip(trees, weights):
        # Memoize the frozenset of descendant tip names on every node.
        tree.cache_attr(tipnames_f, 'tip_names', frozenset)

        for node in tree.postorder():
            tip_names = node.tip_names

            # if node.length is not None, fetch it and weight it
            length = node.length * weight if node.length is not None else None

            clade_counts[tip_names] += weight

            if length is None:
                # NOTE(review): a single missing branch length marks this
                # clade's length as None; if a later tree then supplies a
                # length for the same clade, the += below would raise a
                # TypeError. Assumes trees are consistently with or without
                # lengths -- confirm.
                edge_lengths[tip_names] = None
            else:
                edge_lengths[tip_names] += length / total

    # sort clades by size (number of tips), largest clades first
    clade_counts = sorted(clade_counts.items(), key=lambda x: len(x[0]),
                          reverse=True)

    return clade_counts, edge_lengths
+
+
+def _filter_clades(clade_counts, cutoff_threshold):
+    """Filter clades that not well supported or are contradicted
+
+    Parameters
+    ----------
+    clade_counts : list of tuple
+        Where the first element in each tuple is the frozenset of the clade,
+        and the second element is the support value. It is expected that this
+        list is sorted by descending order by support.
+    cutoff_threshold : float
+        The minimum weighted observation count that a clade must have to be
+        considered supported.
+
+    Returns
+    -------
+    dict
+        A dict of the accepted clades, keyed by the frozenset of the clade and
+        valued by the support value.
+    """
+    accepted_clades = {}
+
+    for clade, count in clade_counts:
+        conflict = False
+
+        if count <= cutoff_threshold:
+            continue
+
+        if len(clade) > 1:
+            # check the current clade against all the accepted clades to see if
+            # it conflicts. A conflict is defined as:
+            # 1. the clades are not disjoint
+            # 2. neither clade is a subset of the other
+            for accepted_clade in accepted_clades:
+                intersect = clade.intersection(accepted_clade)
+                subset = clade.issubset(accepted_clade)
+                superset = clade.issuperset(accepted_clade)
+
+                if intersect and not (subset or superset):
+                    conflict = True
+
+        if conflict is False:
+            accepted_clades[clade] = count
+
+    return accepted_clades
+
+
def _build_trees(clade_counts, edge_lengths, support_attr):
    """Construct the trees with support

    Builds trees bottom-up: clades are processed smallest first, and each
    processed clade is folded into its smallest enclosing ancestor clade.

    Parameters
    ----------
    clade_counts : dict
        Keyed by the frozenset of the clade and valued by the support
    edge_lengths : dict
        Keyed by the frozenset of the clade and valued by the weighted length
    support_attr : str
        The name of the attribute to hold the support value

    Returns
    -------
    list of TreeNode
        A list of the constructed trees
    """
    nodes = {}
    queue = [(len(clade), clade) for clade in clade_counts]
    while queue:
        # The values within the queue are updated on each iteration, so it
        # doesn't look like an insertion sort will make sense unfortunately
        queue.sort()
        (clade_size, clade) = queue.pop(0)
        new_queue = []

        # search for ancestors of clade
        for (_, ancestor) in queue:
            if clade.issubset(ancestor):
                # update ancestor such that, in the following example:
                # ancestor == {1, 2, 3, 4}
                # clade == {2, 3}
                # new_ancestor == {1, {2, 3}, 4}
                new_ancestor = (ancestor - clade) | frozenset([clade])

                # update references for counts and lengths
                clade_counts[new_ancestor] = clade_counts.pop(ancestor)
                edge_lengths[new_ancestor] = edge_lengths.pop(ancestor)

                ancestor = new_ancestor

            new_queue.append((len(ancestor), ancestor))

        # if the clade is a tip, then we have a name
        if clade_size == 1:
            name = list(clade)[0]
        else:
            name = None

        # the clade will not be in nodes if it is a tip
        children = [nodes.pop(c) for c in clade if c in nodes]
        length = edge_lengths[clade]

        node = TreeNode(children=children, length=length, name=name)
        setattr(node, support_attr, clade_counts[clade])
        nodes[clade] = node

        queue = new_queue

    # Whatever remains unclaimed by an ancestor is a root; disjoint tip
    # sets therefore produce multiple trees.
    return list(nodes.values())
+
+
def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
    r"""Determines consensus trees from a list of rooted trees

    Parameters
    ----------
    trees : list of TreeNode
        The trees to operate on
    weights : list or np.array of {int, float}, optional
        If provided, the list must be in index order with `trees`. Each tree
        will receive the corresponding weight. If omitted, all trees will be
        equally weighted.
    cutoff : float, 0.0 <= cutoff <= 1.0
        Any clade that has <= cutoff support will be dropped. If cutoff is
        < 0.5, then it is possible that ties will result. If so, ties are
        broken arbitrarily depending on list sort order.
    support_attr : str
        The attribute to be decorated onto the resulting trees that contain the
        consensus support.

    Returns
    -------
    list of TreeNode
        Multiple trees can be returned in the case of two or more disjoint sets
        of tips represented on input.

    Notes
    -----
    This code was adapted from PyCogent's majority consensus code originally
    written by Matthew Wakefield. The method is based off the original
    description of consensus trees in [1]_. An additional description can be
    found in the Phylip manual [2]_. This method does not support majority rule
    extended.

    Support is computed as a weighted average of the weights of the trees in
    which the clade was observed. For instance, if {A, B, C} was observed in 5
    trees all with a weight of 1, its support would then be 5.

    References
    ----------
    .. [1] Margush T, McMorris FR. (1981) "Consensus n-trees." Bulletin for
           Mathematical Biology 43(2) 239-44.
    .. [2] http://evolution.genetics.washington.edu/phylip/doc/consense.html

    Examples
    --------
    Computing the majority consensus, using the example from the Phylip manual
    with the exception that we are computing majority rule and not majority
    rule extended.

    >>> from skbio.tree import TreeNode
    >>> trees = [
    ... TreeNode.from_newick("(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));"),
    ... TreeNode.from_newick("(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));"),
    ... TreeNode.from_newick("(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));"),
    ... TreeNode.from_newick("(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));"),
    ... TreeNode.from_newick("(A,(B,(E,(G,((F,I),(((J,H),D),C))))));"),
    ... TreeNode.from_newick("(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));"),
    ... TreeNode.from_newick("(A,(B,(E,((F,I),(G,(((J,H),D),C))))));"),
    ... TreeNode.from_newick("(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));"),
    ... TreeNode.from_newick("(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));")]
    >>> consensus = majority_rule(trees, cutoff=0.5)[0]
    >>> print(consensus.ascii_art())
                                  /-E
                                 |
                                 |          /-G
                        /--------|         |
                       |         |         |          /-F
                       |         |         |---------|
                       |          \--------|          \-I
                       |                   |
                       |                   |          /-C
              /--------|                   |         |
             |         |                    \--------|          /-D
             |         |                             |         |
             |         |                              \--------|--J
    ---------|         |                                       |
             |         |                                        \-H
             |         |
             |          \-B
             |
              \-A
    >>> for node in consensus.non_tips():
    ...     support_value = node.support
    ...     names = ' '.join([n.name for n in node.tips()])
    ...     print("Tips: %s, support: %s" % (names, support_value))
    Tips: F I, support: 9.0
    Tips: D J H, support: 6.0
    Tips: C D J H, support: 6.0
    Tips: G F I C D J H, support: 6.0
    Tips: E G F I C D J H, support: 9.0
    Tips: E G F I C D J H B, support: 9.0

    In the next example, multiple trees will be returned which can happen if
    clades are not well supported across the trees. In addition, this can arise
    if not all tips are present across all trees.

    >>> trees = [
    ...     TreeNode.from_newick("((a,b),(c,d),(e,f))"),
    ...     TreeNode.from_newick("(a,(c,d),b,(e,f))"),
    ...     TreeNode.from_newick("((c,d),(e,f),b)"),
    ...     TreeNode.from_newick("(a,(c,d),(e,f))")]
    >>> consensus_trees = majority_rule(trees)
    >>> print(len(consensus_trees))
    4
    >>> for tree in consensus_trees:
    ...     print(tree.ascii_art())
    --b
    --a
              /-f
    ---------|
              \-e
              /-d
    ---------|
              \-c

    """
    if weights is None:
        # All trees contribute equally by default.
        weights = np.ones(len(trees), dtype=float)
    else:
        weights = np.asarray(weights)
        if len(weights) != len(trees):
            raise ValueError("Number of weights and trees differ!")

    # A clade must exceed this weighted count to survive filtering.
    cutoff_threshold = cutoff * weights.sum()

    clade_counts, edge_lengths = _walk_clades(trees, weights)
    clade_counts = _filter_clades(clade_counts, cutoff_threshold)
    trees = _build_trees(clade_counts, edge_lengths, support_attr)

    return trees
diff --git a/skbio/tree/_nj.py b/skbio/tree/_nj.py
new file mode 100644
index 0000000..eb8a3ba
--- /dev/null
+++ b/skbio/tree/_nj.py
@@ -0,0 +1,286 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import numpy as np
+from six import StringIO
+
+from skbio.stats.distance import DistanceMatrix
+from skbio.tree import TreeNode
+
+
def nj(dm, disallow_negative_branch_length=True, result_constructor=None):
    """ Apply neighbor joining for phylogenetic reconstruction.

    Parameters
    ----------
    dm : skbio.DistanceMatrix
        Input distance matrix containing distances between OTUs.
    disallow_negative_branch_length : bool, optional
        Neighbor joining can result in negative branch lengths, which don't
        make sense in an evolutionary context. If `True`, negative branch
        lengths will be returned as zero, a common strategy for handling this
        issue that was proposed by the original developers of the algorithm.
    result_constructor : function, optional
        Function to apply to construct the result object. This must take a
        newick-formatted string as input. The result of applying this function
        to a newick-formatted string will be returned from this function. This
        defaults to ``lambda x: TreeNode.read(StringIO(x), format='newick')``.

    Returns
    -------
    TreeNode
        By default, the result object is a `TreeNode`, though this can be
        overridden by passing `result_constructor`.

    See Also
    --------
    TreeNode.root_at_midpoint

    Notes
    -----
    Neighbor joining was initially described in Saitou and Nei (1987) [1]_. The
    example presented here is derived from the Wikipedia page on neighbor
    joining [2]_. The Phylip manual also describes the method [3]_ and Phylip
    itself provides an implementation which is useful for comparison.

    Neighbor joining, by definition, creates unrooted trees. One strategy for
    rooting the resulting trees is midpoint rooting, which is accessible as
    ``TreeNode.root_at_midpoint``.

    References
    ----------
    .. [1] Saitou N, and Nei M. (1987) "The neighbor-joining method: a new
       method for reconstructing phylogenetic trees." Molecular Biology and
       Evolution. PMID: 3447015.
    .. [2] http://en.wikipedia.org/wiki/Neighbour_joining
    .. [3] http://evolution.genetics.washington.edu/phylip/doc/neighbor.html

    Examples
    --------
    Define a new distance matrix object describing the distances between five
    OTUs: a, b, c, d, and e.

    >>> from skbio import DistanceMatrix
    >>> from skbio.tree import nj

    >>> data = [[0,  5,  9,  9,  8],
    ...         [5,  0, 10, 10,  9],
    ...         [9, 10,  0,  8,  7],
    ...         [9, 10,  8,  0,  3],
    ...         [8,  9,  7,  3,  0]]
    >>> ids = list('abcde')
    >>> dm = DistanceMatrix(data, ids)

    Construct the neighbor joining tree representing the relationship between
    those OTUs. This is returned as a TreeNode object.

    >>> tree = nj(dm)
    >>> print(tree.ascii_art())
              /-d
             |
             |          /-c
             |---------|
    ---------|         |          /-b
             |          \--------|
             |                    \-a
             |
              \-e

    Again, construct the neighbor joining tree, but instead return the newick
    string representing the tree, rather than the TreeNode object. (Note that
    in this example the string output is truncated when printed to facilitate
    rendering.)

    >>> newick_str = nj(dm, result_constructor=str)
    >>> print(newick_str[:55], "...")
    (d:2.000000, (c:4.000000, (b:3.000000, a:2.000000):3.00 ...

    """
    if dm.shape[0] < 3:
        raise ValueError(
            "Distance matrix must be at least 3x3 to "
            "generate a neighbor joining tree.")

    if result_constructor is None:
        def result_constructor(x):
            return TreeNode.read(StringIO(x), format='newick')

    # initialize variables
    node_definition = None

    # while there are still more than three distances in the distance matrix,
    # join neighboring nodes.
    while(dm.shape[0] > 3):
        # compute the Q matrix
        q = _compute_q(dm)

        # identify the pair of nodes that have the lowest Q value. if multiple
        # pairs have equally low Q values, the first pair identified (closest
        # to the top-left of the matrix) will be chosen. these will be joined
        # in the current node.
        idx1, idx2 = _lowest_index(q)
        pair_member_1 = dm.ids[idx1]
        pair_member_2 = dm.ids[idx2]
        # determine the distance of each node to the new node connecting them.
        pair_member_1_len, pair_member_2_len = _pair_members_to_new_node(
            dm, idx1, idx2, disallow_negative_branch_length)
        # define the new node in newick style; the newick string itself is
        # used as the id of the new node in the collapsed distance matrix.
        node_definition = "(%s:%f, %s:%f)" % (pair_member_1,
                                              pair_member_1_len,
                                              pair_member_2,
                                              pair_member_2_len)
        # compute the new distance matrix, which will contain distances of all
        # other nodes to this new node
        dm = _compute_collapsed_dm(
            dm, pair_member_1, pair_member_2,
            disallow_negative_branch_length=disallow_negative_branch_length,
            new_node_id=node_definition)

    # When there are three distances left in the distance matrix, we have a
    # fully defined tree. The last node is internal, and its distances are
    # defined by these last three values.
    # First determine the distance between the last two nodes to be joined in
    # a pair...
    pair_member_1 = dm.ids[1]
    pair_member_2 = dm.ids[2]
    pair_member_1_len, pair_member_2_len = \
        _pair_members_to_new_node(dm, pair_member_1, pair_member_2,
                                  disallow_negative_branch_length)
    # ...then determine their distance to the other remaining node, but first
    # handle the trivial case where the input dm was only 3 x 3
    node_definition = node_definition or dm.ids[0]
    internal_len = _otu_to_new_node(
        dm, pair_member_1, pair_member_2, node_definition,
        disallow_negative_branch_length=disallow_negative_branch_length)
    # ...and finally create the newick string describing the whole tree.
    newick = "(%s:%f, %s:%f, %s:%f);" % (pair_member_1, pair_member_1_len,
                                         node_definition, internal_len,
                                         pair_member_2, pair_member_2_len)

    # package the result as requested by the user and return it.
    return result_constructor(newick)
+
+
def _compute_q(dm):
    """Compute Q matrix, used to identify the next pair of nodes to join.

    For an ``n x n`` distance matrix, ``Q[i, j] = (n - 2) * d(i, j) -
    sum(row i) - sum(row j)`` for ``i != j``, with a zero diagonal.

    Parameters
    ----------
    dm : skbio.DistanceMatrix
        The input distance matrix.

    Returns
    -------
    skbio.DistanceMatrix
        The Q matrix, sharing ids with `dm`.

    """
    n = dm.shape[0]
    data = dm.data
    # Vectorized form of the original O(n^2) Python double loop. The input
    # matrix is symmetric, so row sums equal column sums; broadcasting the
    # row-sum vector down rows and across columns reproduces
    # ((n - 2) * d[i, j]) - sum(row i) - sum(row j) in the same operation
    # order (so floating point results are bit-identical).
    row_sums = data.sum(axis=1)
    q = (n - 2) * data - row_sums[:, np.newaxis] - row_sums[np.newaxis, :]
    # The original loop never touched the diagonal; restore the zeros so the
    # result remains a valid (hollow) DistanceMatrix.
    np.fill_diagonal(q, 0.0)
    return DistanceMatrix(q, dm.ids)
+
+
def _compute_collapsed_dm(dm, i, j, disallow_negative_branch_length,
                          new_node_id):
    """Return the distance matrix resulting from joining ids i and j in a node.

    If the input distance matrix has shape ``(n, n)``, the result will have
    shape ``(n-1, n-1)`` as the ids `i` and `j` are collapsed to a single new
    id (`new_node_id`), which occupies row and column 0 of the output.

    """
    in_n = dm.shape[0]
    out_n = in_n - 1
    # The joined node comes first; all remaining ids keep their relative
    # order from the input matrix.
    out_ids = [new_node_id]
    out_ids.extend([e for e in dm.ids if e not in (i, j)])
    result = np.zeros((out_n, out_n))
    # idx1/idx2 enumerate out_ids[1:], so "+ 1" re-aligns them with the
    # actual row/column positions (offset by the new node at index 0).
    for idx1, out_id1 in enumerate(out_ids[1:]):
        # Row/column 0: distance from each remaining node to the new node.
        result[0, idx1 + 1] = result[idx1 + 1, 0] = _otu_to_new_node(
            dm, i, j, out_id1, disallow_negative_branch_length)
        # Remaining pairwise distances carry over unchanged; only the
        # strictly-lower triangle is visited, writing both symmetric cells.
        for idx2, out_id2 in enumerate(out_ids[1:idx1+1]):
            result[idx1+1, idx2+1] = result[idx2+1, idx1+1] = \
                dm[out_id1, out_id2]
    return DistanceMatrix(result, out_ids)
+
+
+def _lowest_index(dm):
+    """Return the index of the lowest value in the input distance matrix.
+
+    If there are ties for the lowest value, the index of top-left most
+    occurrence of that value will be returned.
+
+    This should be ultimately be replaced with a new DistanceMatrix object
+    method (#228).
+
+    """
+    lowest_value = np.inf
+    for i in range(dm.shape[0]):
+        for j in range(i):
+            curr_index = i, j
+            curr_value = dm[curr_index]
+            if curr_value < lowest_value:
+                lowest_value = curr_value
+                result = curr_index
+    return result
+
+
+def _otu_to_new_node(dm, i, j, k, disallow_negative_branch_length):
+    """Return the distance between a new node and some other node.
+
+    Parameters
+    ----------
+    dm : skbio.DistanceMatrix
+        The input distance matrix.
+    i, j : str
+        Identifiers of entries in the distance matrix to be collapsed. These
+        get collapsed to a new node, internally represented as `u`.
+    k : str
+        Identifier of the entry in the distance matrix for which distance to
+        `u` will be computed.
+    disallow_negative_branch_length : bool
+        Neighbor joining can result in negative branch lengths, which don't
+        make sense in an evolutionary context. If `True`, negative branch
+        lengths will be returned as zero, a common strategy for handling this
+        issue that was proposed by the original developers of the algorithm.
+
+    """
+    k_to_u = 0.5 * (dm[i, k] + dm[j, k] - dm[i, j])
+
+    if disallow_negative_branch_length and k_to_u < 0:
+        k_to_u = 0
+
+    return k_to_u
+
+
+def _pair_members_to_new_node(dm, i, j, disallow_negative_branch_length):
+    """Return the distance between a new node and decendants of that new node.
+
+    Parameters
+    ----------
+    dm : skbio.DistanceMatrix
+        The input distance matrix.
+    i, j : str
+        Identifiers of entries in the distance matrix to be collapsed (i.e.,
+        the descendents of the new node, which is internally represented as
+        `u`).
+    disallow_negative_branch_length : bool
+        Neighbor joining can result in negative branch lengths, which don't
+        make sense in an evolutionary context. If `True`, negative branch
+        lengths will be returned as zero, a common strategy for handling this
+        issue that was proposed by the original developers of the algorithm.
+
+    """
+    n = dm.shape[0]
+    i_to_j = dm[i, j]
+    i_to_u = (0.5 * i_to_j) + ((dm[i].sum() - dm[j].sum()) / (2 * (n - 2)))
+
+    if disallow_negative_branch_length and i_to_u < 0:
+        i_to_u = 0
+
+    j_to_u = i_to_j - i_to_u
+
+    if disallow_negative_branch_length and j_to_u < 0:
+        j_to_u = 0
+
+    return i_to_u, j_to_u
diff --git a/skbio/tree/_tree.py b/skbio/tree/_tree.py
new file mode 100644
index 0000000..261bec8
--- /dev/null
+++ b/skbio/tree/_tree.py
@@ -0,0 +1,3294 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import re
+import warnings
+from operator import or_
+from copy import deepcopy
+from itertools import combinations
+from functools import reduce
+from collections import defaultdict
+
+import numpy as np
+from scipy.stats import pearsonr
+from future.builtins import zip
+from six import StringIO
+
+from skbio._base import SkbioObject
+from skbio.stats.distance import DistanceMatrix
+from skbio.io import RecordError
+from ._exception import (NoLengthError, DuplicateNodeError, NoParentError,
+                         MissingNodeError, TreeError)
+
+
def distance_from_r(m1, m2):
    r"""Estimates distance as (1-r)/2: neg correl = max distance

    A Pearson correlation of 1 maps to distance 0, while a correlation of
    -1 (maximal anti-correlation) maps to the maximum distance of 1.

    Parameters
    ----------
    m1 : DistanceMatrix
        a distance matrix to compare
    m2 : DistanceMatrix
        a distance matrix to compare

    Returns
    -------
    float
        The distance between m1 and m2

    """
    correlation = pearsonr(m1.data.flat, m2.data.flat)[0]
    return (1 - correlation) / 2
+
+
+class TreeNode(SkbioObject):
+    r"""Representation of a node within a tree
+
+    A `TreeNode` instance stores links to its parent and optional children
+    nodes. In addition, the `TreeNode` can represent a `length` (e.g., a
+    branch length) between itself and its parent. Within this object, the use
+    of "children" and "descendants" is frequent in the documentation. A child
+    is a direct descendant of a node, while descendants are all nodes that are
+    below a given node (e.g., grand-children, etc).
+
+    Parameters
+    ----------
+    name : str or None
+        A node can have a name. It is common for tips in particular to have
+        names, for instance, in a phylogenetic tree where the tips correspond
+        to species.
+    length : float, int, or None
+        Distances between nodes can be used to represent evolutionary
+        distances, time, etc.
+    parent : TreeNode or None
+        Connect this node to a parent
+    children : list of TreeNode or None
+        Connect this node to existing children
+
+    Attributes
+    ----------
+    name
+    length
+    parent
+    children
+    id
+
+    """
+    default_write_format = 'newick'
+    _exclude_from_copy = set(['parent', 'children', '_tip_cache',
+                              '_non_tip_cache'])
+
    def __init__(self, name=None, length=None, parent=None, children=None):
        # Node label; tips commonly carry names (e.g., species). May be None.
        self.name = name
        # Branch length to the parent; None means "no length information".
        self.length = length
        self.parent = parent
        # Lazily-populated lookup caches for tips/non-tips; cleared whenever
        # the topology changes (see invalidate_caches / _adopt).
        self._tip_cache = {}
        self._non_tip_cache = {}
        # NOTE(review): appears to track names of attribute caches registered
        # on this tree -- confirm against the cache-management methods.
        self._registered_caches = set()

        self.children = []
        # Integer id; None until assigned (see assign_ids usage elsewhere).
        self.id = None

        # Route through extend() so caches are invalidated and any previous
        # parents of the children are cleaned up.
        if children is not None:
            self.extend(children)
+
+    def __repr__(self):
+        r"""Returns summary of the tree
+
+        Returns
+        -------
+        str
+            A summary of this node and all descendants
+
+        Notes
+        -----
+        This method returns the name of the node and a count of tips and the
+        number of internal nodes in the tree
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c, d)root;"))
+        >>> repr(tree)
+        '<TreeNode, name: root, internal node count: 1, tips count: 3>'
+
+        .. shownumpydoc
+        """
+        nodes = [n for n in self.traverse(include_self=False)]
+        n_tips = sum([n.is_tip() for n in nodes])
+        n_nontips = len(nodes) - n_tips
+        classname = self.__class__.__name__
+        name = self.name if self.name is not None else "unnamed"
+
+        return "<%s, name: %s, internal node count: %d, tips count: %d>" % \
+               (classname, name, n_nontips, n_tips)
+
+    def __str__(self):
+        r"""Returns string version of self, with names and distances
+
+        Returns
+        -------
+        str
+            Returns a Newick representation of the tree
+
+        See Also
+        --------
+        read
+        write
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> str(tree)
+        '((a,b)c);\n'
+
+        .. shownumpydoc
+        """
+
+        fh = StringIO()
+        self.write(fh)
+        string = fh.getvalue()
+        fh.close()
+        return string
+
    def __iter__(self):
        r"""Node iter iterates over the `children`."""
        return iter(self.children)

    def __len__(self):
        # len(node) is the number of direct children, not all descendants.
        return len(self.children)

    def __getitem__(self, i):
        r"""Node delegates slicing to `children`."""
        return self.children[i]
+
+    def _adopt(self, node):
+        r"""Update `parent` references but does NOT update `children`."""
+        self.invalidate_caches()
+        if node.parent is not None:
+            node.parent.remove(node)
+        node.parent = self
+        return node
+
+    def append(self, node):
+        r"""Appends a node to `children`, in-place, cleaning up refs
+
+        `append` will invalidate any node lookup caches, remove an existing
+        parent on `node` if one exists, set the parent of `node` to self
+        and add the `node` to `self` `children`.
+
+        Parameters
+        ----------
+        node : TreeNode
+            An existing TreeNode object
+
+        See Also
+        --------
+        extend
+
+        Examples
+        --------
+        >>> from skbio import TreeNode
+        >>> root = TreeNode(name="root")
+        >>> child1 = TreeNode(name="child1")
+        >>> child2 = TreeNode(name="child2")
+        >>> root.append(child1)
+        >>> root.append(child2)
+        >>> print(root)
+        (child1,child2)root;
+        <BLANKLINE>
+
+        """
+        self.children.append(self._adopt(node))
+
+    def extend(self, nodes):
+        r"""Append a `list` of `TreeNode` to `self`.
+
+        `extend` will invalidate any node lookup caches, remove existing
+        parents of the `nodes` if they have any, set their parents to self
+        and add the nodes to `self` `children`.
+
+        Parameters
+        ----------
+        nodes : list of TreeNode
+            A list of TreeNode objects
+
+        See Also
+        --------
+        append
+
+        Examples
+        --------
+        >>> from skbio import TreeNode
+        >>> root = TreeNode(name="root")
+        >>> root.extend([TreeNode(name="child1"), TreeNode(name="child2")])
+        >>> print(root)
+        (child1,child2)root;
+        <BLANKLINE>
+
+        """
+        self.children.extend([self._adopt(n) for n in nodes])
+
    def pop(self, index=-1):
        r"""Remove a `TreeNode` from `self`.

        Remove a child node by its index position. All node lookup caches
        are invalidated, and the parent reference for the popped node will be
        set to `None`. By default the last child is popped.

        Parameters
        ----------
        index : int
            The index position in `children` to pop

        Returns
        -------
        TreeNode
            The popped child

        Raises
        ------
        IndexError
            If `index` is out of range for `children` (propagated from
            ``list.pop``).

        See Also
        --------
        remove
        remove_deleted

        Examples
        --------
        >>> from six import StringIO
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(StringIO("(a,b)c;"))
        >>> print(tree.pop(0))
        a;
        <BLANKLINE>

        """
        return self._remove_node(index)
+
+    def _remove_node(self, idx):
+        r"""The actual (and only) method that performs node removal"""
+        self.invalidate_caches()
+        node = self.children.pop(idx)
+        node.parent = None
+        return node
+
+    def remove(self, node):
+        r"""Remove a node from self
+
+        Remove a `node` from `self` by identity of the node.
+
+        Parameters
+        ----------
+        node : TreeNode
+            The node to remove from self's children
+
+        Returns
+        -------
+        bool
+            `True` if the node was removed, `False` otherwise
+
+        See Also
+        --------
+        pop
+        remove_deleted
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("(a,b)c;"))
+        >>> tree.remove(tree.children[0])
+        True
+
+        """
+        for (i, curr_node) in enumerate(self.children):
+            if curr_node is node:
+                self._remove_node(i)
+                return True
+        return False
+
+    def remove_deleted(self, func):
+        r"""Delete nodes in which `func(node)` evaluates `True`.
+
+        Remove all descendants from `self` that evaluate `True` from `func`.
+        This has the potential to drop clades.
+
+        Parameters
+        ----------
+        func : a function
+            A function that evaluates `True` when a node should be deleted
+
+        See Also
+        --------
+        pop
+        remove
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("(a,b)c;"))
+        >>> tree.remove_deleted(lambda x: x.name == 'b')
+        >>> print(tree)
+        (a)c;
+        <BLANKLINE>
+        """
+        for node in self.traverse(include_self=False):
+            if func(node):
+                node.parent.remove(node)
+
+    def prune(self):
+        r"""Reconstructs correct topology after nodes have been removed.
+
+        Internal nodes with only one child will be removed and new connections
+        will be made to reflect change. This method is useful to call
+        following node removals as it will clean up nodes with singular
+        children.
+
+        Names and properties of singular children will override the names and
+        properties of their parents following the prune.
+
+        Node lookup caches are invalidated.
+
+        See Also
+        --------
+        shear
+        remove
+        pop
+        remove_deleted
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> to_delete = tree.find('b')
+        >>> tree.remove_deleted(lambda x: x == to_delete)
+        >>> print(tree)
+        ((a)c,(d,e)f)root;
+        <BLANKLINE>
+        >>> tree.prune()
+        >>> print(tree)
+        ((d,e)f,a)root;
+        <BLANKLINE>
+
+        """
+        # build up the list of nodes to remove so the topology is not altered
+        # while traversing
+        nodes_to_remove = []
+        for node in self.traverse(include_self=False):
+            if len(node.children) == 1:
+                nodes_to_remove.append(node)
+
+        # clean up the single children nodes
+        for node in nodes_to_remove:
+            child = node.children[0]
+
+            if child.length is None or node.length is None:
+                child.length = child.length or node.length
+            else:
+                child.length += node.length
+
+            node.parent.append(child)
+            node.parent.remove(node)
+
+    def shear(self, names):
+        """Lop off tips until the tree just has the desired tip names.
+
+        Parameters
+        ----------
+        names : Iterable of str
+            The tip names on the tree to keep
+
+        Returns
+        -------
+        TreeNode
+            The resulting tree
+
+        Raises
+        ------
+        ValueError
+            If the names do not exist in the tree
+
+        See Also
+        --------
+        prune
+        remove
+        pop
+        remove_deleted
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> t = TreeNode.read(StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        >>> sheared = t.shear(['G', 'M'])
+        >>> print(sheared.to_newick(with_distances=True))
+        (G:3.0,M:3.7);
+
+        """
+        tcopy = self.deepcopy()
+        all_tips = {n.name for n in tcopy.tips()}
+        ids = set(names)
+
+        if not ids.issubset(all_tips):
+            raise ValueError("ids are not a subset of the tree!")
+
+        while len(list(tcopy.tips())) != len(ids):
+            for n in list(tcopy.tips()):
+                if n.name not in ids:
+                    n.parent.remove(n)
+
+        tcopy.prune()
+
+        return tcopy
+
    def copy(self):
        r"""Returns a copy of self using an iterative approach

        Perform an iterative deepcopy of self. It is not assured that the copy
        of node attributes will be performed iteratively as that depends on
        the copy method of the types being copied

        Returns
        -------
        TreeNode
            A new copy of self

        See Also
        --------
        unrooted_deepcopy
        unrooted_copy

        Examples
        --------
        >>> from six import StringIO
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
        >>> tree_copy = tree.copy()
        >>> tree_nodes = set([id(n) for n in tree.traverse()])
        >>> tree_copy_nodes = set([id(n) for n in tree_copy.traverse()])
        >>> print(len(tree_nodes.intersection(tree_copy_nodes)))
        0

        """
        def __copy_node(node_to_copy):
            r"""Helper method to copy a node"""
            # this is _possibly_ dangerous, we're assuming the node to copy is
            # of the same class as self, and has the same exclusion criteria.
            # however, it is potentially dangerous to mix TreeNode subclasses
            # within a tree, so...
            result = self.__class__()
            efc = self._exclude_from_copy
            # parent/children and the lookup caches are excluded; the topology
            # is rebuilt below by the explicit stack walk.
            for key in node_to_copy.__dict__:
                if key not in efc:
                    result.__dict__[key] = deepcopy(node_to_copy.__dict__[key])
            return result

        root = __copy_node(self)
        # Explicit stack of [new node, original node, children left to visit]
        # triples; avoids recursion (and recursion-depth limits) on deep trees.
        nodes_stack = [[root, self, len(self.children)]]

        while nodes_stack:
            # check the top node, any children left unvisited?
            top = nodes_stack[-1]
            new_top_node, old_top_node, unvisited_children = top

            if unvisited_children:
                top[2] -= 1
                # children are visited left to right; the next unvisited child
                # is indexed from the end by the remaining count.
                old_child = old_top_node.children[-unvisited_children]
                new_child = __copy_node(old_child)
                new_top_node.append(new_child)
                nodes_stack.append([new_child, old_child,
                                    len(old_child.children)])
            else:  # no unvisited children
                nodes_stack.pop()
        return root

    __copy__ = copy
    __deepcopy__ = deepcopy = copy
+
+    def unrooted_deepcopy(self, parent=None):
+        r"""Walks the tree unrooted-style and returns a new copy
+
+        Perform a deepcopy of self and return a new copy of the tree as an
+        unrooted copy. This is useful for defining new roots of the tree as
+        the `TreeNode`.
+
+        This method calls `TreeNode.unrooted_copy` which is recursive.
+
+        Parameters
+        ----------
+        parent : TreeNode or None
+            Used to avoid infinite loops when performing the unrooted traverse
+
+        Returns
+        -------
+        TreeNode
+            A new copy of the tree
+
+        See Also
+        --------
+        copy
+        unrooted_copy
+        root_at
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
+        >>> new_tree = tree.find('d').unrooted_deepcopy()
+        >>> print(new_tree)
+        (b,c,(a,((f,g)h)e)d)root;
+        <BLANKLINE>
+
+        """
+        root = self.root()
+        root.assign_ids()
+
+        new_tree = root.copy()
+        new_tree.assign_ids()
+
+        new_tree_self = new_tree.find_by_id(self.id)
+        return new_tree_self.unrooted_copy(parent)
+
    def unrooted_copy(self, parent=None):
        r"""Walks the tree unrooted-style and returns a copy

        Perform a copy of self and return a new copy of the tree as an
        unrooted copy. This is useful for defining new roots of the tree as
        the `TreeNode`.

        This method is recursive.

        Warning, this is _NOT_ a deepcopy

        Parameters
        ----------
        parent : TreeNode or None
            Used to avoid infinite loops when performing the unrooted traverse

        Returns
        -------
        TreeNode
            A new copy of the tree

        See Also
        --------
        copy
        unrooted_deepcopy
        root_at

        Examples
        --------
        >>> from six import StringIO
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
        >>> new_tree = tree.find('d').unrooted_copy()
        >>> print(new_tree)
        (b,c,(a,((f,g)h)e)d)root;
        <BLANKLINE>

        """
        # Recurse into every neighbor except the node we arrived from; this
        # treats the tree as an unrooted graph, so the former parent can
        # become a child in the copy.
        neighbors = self.neighbors(ignore=parent)
        children = [c.unrooted_copy(parent=self) for c in neighbors]

        # we might be walking UP the tree, so:
        if parent is None:
            # base edge
            edgename = None
            length = None
        elif parent.parent is self:
            # self's parent is becoming self's child; the copied edge takes
            # its name and length from the (old) parent
            edgename = parent.name
            length = parent.length
        else:
            assert parent is self.parent
            edgename = self.name
            length = self.length

        result = self.__class__(name=edgename, children=children,
                                length=length)

        if parent is None:
            # the starting node becomes the root of the copied tree
            result.name = "root"

        return result
+
+    def count(self, tips=False):
+        """Get the count of nodes in the tree
+
+        Parameters
+        ----------
+        tips : bool
+            If `True`, only return the count of the number of tips
+
+        Returns
+        -------
+        int
+            The number of nodes or tips
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
+        >>> print(tree.count())
+        9
+        >>> print(tree.count(tips=True))
+        5
+
+        """
+        if tips:
+            return len(list(self.tips()))
+        else:
+            return len(list(self.traverse(include_self=True)))
+
    def subtree(self, tip_list=None):
        r"""Make a copy of the subtree.

        .. note:: Not implemented; calling this always raises
           ``NotImplementedError``.
        """
        raise NotImplementedError()
+
+    def subset(self):
+        r"""Returns set of names that descend from specified node
+
+        Get the set of `name` on tips that descend from this node.
+
+        Returns
+        -------
+        frozenset
+            The set of names at the tips of the clade that descends from self
+
+        See Also
+        --------
+        subsets
+        compare_subsets
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
+        >>> sorted(tree.subset())
+        ['a', 'b', 'c', 'f', 'g']
+        """
+        return frozenset({i.name for i in self.tips()})
+
    def subsets(self):
        r"""Return all sets of names that come from self and its descendants

        Compute all subsets of tip names over `self`, or, represent a tree as a
        set of nested sets.

        Returns
        -------
        frozenset
            A frozenset of frozensets of str

        See Also
        --------
        subset
        compare_subsets

        Examples
        --------
        >>> from six import StringIO
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(StringIO("(((a,b)c,(d,e)f)h)root;"))
        >>> for s in sorted(tree.subsets()):
        ...     print(sorted(s))
        ['a', 'b']
        ['d', 'e']
        ['a', 'b', 'd', 'e']
        """
        sets = []
        # Postorder guarantees children are processed before their parent, so
        # each internal node can union the leaf sets cached on its children.
        # NOTE: `__leaf_set` is name-mangled (to `_TreeNode__leaf_set`) and is
        # left behind on the nodes as a transient scratch attribute.
        for i in self.postorder(include_self=False):
            if not i.children:
                i.__leaf_set = frozenset([i.name])
            else:
                leaf_set = reduce(or_, [c.__leaf_set for c in i.children])
                # singleton sets mirror individual tips and carry no clade
                # information, so only sets with more than one tip are kept
                if len(leaf_set) > 1:
                    sets.append(leaf_set)
                i.__leaf_set = leaf_set
        return frozenset(sets)
+
+    def root_at(self, node):
+        r"""Return a new tree rooted at the provided node.
+
+        This can be useful for drawing unrooted trees with an orientation that
+        reflects knowledge of the true root location.
+
+        Parameters
+        ----------
+        node : TreeNode or str
+            The node to root at
+
+        Returns
+        -------
+        TreeNode
+            A new copy of the tree
+
+        Raises
+        ------
+        TreeError
+            Raises a `TreeError` if a tip is specified as the new root
+
+        See Also
+        --------
+        root_at_midpoint
+        unrooted_deepcopy
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("(((a,b)c,(d,e)f)g,h)i;"))
+        >>> print(tree.root_at('c'))
+        (a,b,((d,e)f,(h)g)c)root;
+        <BLANKLINE>
+
+        """
+        if isinstance(node, str):
+            node = self.find(node)
+
+        if not node.children:
+            raise TreeError("Can't use a tip (%s) as the root" %
+                            repr(node.name))
+        return node.unrooted_deepcopy()
+
    def root_at_midpoint(self):
        r"""Return a new tree rooted at midpoint of the two tips farthest apart

        This method doesn't preserve the internal node naming or structure,
        but does keep tip to tip distances correct. Uses `unrooted_copy` but
        operates on a full copy of the tree.

        Returns
        -------
        TreeNode
            A tree rooted at its midpoint

        Raises
        ------
        TreeError
            If a tip ends up being the mid point
        LengthError
            Midpoint rooting requires `length` and will raise (indirectly) if
            evaluated nodes don't have length.

        See Also
        --------
        root_at
        unrooted_deepcopy

        Examples
        --------
        >>> from six import StringIO
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(StringIO("(((d:1,e:1,(g:1)f:1)c:1)b:1,h:1)"
        ...                               "a:1;"))
        >>> print(tree.root_at_midpoint())
        ((d:1.0,e:1.0,(g:1.0)f:1.0)c:0.5,((h:1.0)b:1.0):0.5)root;
        <BLANKLINE>

        """
        # work on a copy: midpoint rooting rewrites tree structure
        tree = self.copy()
        max_dist, tips = tree.get_max_distance()
        half_max_dist = max_dist / 2.0

        if max_dist == 0.0:  # only pathological cases with no lengths
            return tree

        tip1 = tree.find(tips[0])
        tip2 = tree.find(tips[1])
        lca = tree.lowest_common_ancestor([tip1, tip2])

        # walk up from whichever tip owns more than half of the longest path;
        # the midpoint must lie on that side of the LCA
        if tip1.accumulate_to_ancestor(lca) > half_max_dist:
            climb_node = tip1
        else:
            climb_node = tip2

        # climb toward the LCA until the next whole branch would overshoot
        # the midpoint
        dist_climbed = 0.0
        while dist_climbed + climb_node.length < half_max_dist:
            dist_climbed += climb_node.length
            climb_node = climb_node.parent

        # now midpt is either at on the branch to climb_node's  parent
        # or midpt is at climb_node's parent
        if dist_climbed + climb_node.length == half_max_dist:
            # climb to midpoint spot
            climb_node = climb_node.parent
            if climb_node.is_tip():
                raise TreeError('error trying to root tree at tip')
            else:
                return climb_node.unrooted_copy()

        else:
            # make a new node on climb_node's branch to its parent
            old_br_len = climb_node.length

            new_root = tree.__class__()
            climb_node.parent.append(new_root)
            new_root.append(climb_node)

            # split the original branch length so tip-to-tip distances are
            # preserved across the new root
            climb_node.length = half_max_dist - dist_climbed
            new_root.length = old_br_len - climb_node.length

            return new_root.unrooted_copy()
+
+    def is_tip(self):
+        r"""Returns `True` if the current node has no `children`.
+
+        Returns
+        -------
+        bool
+            `True` if the node is a tip
+
+        See Also
+        --------
+        is_root
+        has_children
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> print(tree.is_tip())
+        False
+        >>> print(tree.find('a').is_tip())
+        True
+
+        """
+        return not self.children
+
+    def is_root(self):
+        r"""Returns `True` if the current is a root, i.e. has no `parent`.
+
+        Returns
+        -------
+        bool
+            `True` if the node is the root
+
+        See Also
+        --------
+        is_tip
+        has_children
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> print(tree.is_root())
+        True
+        >>> print(tree.find('a').is_root())
+        False
+
+        """
+        return self.parent is None
+
+    def has_children(self):
+        r"""Returns `True` if the node has `children`.
+
+        Returns
+        -------
+        bool
+            `True` if the node has children.
+
+        See Also
+        --------
+        is_tip
+        is_root
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> print(tree.has_children())
+        True
+        >>> print(tree.find('a').has_children())
+        False
+
+        """
+        return not self.is_tip()
+
+    def traverse(self, self_before=True, self_after=False, include_self=True):
+        r"""Returns iterator over descendants
+
+        This is a depth-first traversal. Since the trees are not binary,
+        preorder and postorder traversals are possible, but inorder traversals
+        would depend on the data in the tree and are not handled here.
+
+        Parameters
+        ----------
+        self_before : bool
+            includes each node before its descendants if True
+        self_after : bool
+            includes each node after its descendants if True
+        include_self : bool
+            include the initial node if True
+
+        `self_before` and `self_after` are independent. If neither is `True`,
+        only terminal nodes will be returned.
+
+        Note that if self is terminal, it will only be included once even if
+        `self_before` and `self_after` are both `True`.
+
+        Returns
+        -------
+        GeneratorType
+            Yields successive `TreeNode` objects
+
+        See Also
+        --------
+        preorder
+        postorder
+        pre_and_postorder
+        levelorder
+        tips
+        non_tips
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> for node in tree.traverse():
+        ...     print(node.name)
+        None
+        c
+        a
+        b
+
+        """
+        if self_before:
+            if self_after:
+                return self.pre_and_postorder(include_self=include_self)
+            else:
+                return self.preorder(include_self=include_self)
+        else:
+            if self_after:
+                return self.postorder(include_self=include_self)
+            else:
+                return self.tips(include_self=include_self)
+
+    def preorder(self, include_self=True):
+        r"""Performs preorder iteration over tree
+
+        Parameters
+        ----------
+        include_self : bool
+            include the initial node if True
+
+        Returns
+        -------
+        GeneratorType
+            Yields successive `TreeNode` objects
+
+        See Also
+        --------
+        traverse
+        postorder
+        pre_and_postorder
+        levelorder
+        tips
+        non_tips
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> for node in tree.preorder():
+        ...     print(node.name)
+        None
+        c
+        a
+        b
+
+        """
+        stack = [self]
+        while stack:
+            curr = stack.pop()
+            if include_self or (curr is not self):
+                yield curr
+            if curr.children:
+                stack.extend(curr.children[::-1])
+
+    def postorder(self, include_self=True):
+        r"""Performs postorder iteration over tree.
+
+        This is somewhat inelegant compared to saving the node and its index
+        on the stack, but is 30% faster in the average case and 3x faster in
+        the worst case (for a comb tree).
+
+        Parameters
+        ----------
+        include_self : bool
+            include the initial node if True
+
+        Returns
+        -------
+        GeneratorType
+            Yields successive `TreeNode` objects
+
+        See Also
+        --------
+        traverse
+        preorder
+        pre_and_postorder
+        levelorder
+        tips
+        non_tips
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> for node in tree.postorder():
+        ...     print(node.name)
+        a
+        b
+        c
+        None
+
+        """
+        child_index_stack = [0]
+        curr = self
+        curr_children = self.children
+        curr_children_len = len(curr_children)
+        while 1:
+            curr_index = child_index_stack[-1]
+            # if there are children left, process them
+            if curr_index < curr_children_len:
+                curr_child = curr_children[curr_index]
+                # if the current child has children, go there
+                if curr_child.children:
+                    child_index_stack.append(0)
+                    curr = curr_child
+                    curr_children = curr.children
+                    curr_children_len = len(curr_children)
+                    curr_index = 0
+                # otherwise, yield that child
+                else:
+                    yield curr_child
+                    child_index_stack[-1] += 1
+            # if there are no children left, return self, and move to
+            # self's parent
+            else:
+                if include_self or (curr is not self):
+                    yield curr
+                if curr is self:
+                    break
+                curr = curr.parent
+                curr_children = curr.children
+                curr_children_len = len(curr_children)
+                child_index_stack.pop()
+                child_index_stack[-1] += 1
+
+    def pre_and_postorder(self, include_self=True):
+        r"""Performs iteration over tree, visiting node before and after
+
+        Parameters
+        ----------
+        include_self : bool
+            include the initial node if True
+
+        Returns
+        -------
+        GeneratorType
+            Yields successive `TreeNode` objects
+
+        See Also
+        --------
+        traverse
+        postorder
+        preorder
+        levelorder
+        tips
+        non_tips
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> for node in tree.pre_and_postorder():
+        ...     print(node.name)
+        None
+        c
+        a
+        b
+        c
+        None
+
+        """
+        # handle simple case first
+        if not self.children:
+            if include_self:
+                yield self
+            raise StopIteration
+        child_index_stack = [0]
+        curr = self
+        curr_children = self.children
+        while 1:
+            curr_index = child_index_stack[-1]
+            if not curr_index:
+                if include_self or (curr is not self):
+                    yield curr
+            # if there are children left, process them
+            if curr_index < len(curr_children):
+                curr_child = curr_children[curr_index]
+                # if the current child has children, go there
+                if curr_child.children:
+                    child_index_stack.append(0)
+                    curr = curr_child
+                    curr_children = curr.children
+                    curr_index = 0
+                # otherwise, yield that child
+                else:
+                    yield curr_child
+                    child_index_stack[-1] += 1
+            # if there are no children left, return self, and move to
+            # self's parent
+            else:
+                if include_self or (curr is not self):
+                    yield curr
+                if curr is self:
+                    break
+                curr = curr.parent
+                curr_children = curr.children
+                child_index_stack.pop()
+                child_index_stack[-1] += 1
+
+    def levelorder(self, include_self=True):
+        r"""Performs levelorder iteration over tree
+
+        Parameters
+        ----------
+        include_self : bool
+            include the initial node if True
+
+        Returns
+        -------
+        GeneratorType
+            Yields successive `TreeNode` objects
+
+        See Also
+        --------
+        traverse
+        postorder
+        preorder
+        pre_and_postorder
+        tips
+        non_tips
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> for node in tree.levelorder():
+        ...     print(node.name)
+        None
+        c
+        f
+        a
+        b
+        d
+        e
+
+        """
+        queue = [self]
+        while queue:
+            curr = queue.pop(0)
+            if include_self or (curr is not self):
+                yield curr
+            if curr.children:
+                queue.extend(curr.children)
+
+    def tips(self, include_self=False):
+        r"""Iterates over tips descended from `self`.
+
+        Node order is consistent between calls and is ordered by a
+        postorder traversal of the tree.
+
+        Parameters
+        ----------
+        include_self : bool
+            include the initial node if True
+
+        Returns
+        -------
+        GeneratorType
+            Yields successive `TreeNode` objects
+
+        See Also
+        --------
+        traverse
+        postorder
+        preorder
+        pre_and_postorder
+        levelorder
+        non_tips
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> for node in tree.tips():
+        ...     print(node.name)
+        a
+        b
+        d
+        e
+
+        """
+        for n in self.postorder(include_self=False):
+            if n.is_tip():
+                yield n
+
+    def non_tips(self, include_self=False):
+        r"""Iterates over nontips descended from self
+
+        `include_self`, if `True` (default is False), will return the current
+        node as part of non_tips if it is a non_tip. Node order is consistent
+        between calls and is ordered by a postorder traversal of the tree.
+
+
+        Parameters
+        ----------
+        include_self : bool
+            include the initial node if True
+
+        Returns
+        -------
+        GeneratorType
+            Yields successive `TreeNode` objects
+
+        See Also
+        --------
+        traverse
+        postorder
+        preorder
+        pre_and_postorder
+        levelorder
+        tips
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> for node in tree.non_tips():
+        ...     print(node.name)
+        c
+        f
+
+        """
+        for n in self.postorder(include_self):
+            if not n.is_tip():
+                yield n
+
+    def invalidate_caches(self, attr=True):
+        r"""Delete lookup and attribute caches
+
+        Parameters
+        ----------
+        attr : bool, optional
+            If ``True``, invalidate attribute caches created by
+            `TreeNode.cache_attr`.
+
+        See Also
+        --------
+        create_caches
+        cache_attr
+        find
+
+        """
+        if not self.is_root():
+            self.root().invalidate_caches()
+        else:
+            self._tip_cache = {}
+            self._non_tip_cache = {}
+
+            if self._registered_caches and attr:
+                for n in self.traverse():
+                    for cache in self._registered_caches:
+                        if hasattr(n, cache):
+                            delattr(n, cache)
+
+    def create_caches(self):
+        r"""Construct an internal lookups to facilitate searching by name
+
+        This method will not cache nodes in which the .name is None. This
+        method will raise `DuplicateNodeError` if a name conflict in the tips
+        is discovered, but will not raise if on internal nodes. This is
+        because, in practice, the tips of a tree are required to be unique
+        while no such requirement holds for internal nodes.
+
+        Raises
+        ------
+        DuplicateNodeError
+            The tip cache requires that names are unique (with the exception of
+            names that are None)
+
+        See Also
+        --------
+        invalidate_caches
+        cache_attr
+        find
+
+        """
+        if not self.is_root():
+            self.root().create_caches()
+        else:
+            if self._tip_cache and self._non_tip_cache:
+                return
+
+            self.invalidate_caches(attr=False)
+
+            tip_cache = {}
+            non_tip_cache = defaultdict(list)
+
+            for node in self.postorder():
+                name = node.name
+
+                if name is None:
+                    continue
+
+                if node.is_tip():
+                    if name in tip_cache:
+                        raise DuplicateNodeError("Tip with name '%s' already "
+                                                 "exists!" % name)
+
+                    tip_cache[name] = node
+                else:
+                    non_tip_cache[name].append(node)
+
+            self._tip_cache = tip_cache
+            self._non_tip_cache = non_tip_cache
+
+    def find_all(self, name):
+        r"""Find all nodes that match `name`
+
+        The first call to `find_all` will cache all nodes in the tree on the
+        assumption that additional calls to `find_all` will be made.
+
+        Parameters
+        ----------
+        name : TreeNode or str
+            The name or node to find. If `name` is `TreeNode` then all other
+            nodes with the same name will be returned.
+
+        Raises
+        ------
+        MissingNodeError
+            Raises if the node to be searched for is not found
+
+        Returns
+        -------
+        list of TreeNode
+            The nodes found
+
+        See Also
+        --------
+        find
+        find_by_id
+        find_by_func
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio.tree import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)d,(f,g)c);"))
+        >>> for node in tree.find_all('c'):
+        ...     print(node.name, node.children[0].name, node.children[1].name)
+        c a b
+        c f g
+        >>> for node in tree.find_all('d'):
+        ...     print(node.name, str(node))
+        d (d,e)d;
+        <BLANKLINE>
+        d d;
+        <BLANKLINE>
+        """
+        root = self.root()
+
+        # if what is being passed in looks like a node, just return it
+        if isinstance(name, root.__class__):
+            return [name]
+
+        root.create_caches()
+
+        tip = root._tip_cache.get(name, None)
+        nodes = root._non_tip_cache.get(name, [])
+
+        nodes.append(tip) if tip is not None else None
+
+        if not nodes:
+            raise MissingNodeError("Node %s is not in self" % name)
+        else:
+            return nodes
+
+    def find(self, name):
+        r"""Find a node by `name`.
+
+        The first call to `find` will cache all nodes in the tree on the
+        assumption that additional calls to `find` will be made.
+
+        `find` will first attempt to find the node in the tips. If it cannot
+        find a corresponding tip, then it will search through the internal
+        nodes of the tree. In practice, phylogenetic trees and other common
+        trees in biology do not have unique internal node names. As a result,
+        this find method will only return the first occurance of an internal
+        node encountered on a postorder traversal of the tree.
+
+        Parameters
+        ----------
+        name : TreeNode or str
+            The name or node to find. If `name` is `TreeNode` then it is
+            simply returned
+
+        Raises
+        ------
+        MissingNodeError
+            Raises if the node to be searched for is not found
+
+        Returns
+        -------
+        TreeNode
+            The found node
+
+        See Also
+        --------
+        find_all
+        find_by_id
+        find_by_func
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> print(tree.find('c').name)
+        c
+        """
+        root = self.root()
+
+        # if what is being passed in looks like a node, just return it
+        if isinstance(name, root.__class__):
+            return name
+
+        root.create_caches()
+        node = root._tip_cache.get(name, None)
+
+        if node is None:
+            node = root._non_tip_cache.get(name, [None])[0]
+
+        if node is None:
+            raise MissingNodeError("Node %s is not in self" % name)
+        else:
+            return node
+
+    def find_by_id(self, node_id):
+        r"""Find a node by `id`.
+
+        This search method is based from the root.
+
+        Parameters
+        ----------
+        node_id : int
+            The `id` of a node in the tree
+
+        Returns
+        -------
+        TreeNode
+            The tree node with the matcing id
+
+        Notes
+        -----
+        This method does not cache id associations. A full traversal of the
+        tree is performed to find a node by an id on every call.
+
+        Raises
+        ------
+        MissingNodeError
+            This method will raise if the `id` cannot be found
+
+        See Also
+        --------
+        find
+        find_all
+        find_by_func
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> print(tree.find_by_id(2).name)
+        d
+
+        """
+        # if this method gets used frequently, then we should cache by ID
+        # as well
+        root = self.root()
+        root.assign_ids()
+
+        node = None
+        for n in self.traverse(include_self=True):
+            if n.id == node_id:
+                node = n
+                break
+
+        if node is None:
+            raise MissingNodeError("ID %d is not in self" % node_id)
+        else:
+            return node
+
+    def find_by_func(self, func):
+        r"""Find all nodes given a function
+
+        This search method is based on the current subtree, not the root.
+
+        Parameters
+        ----------
+        func : a function
+            A function that accepts a TreeNode and returns `True` or `Fals`,
+            where `True` indicates the node is to be yielded
+
+        Returns
+        -------
+        GeneratorType
+            A generator that yields nodes
+
+        See Also
+        --------
+        find
+        find_all
+        find_by_id
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> func = lambda x: x.parent == tree.find('c')
+        >>> [n.name for n in tree.find_by_func(func)]
+        ['a', 'b']
+        """
+        for node in self.traverse(include_self=True):
+            if func(node):
+                yield node
+
+    def ancestors(self):
+        r"""Returns all ancestors back to the root
+
+        This call will return all nodes in the path back to root, but does not
+        include the node instance that the call was made from.
+
+        Returns
+        -------
+        list of TreeNode
+            The path, toward the root, from self
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> [node.name for node in tree.find('a').ancestors()]
+        ['c', 'root']
+
+        """
+        result = []
+        curr = self
+        while not curr.is_root():
+            result.append(curr.parent)
+            curr = curr.parent
+
+        return result
+
+    def root(self):
+        r"""Returns root of the tree `self` is in
+
+        Returns
+        -------
+        TreeNode
+            The root of the tree
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> tip_a = tree.find('a')
+        >>> root = tip_a.root()
+        >>> root == tree
+        True
+
+        """
+        curr = self
+        while not curr.is_root():
+            curr = curr.parent
+        return curr
+
+    def siblings(self):
+        r"""Returns all nodes that are `children` of `self` `parent`.
+
+        This call excludes `self` from the list.
+
+        Returns
+        -------
+        list of TreeNode
+            The list of sibling nodes relative to self
+
+        See Also
+        --------
+        neighbors
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e,f)g)root;"))
+        >>> tip_e = tree.find('e')
+        >>> [n.name for n in tip_e.siblings()]
+        ['d', 'f']
+
+        """
+        if self.is_root():
+            return []
+
+        result = self.parent.children[:]
+        result.remove(self)
+
+        return result
+
+    def neighbors(self, ignore=None):
+        r"""Returns all nodes that are connected to self
+
+        This call does not include `self` in the result
+
+        Parameters
+        ----------
+        ignore : TreeNode
+            A node to ignore
+
+        Returns
+        -------
+        list of TreeNode
+            The list of all nodes that are connected to self
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> node_c = tree.find('c')
+        >>> [n.name for n in node_c.neighbors()]
+        ['a', 'b', 'root']
+
+        """
+        nodes = [n for n in self.children + [self.parent] if n is not None]
+        if ignore is None:
+            return nodes
+        else:
+            return [n for n in nodes if n is not ignore]
+
    def lowest_common_ancestor(self, tipnames):
        r"""Lowest common ancestor for a list of tips

        Works by marking each path from the requested tips toward the root
        with a temporary ``black`` attribute; the LCA is the deepest node at
        which those paths converge. The temporary attributes are removed
        before returning.

        Parameters
        ----------
        tipnames : list of TreeNode or str
            The nodes of interest

        Returns
        -------
        TreeNode
            The lowest common ancestor of the passed in nodes

        Raises
        ------
        ValueError
            If no tips could be found in the tree

        Examples
        --------
        >>> from six import StringIO
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
        >>> nodes = [tree.find('a'), tree.find('b')]
        >>> lca = tree.lowest_common_ancestor(nodes)
        >>> print(lca.name)
        c
        >>> nodes = [tree.find('a'), tree.find('e')]
        >>> lca = tree.lca(nodes)  # lca is an alias for convience
        >>> print(lca.name)
        root

        """
        # a single node is its own LCA
        if len(tipnames) == 1:
            return self.find(tipnames[0])

        tips = [self.find(name) for name in tipnames]

        if len(tips) == 0:
            raise ValueError("No tips found!")

        # nodes temporarily tagged with 'black'; remembered for cleanup
        nodes_to_scrub = []

        for t in tips:
            if t.is_root():
                # has to be the LCA...
                return t

            prev = t
            curr = t.parent

            # walk upward, marking each unvisited ancestor with the child we
            # arrived from; stop at the first already-marked node
            while curr and not hasattr(curr, 'black'):
                setattr(curr, 'black', [prev])
                nodes_to_scrub.append(curr)
                prev = curr
                curr = curr.parent

            # increase black count, multiple children lead to here
            if curr:
                curr.black.append(prev)

        # descend from self along single-entry marks; the first node reached
        # through more than one child is where the tip paths converge
        curr = self
        while len(curr.black) == 1:
            curr = curr.black[0]

        # clean up tree: remove the temporary markers
        for n in nodes_to_scrub:
            delattr(n, 'black')

        return curr

    lca = lowest_common_ancestor  # for convenience
+
+    @classmethod
+    def from_taxonomy(cls, lineage_map):
+        """Construct a tree from a taxonomy
+
+        Parameters
+        ----------
+        lineage_map : iterable of tuple
+            A id to lineage mapping where the first index is an ID and the
+            second index is an iterable of the lineage.
+
+        Returns
+        -------
+        TreeNode
+            The constructed taxonomy
+
+        Examples
+        --------
+        >>> from skbio.tree import TreeNode
+        >>> lineages = {'1': ['Bacteria', 'Firmicutes', 'Clostridia'],
+        ...             '2': ['Bacteria', 'Firmicutes', 'Bacilli'],
+        ...             '3': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
+        ...             '4': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
+        ...             '5': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
+        ...             '6': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
+        ...             '7': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
+        ...             '8': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
+        ...             '9': ['Bacteria', 'Bacteroidetes', 'Cytophagia']}
+        >>> tree = TreeNode.from_taxonomy(lineages.items())
+        >>> print(tree.ascii_art())
+                                      /Clostridia-1
+                            /Firmicutes
+                           |          \Bacilli- /-2
+                  /Bacteria|
+                 |         |                    /-3
+                 |         |          /Sphingobacteria
+                 |          \Bacteroidetes      \-8
+                 |                   |
+        ---------|                    \Cytophagia-9
+                 |
+                 |                              /-5
+                 |                    /Thermoplasmata
+                 |                   |          \-4
+                  \Archaea- /Euryarchaeota
+                                     |          /-7
+                                      \Halobacteria
+                                                \-6
+
+        """
+        root = cls(name=None)
+        root._lookup = {}
+
+        for id_, lineage in lineage_map:
+            cur_node = root
+
+            # for each name, see if we've seen it, if not, add that puppy on
+            for name in lineage:
+                if name in cur_node._lookup:
+                    cur_node = cur_node._lookup[name]
+                else:
+                    new_node = TreeNode(name=name)
+                    new_node._lookup = {}
+                    cur_node._lookup[name] = new_node
+                    cur_node.append(new_node)
+                    cur_node = new_node
+
+            cur_node.append(TreeNode(name=id_))
+
+        # scrub the lookups
+        for node in root.non_tips(include_self=True):
+            del node._lookup
+
+        return root
+
+    @classmethod
+    def from_file(cls, tree_f):
+        """Load a tree from a file or file-like object
+
+        .. note:: Deprecated in scikit-bio 0.2.0-dev
+           ``from_file`` will be removed in scikit-bio 0.3.0. It is replaced
+           by ``read``, which is a more general method for deserializing
+           TreeNode instances. ``read`` supports multiple file formats,
+           automatic file format detection, etc. by taking advantage of
+           scikit-bio's I/O registry system. See :mod:`skbio.io` for more
+           details.
+
+        """
+        warnings.warn(
+            "TreeNode.from_file is deprecated and will be removed in "
+            "scikit-bio 0.3.0. Please update your code to use TreeNode.read.",
+            DeprecationWarning)
+        return cls.read(tree_f, format='newick')
+
+    def _balanced_distance_to_tip(self):
+        """Return the distance to tip from this node.
+
+        The distance to every tip from this node must be equal for this to
+        return a correct result.
+
+        Returns
+        -------
+        int
+            The distance to tip of a length-balanced tree
+
+        """
+        node = self
+        distance = 0
+        while node.has_children():
+            distance += node.children[0].length
+            node = node.children[0]
+        return distance
+
+    @classmethod
+    def from_linkage_matrix(cls, linkage_matrix, id_list):
+        """Return tree from SciPy linkage matrix.
+
+        Parameters
+        ----------
+        linkage_matrix : ndarray
+            A SciPy linkage matrix as returned by
+            `scipy.cluster.hierarchy.linkage`
+        id_list : list
+            The indices of the `id_list` will be used in the linkage_matrix
+
+        Returns
+        -------
+        TreeNode
+            An unrooted bifurcated tree
+
+        See Also
+        --------
+        scipy.cluster.hierarchy.linkage
+
+        """
+        tip_width = len(id_list)
+        cluster_count = len(linkage_matrix)
+        lookup_len = cluster_count + tip_width
+        node_lookup = np.empty(lookup_len, dtype=TreeNode)
+
+        for i, name in enumerate(id_list):
+            node_lookup[i] = TreeNode(name=name)
+
+        for i in range(tip_width, lookup_len):
+            node_lookup[i] = TreeNode()
+
+        newest_cluster_index = cluster_count + 1
+        for link in linkage_matrix:
+            child_a = node_lookup[int(link[0])]
+            child_b = node_lookup[int(link[1])]
+
+            path_length = link[2] / 2
+            child_a.length = path_length - child_a._balanced_distance_to_tip()
+            child_b.length = path_length - child_b._balanced_distance_to_tip()
+
+            new_cluster = node_lookup[newest_cluster_index]
+            new_cluster.append(child_a)
+            new_cluster.append(child_b)
+
+            newest_cluster_index += 1
+
+        return node_lookup[-1]
+
    @classmethod
    def from_newick(cls, lines, unescape_name=True):
        r"""Returns tree from the Clustal .dnd file format and equivalent

        .. note:: Deprecated in scikit-bio 0.2.0-dev
           ``from_newick`` will be removed in scikit-bio 0.3.0. It is replaced
           by ``read``, which is a more general method for deserializing
           TreeNode instances. ``read`` supports multiple file formats,
           automatic file format detection, etc. by taking advantage of
           scikit-bio's I/O registry system. See :mod:`skbio.io` for more
           details.

        The tree is made of `skbio.TreeNode` objects, with branch
        lengths if specified by the format.

        More information on the Newick format can be found here [1]. In brief,
        the format uses parentheses to define nesting. For instance, a three
        taxon tree can be represented with::

            ((a,b),c);

        Two possible ways to represent this tree drawing it out would be::

               *
              / \
             *   \
            / \   \
            a b   c

            a
             \__|___ c
             /
            b

        The Newick format allows for defining branch length as well, for
        example::

            ((a:0.1,b:0.2):0.3,c:0.4);

        This structure has a the same topology as the first example but the
        tree now contains more information about how similar or dissimilar
        nodes are to their parents. In the above example, we can see that tip
        `a` has a distance of 0.1 to its parent, and `b` has a distance of 0.2
        to its parent. We can additionally see that the clade that encloses
        tips `a` and `b` has a distance of 0.3 to its parent, or in this case,
        the root.

        Parameters
        ----------
        lines : a str, a list of str, or a file-like object
            The input newick string to parse
        unescape_name : bool
            Remove extraneous quote marks around names. Sometimes other
            programs are sensitive to the characters used in names, and it
            is essential (at times) to quote node names for compatibility.

        Returns
        -------
        TreeNode
            The root of the parsed tree

        Raises
        ------
        RecordError
            The following three conditions will trigger a `RecordError`:
                * Unbalanced number of left and right parentheses
                * A malformed newick string. For instance, if a semicolon is
                    embedded within the string as opposed to at the end.
                * If a non-newick string is passed.

        See Also
        --------
        to_newick

        Examples
        --------
        >>> from skbio import TreeNode
        >>> TreeNode.from_newick("((a,b)c,(d,e)f)root;")
        <TreeNode, name: root, internal node count: 2, tips count: 4>
        >>> from six import StringIO
        >>> s = StringIO("((a,b),c);")
        >>> TreeNode.from_newick(s)
        <TreeNode, name: unnamed, internal node count: 1, tips count: 3>

        References
        ----------
        [1] http://evolution.genetics.washington.edu/phylip/newicktree.html

        """
        warnings.warn(
            "TreeNode.from_newick is deprecated and will be removed in "
            "scikit-bio 0.3.0. Please update your code to use TreeNode.read.",
            DeprecationWarning)

        def _new_child(old_node):
            """Returns new_node which has old_node as its parent."""
            new_node = cls()
            new_node.parent = old_node
            if old_node is not None:
                if new_node not in old_node.children:
                    old_node.children.append(new_node)
            return new_node

        # accept a raw string or any iterable of string fragments
        if isinstance(lines, str):
            data = lines
        else:
            data = ''.join(lines)

        # skip arb comment stuff if present: start at first paren
        paren_index = data.find('(')
        data = data[paren_index:]
        left_count = data.count('(')
        right_count = data.count(')')

        if left_count != right_count:
            raise RecordError("Found %s left parens but %s right parens." %
                              (left_count, right_count))

        # Two-variable state machine over the token stream:
        #  - state: 'PostColon' means the next bare token is a branch
        #    length, otherwise 'PreColon' and a bare token is a name.
        #  - state1: 'PostClosed' means a ')' was just consumed, so a bare
        #    token names the clade just closed rather than starting a tip.
        curr_node = None
        state = 'PreColon'
        state1 = 'PreClosed'
        last_token = None

        for t in _dnd_tokenizer(data):
            if t == ':':
                # expecting branch length
                state = 'PostColon'
                # prevent state reset
                last_token = t
                continue
            if t == ')' and last_token in ',(':
                # node without name
                new_node = _new_child(curr_node)
                new_node.name = None
                curr_node = new_node.parent
                state1 = 'PostClosed'
                last_token = t
                continue
            if t == ')':
                # closing the current node
                curr_node = curr_node.parent
                state1 = 'PostClosed'
                last_token = t
                continue
            if t == '(':
                # opening a new node
                curr_node = _new_child(curr_node)
            elif t == ';':  # end of data
                last_token = t
                break
            elif t == ',' and last_token in ',(':
                # node without name
                new_node = _new_child(curr_node)
                new_node.name = None
                curr_node = new_node.parent
            elif t == ',':
                # separator: next node adds to this node's parent
                curr_node = curr_node.parent
            elif state == 'PreColon' and state1 == 'PreClosed':
                # data for the current node: a tip (or internal) name
                new_node = _new_child(curr_node)
                if unescape_name:
                    # strip all nested quote layers, or convert the
                    # underscore-for-space convention for unquoted names
                    if t.startswith("'") and t.endswith("'"):
                        while t.startswith("'") and t.endswith("'"):
                            t = t[1:-1]
                    else:
                        if '_' in t:
                            t = t.replace('_', ' ')
                new_node.name = t
                curr_node = new_node
            elif state == 'PreColon' and state1 == 'PostClosed':
                # name belongs to the clade that was just closed
                if unescape_name:
                    while t.startswith("'") and t.endswith("'"):
                        t = t[1:-1]
                curr_node.name = t
            elif state == 'PostColon':
                # length data for the current node
                curr_node.length = float(t)
            else:
                # can't think of a reason to get here
                raise RecordError("Incorrect PhyloNode state? %s" % t)
            state = 'PreColon'  # get here for any non-colon token
            state1 = 'PreClosed'
            last_token = t

        if curr_node is not None and curr_node.parent is not None:
            raise RecordError("Didn't get back to root of tree. The newick "
                              "string may be malformed.")

        if curr_node is None:  # no data -- return empty node
            return cls()
        return curr_node  # this should be the root of the tree
+
    def to_taxonomy(self, allow_empty=False, filter_f=None):
        """Returns a taxonomy representation of self

        Parameters
        ----------
        allow_empty : bool, optional
            Allow gaps in the taxonomy (e.g., internal nodes without names).
        filter_f : function, optional
            Specify a filtering function that returns True if the lineage is
            to be returned. This function must accept a ``TreeNode`` as its
            first parameter, and a ``list`` that represents the lineage as the
            second parameter.

        Returns
        -------
        generator
            (tip, [lineage]) where tip corresponds to a tip in the tree and
            the [lineage] is the expanded names from root to tip. Nones and
            empty strings are omitted from the lineage

        Notes
        -----
        If ``allow_empty`` is ``True`` and the root node does not have a name,
        then that name will not be included. This is because it is common to
        have multiple domains represented in the taxonomy, which would result
        in a root node that does not have a name and does not make sense to
        represent in the output.

        Examples
        --------
        >>> from skbio.tree import TreeNode
        >>> lineages = {'1': ['Bacteria', 'Firmicutes', 'Clostridia'],
        ...             '2': ['Bacteria', 'Firmicutes', 'Bacilli'],
        ...             '3': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
        ...             '4': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
        ...             '5': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
        ...             '6': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
        ...             '7': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
        ...             '8': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
        ...             '9': ['Bacteria', 'Bacteroidetes', 'Cytophagia']}
        >>> tree = TreeNode.from_taxonomy(lineages.items())
        >>> lineages = sorted([(n.name, l) for n, l in tree.to_taxonomy()])
        >>> for name, lineage in lineages:
        ...     print(name, '; '.join(lineage))
        1 Bacteria; Firmicutes; Clostridia
        2 Bacteria; Firmicutes; Bacilli
        3 Bacteria; Bacteroidetes; Sphingobacteria
        4 Archaea; Euryarchaeota; Thermoplasmata
        5 Archaea; Euryarchaeota; Thermoplasmata
        6 Archaea; Euryarchaeota; Halobacteria
        7 Archaea; Euryarchaeota; Halobacteria
        8 Bacteria; Bacteroidetes; Sphingobacteria
        9 Bacteria; Bacteroidetes; Cytophagia

        """
        if filter_f is None:
            # default filter accepts every lineage
            def filter_f(a, b):
                return True

        # ids are used below to tell a node's first visit from its second
        self.assign_ids()
        seen = set()
        lineage = []  # stack of internal-node names from root to here

        # visit internal nodes while traversing out to the tips, and on the
        # way back up
        for node in self.traverse(self_before=True, self_after=True):
            if node.is_tip():
                # yield a copy so later stack mutation can't affect callers
                if filter_f(node, lineage):
                    yield (node, lineage[:])
            else:
                if allow_empty:
                    if node.is_root() and not node.name:
                        continue
                else:
                    if not node.name:
                        continue

                # each named internal node is seen twice (pre- and
                # post-order): push its name on the first visit, pop on the
                # second
                if node.id in seen:
                    lineage.pop(-1)
                else:
                    lineage.append(node.name)
                    seen.add(node.id)
+
+    def to_array(self, attrs=None):
+        """Return an array representation of self
+
+        Parameters
+        ----------
+        attrs : list of tuple or None
+            The attributes and types to return. The expected form is
+            [(attribute_name, type)]. If `None`, then `name`, `length`, and
+            `id` are returned.
+
+        Returns
+        -------
+        dict of array
+            {id_index: {id: TreeNode},
+             child_index: [(node_id, left_child_id, right_child_id)],
+             attr_1: array(...),
+             ...
+             attr_N: array(...)}
+
+        Notes
+        -----
+        Attribute arrays are in index order such that TreeNode.id can be used
+        as a lookup into the the array
+
+        If `length` is an attribute, this will also record the length off the
+        root which is `nan`. Take care when summing.
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> t = TreeNode.read(StringIO('(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7);'))
+        >>> res = t.to_array()
+        >>> res.keys()
+        ['child_index', 'length', 'name', 'id_index', 'id']
+        >>> res['child_index']
+        [(4, 0, 2), (5, 3, 3), (6, 4, 5), (7, 6, 6)]
+        >>> for k, v in res['id_index'].items():
+        ...     print(k, v)
+        ...
+        0 a:1.0;
+        <BLANKLINE>
+        1 b:2.0;
+        <BLANKLINE>
+        2 c:3.0;
+        <BLANKLINE>
+        3 d:5.0;
+        <BLANKLINE>
+        4 (a:1.0,b:2.0,c:3.0)x:4.0;
+        <BLANKLINE>
+        5 (d:5.0)y:6.0;
+        <BLANKLINE>
+        6 ((a:1.0,b:2.0,c:3.0)x:4.0,(d:5.0)y:6.0)z:7.0;
+        <BLANKLINE>
+        7 (((a:1.0,b:2.0,c:3.0)x:4.0,(d:5.0)y:6.0)z:7.0);
+        <BLANKLINE>
+        >>> res['id']
+        array([0, 1, 2, 3, 4, 5, 6, 7])
+        >>> res['name']
+        array(['a', 'b', 'c', 'd', 'x', 'y', 'z', None], dtype=object)
+
+        """
+        if attrs is None:
+            attrs = [('name', object), ('length', float), ('id', int)]
+        else:
+            for attr, dtype in attrs:
+                if not hasattr(self, attr):
+                    raise AttributeError("Invalid attribute '%s'." % attr)
+
+        id_index, child_index = self.index_tree()
+        n = self.id + 1  # assign_ids starts at 0
+        tmp = [np.zeros(n, dtype=dtype) for attr, dtype in attrs]
+
+        for node in self.traverse(include_self=True):
+            n_id = node.id
+            for idx, (attr, dtype) in enumerate(attrs):
+                tmp[idx][n_id] = getattr(node, attr)
+
+        results = {'id_index': id_index, 'child_index': child_index}
+        results.update({attr: arr for (attr, dtype), arr in zip(attrs, tmp)})
+        return results
+
+    def to_newick(self, with_distances=False, semicolon=True,
+                  escape_name=True):
+        r"""Return the newick string representation of this tree.
+
+        .. note:: Deprecated in scikit-bio 0.2.0-dev
+           ``to_newick`` will be removed in scikit-bio 0.3.0. It is replaced by
+           ``write``, which is a more general method for serializing TreeNode
+           instances. ``write`` supports multiple file formats by taking
+           advantage of scikit-bio's I/O registry system. See :mod:`skbio.io`
+           for more details.
+
+        Please see `TreeNode.from_newick` for a further description of the
+        Newick format.
+
+        Parameters
+        ----------
+        with_distances : bool
+            If true, include lengths between nodes
+        semicolon : bool
+            If true, terminate the tree string with a semicolon
+        escape_name : bool
+            If true, wrap node names that include []'"(),:;_ in single quotes
+
+        Returns
+        -------
+        str
+            A Newick string representation of the tree
+
+        See Also
+        --------
+        from_newick
+
+        Examples
+        --------
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> print(tree.to_newick())
+        ((a,b)c,(d,e)f)root;
+
+        """
+        warnings.warn(
+            "TreeNode.to_newick is deprecated and will be removed in "
+            "scikit-bio 0.3.0. Please update your code to use TreeNode.write.",
+            DeprecationWarning)
+        result = ['(']
+        nodes_stack = [[self, len(self.children)]]
+        node_count = 1
+
+        while nodes_stack:
+            node_count += 1
+            # check the top node, any children left unvisited?
+            top = nodes_stack[-1]
+            top_node, num_unvisited_children = top
+            if num_unvisited_children:  # has any child unvisited
+                top[1] -= 1  # decrease the #of children unvisited
+                next_child = top_node.children[-num_unvisited_children]
+                # pre-visit
+                if next_child.children:
+                    result.append('(')
+                nodes_stack.append([next_child, len(next_child.children)])
+            else:  # no unvisited children
+                nodes_stack.pop()
+                # post-visit
+                if top_node.children:
+                    result[-1] = ')'
+
+                if top_node.name is None:
+                    name = ''
+                else:
+                    name = str(top_node.name)
+                    if escape_name and not (name.startswith("'") and
+                                            name.endswith("'")):
+                        if re.search("""[]['"(),:;_]""", name):
+                            name = "'%s'" % name.replace("'", "''")
+                        else:
+                            name = name.replace(' ', '_')
+                result.append(name)
+
+                if with_distances and top_node.length is not None:
+                    result[-1] = "%s:%s" % (result[-1], top_node.length)
+
+                result.append(',')
+
+        if len(result) <= 3:  # single node with or without name
+            if semicolon:
+                return "%s;" % result[1]
+            else:
+                return result[1]
+        else:
+            if semicolon:
+                result[-1] = ';'
+            else:
+                result.pop(-1)
+            return ''.join(result)
+
+    def _ascii_art(self, char1='-', show_internal=True, compact=False):
+        LEN = 10
+        PAD = ' ' * LEN
+        PA = ' ' * (LEN - 1)
+        namestr = self.name or ''  # prevents name of NoneType
+        if self.children:
+            mids = []
+            result = []
+            for c in self.children:
+                if c is self.children[0]:
+                    char2 = '/'
+                elif c is self.children[-1]:
+                    char2 = '\\'
+                else:
+                    char2 = '-'
+                (clines, mid) = c._ascii_art(char2, show_internal, compact)
+                mids.append(mid + len(result))
+                result.extend(clines)
+                if not compact:
+                    result.append('')
+            if not compact:
+                result.pop()
+            (lo, hi, end) = (mids[0], mids[-1], len(result))
+            prefixes = [PAD] * (lo + 1) + [PA + '|'] * \
+                (hi - lo - 1) + [PAD] * (end - hi)
+            mid = np.int(np.trunc((lo + hi) / 2))
+            prefixes[mid] = char1 + '-' * (LEN - 2) + prefixes[mid][-1]
+            result = [p + l for (p, l) in zip(prefixes, result)]
+            if show_internal:
+                stem = result[mid]
+                result[mid] = stem[0] + namestr + stem[len(namestr) + 1:]
+            return (result, mid)
+        else:
+            return ([char1 + '-' + namestr], 0)
+
+    def ascii_art(self, show_internal=True, compact=False):
+        r"""Returns a string containing an ascii drawing of the tree
+
+        Note, this method calls a private recursive function and is not safe
+        for large trees.
+
+        Parameters
+        ----------
+        show_internal : bool
+            includes internal edge names
+        compact : bool
+            use exactly one line per tip
+
+        Returns
+        -------
+        str
+            an ASCII formatted version of the tree
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> print(tree.ascii_art())
+                            /-a
+                  /c-------|
+                 |          \-b
+        -root----|
+                 |          /-d
+                  \f-------|
+                            \-e
+        """
+        (lines, mid) = self._ascii_art(show_internal=show_internal,
+                                       compact=compact)
+        return '\n'.join(lines)
+
+    def accumulate_to_ancestor(self, ancestor):
+        r"""Return the sum of the distance between self and ancestor
+
+        Parameters
+        ----------
+        ancestor : TreeNode
+            The node of the ancestor to accumulate distance too
+
+        Returns
+        -------
+        float
+            The sum of lengths between self and ancestor
+
+        Raises
+        ------
+        NoParentError
+            A NoParentError is raised if the ancestor is not an ancestor of
+            self
+        NoLengthError
+            A NoLengthError is raised if one of the nodes between self and
+            ancestor (including self) lacks a `length` attribute
+
+        See Also
+        --------
+        distance
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
+        >>> root = tree
+        >>> tree.find('a').accumulate_to_ancestor(root)
+        4.0
+        """
+        accum = 0.0
+        curr = self
+        while curr is not ancestor:
+            if curr.is_root():
+                raise NoParentError("Provided ancestor is not in the path")
+
+            if curr.length is None:
+                raise NoLengthError("No length on node %s found!" %
+                                    curr.name or "unnamed")
+
+            accum += curr.length
+            curr = curr.parent
+
+        return accum
+
+    def distance(self, other):
+        """Return the distance between self and other
+
+        This method can be used to compute the distances between two tips,
+        however, it is not optimized for computing pairwise tip distances.
+
+        Parameters
+        ----------
+        other : TreeNode
+            The node to compute a distance to
+
+        Returns
+        -------
+        float
+            The distance between two nodes
+
+        Raises
+        ------
+        NoLengthError
+            A NoLengthError will be raised if a node without `length` is
+            encountered
+
+        See Also
+        --------
+        tip_tip_distances
+        accumulate_to_ancestor
+        compare_tip_distances
+        get_max_distance
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
+        >>> tip_a = tree.find('a')
+        >>> tip_d = tree.find('d')
+        >>> tip_a.distance(tip_d)
+        14.0
+        """
+        if self is other:
+            return 0.0
+
+        root = self.root()
+        lca = root.lowest_common_ancestor([self, other])
+        accum = self.accumulate_to_ancestor(lca)
+        accum += other.accumulate_to_ancestor(lca)
+
+        return accum
+
    def _set_max_distance(self):
        """Propagate tip distance information up the tree

        This method was originally implemented by Julia Goodrich with the
        intent of being able to determine max tip to tip distances between
        nodes on large trees efficiently. The code has been modified to track
        the specific tips the distance is between

        Each node receives a ``MaxDistTips`` attribute of the form
        ``[[dist_a, tip_a], [dist_b, tip_b]]``: the two largest
        distances from this node down to a tip, routed through two
        different children.

        Raises
        ------
        TreeError
            If a node with a single child is encountered.
        """
        for n in self.postorder():
            if n.is_tip():
                # a tip is at distance 0 from itself, on both "sides"
                n.MaxDistTips = [[0.0, n], [0.0, n]]
            else:
                if len(n.children) == 1:
                    raise TreeError("No support for single descedent nodes")
                else:
                    # best [dist, tip] entry of each child, paired with the
                    # child it came from
                    tip_info = [(max(c.MaxDistTips), c) for c in n.children]
                    dists = [i[0][0] for i in tip_info]
                    # indices of the two children carrying the largest
                    # distances
                    best_idx = np.argsort(dists)[-2:]
                    tip_a, child_a = tip_info[best_idx[0]]
                    tip_b, child_b = tip_info[best_idx[1]]
                    # NOTE: tip_a/tip_b are the same list objects stored in
                    # the child's MaxDistTips, so the += below mutates the
                    # child's entries in place; missing lengths count as 0
                    tip_a[0] += child_a.length or 0.0
                    tip_b[0] += child_b.length or 0.0
                n.MaxDistTips = [tip_a, tip_b]
+
+    def _get_max_distance_singledesc(self):
+        """returns the max distance between any pair of tips
+
+        Also returns the tip names  that it is between as a tuple"""
+        distmtx = self.tip_tip_distances()
+        idx_max = divmod(distmtx.data.argmax(), distmtx.shape[1])
+        max_pair = (distmtx.ids[idx_max[0]], distmtx.ids[idx_max[1]])
+        return distmtx[idx_max], max_pair
+
+    def get_max_distance(self):
+        """Returns the max tip tip distance between any pair of tips
+
+        Returns
+        -------
+        float
+            The distance between the two most distant tips in the tree
+        tuple of TreeNode
+            The two most distant tips in the tree
+
+        Raises
+        ------
+        NoLengthError
+            A NoLengthError will be thrown if a node without length is
+            encountered
+
+        See Also
+        --------
+        distance
+        tip_tip_distances
+        compare_tip_distances
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
+        >>> dist, tips = tree.get_max_distance()
+        >>> dist
+        16.0
+        >>> [n.name for n in tips]
+        ['b', 'e']
+        """
+        if not hasattr(self, 'MaxDistTips'):
+            # _set_max_distance will throw a TreeError if a node with a single
+            # child is encountered
+            try:
+                self._set_max_distance()
+            except TreeError:  #
+                return self._get_max_distance_singledesc()
+
+        longest = 0.0
+        tips = [None, None]
+        for n in self.non_tips(include_self=True):
+            tip_a, tip_b = n.MaxDistTips
+            dist = (tip_a[0] + tip_b[0])
+
+            if dist > longest:
+                longest = dist
+                tips = [tip_a[1], tip_b[1]]
+        return longest, tips
+
    def tip_tip_distances(self, endpoints=None):
        """Returns distance matrix between pairs of tips, and a tip order.

        By default, all pairwise distances are calculated in the tree. If
        `endpoints` are specified, then only the distances between those tips
        are computed.

        Parameters
        ----------
        endpoints : list of TreeNode or str, or None
            A list of TreeNode objects or names of TreeNode objects

        Returns
        -------
        DistanceMatrix
            The distance matrix

        Raises
        ------
        ValueError
            If any of the specified `endpoints` are not tips
        NoLengthError
            If a node without length is encountered

        See Also
        --------
        distance
        compare_tip_distances

        Examples
        --------
        >>> from six import StringIO
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
        >>> mat = tree.tip_tip_distances()
        >>> print(mat)
        4x4 distance matrix
        IDs:
        'a', 'b', 'd', 'e'
        Data:
        [[  0.   3.  14.  15.]
         [  3.   0.  15.  16.]
         [ 14.  15.   0.   9.]
         [ 15.  16.   9.   0.]]

        """
        all_tips = list(self.tips())
        if endpoints is None:
            tip_order = all_tips
        else:
            tip_order = [self.find(n) for n in endpoints]
            for n in tip_order:
                if not n.is_tip():
                    raise ValueError("Node with name '%s' is not a tip." %
                                     n.name)

        # linearize all tips in postorder
        # .__start, .__stop compose the slice in tip_order.
        # (double-underscore attributes are name-mangled by Python inside a
        # class, which keeps them private to this method's class)
        for i, node in enumerate(all_tips):
            node.__start, node.__stop = i, i + 1

        # the result map provides index in the result matrix
        result_map = {n.__start: i for i, n in enumerate(tip_order)}
        num_all_tips = len(all_tips)  # total number of tips
        num_tips = len(tip_order)  # total number of tips in result
        result = np.zeros((num_tips, num_tips), float)  # tip by tip matrix
        distances = np.zeros((num_all_tips), float)  # dist from tip to tip

        def update_result():
            # set tip_tip distance between tips of different child
            # NOTE: closes over `node` from the enclosing postorder loop
            for child1, child2 in combinations(node.children, 2):
                for tip1 in range(child1.__start, child1.__stop):
                    if tip1 not in result_map:
                        continue
                    t1idx = result_map[tip1]
                    for tip2 in range(child2.__start, child2.__stop):
                        if tip2 not in result_map:
                            continue
                        t2idx = result_map[tip2]
                        result[t1idx, t2idx] = distances[
                            tip1] + distances[tip2]

        for node in self.postorder():
            if not node.children:
                continue
            # subtree with solved child wedges
            # can possibly use np.zeros
            starts, stops = [], []  # to calc ._start and ._stop for curr node
            for child in node.children:
                if child.length is None:
                    raise NoLengthError("Node with name '%s' doesn't have a "
                                        "length." % child.name)

                # extend every tip-to-subtree-root distance in this wedge by
                # the branch leading to the child
                distances[child.__start:child.__stop] += child.length

                starts.append(child.__start)
                stops.append(child.__stop)

            node.__start, node.__stop = min(starts), max(stops)

            if len(node.children) > 1:
                update_result()

        # only the upper triangle was filled; mirror it via the transpose
        return DistanceMatrix(result + result.T, [n.name for n in tip_order])
+
+    def compare_rfd(self, other, proportion=False):
+        """Calculates the Robinson and Foulds symmetric difference
+
+        Parameters
+        ----------
+        other : TreeNode
+            A tree to compare against
+        proportion : bool
+            Return a proportional difference
+
+        Returns
+        -------
+        float
+            The distance between the trees
+
+        Notes
+        -----
+        Implementation based off of code by Julia Goodrich. The original
+        description of the algorithm can be found in [1]_.
+
+        Raises
+        ------
+        ValueError
+            If the tip names between `self` and `other` are equal.
+
+        See Also
+        --------
+        compare_subsets
+        compare_tip_distances
+
+        References
+        ----------
+        .. [1] Comparison of phylogenetic trees. Robinson and Foulds.
+           Mathematical Biosciences. 1981. 53:131-141
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree1 = TreeNode.read(StringIO("((a,b),(c,d));"))
+        >>> tree2 = TreeNode.read(StringIO("(((a,b),c),d);"))
+        >>> tree1.compare_rfd(tree2)
+        2.0
+
+        """
+        t1names = {n.name for n in self.tips()}
+        t2names = {n.name for n in other.tips()}
+
+        if t1names != t2names:
+            if t1names < t2names:
+                tree1 = self
+                tree2 = other.shear(t1names)
+            else:
+                tree1 = self.shear(t2names)
+                tree2 = other
+        else:
+            tree1 = self
+            tree2 = other
+
+        tree1_sets = tree1.subsets()
+        tree2_sets = tree2.subsets()
+
+        not_in_both = tree1_sets.symmetric_difference(tree2_sets)
+
+        dist = float(len(not_in_both))
+
+        if proportion:
+            total_subsets = len(tree1_sets) + len(tree2_sets)
+            dist = dist / total_subsets
+
+        return dist
+
+    def compare_subsets(self, other, exclude_absent_taxa=False):
+        """Returns fraction of overlapping subsets where self and other differ.
+
+        Names present in only one of the two trees will count as mismatches,
+        if you don't want this behavior, strip out the non-matching tips first.
+
+        Parameters
+        ----------
+        other : TreeNode
+            The tree to compare
+        exclude_absent_taxa : bool
+            Strip out names that don't occur in both trees
+
+        Returns
+        -------
+        float
+            The fraction of overlapping subsets that differ between the trees
+
+        See Also
+        --------
+        compare_rfd
+        compare_tip_distances
+        subsets
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree1 = TreeNode.read(StringIO("((a,b),(c,d));"))
+        >>> tree2 = TreeNode.read(StringIO("(((a,b),c),d);"))
+        >>> tree1.compare_subsets(tree2)
+        0.5
+
+        """
+        self_sets, other_sets = self.subsets(), other.subsets()
+
+        if exclude_absent_taxa:
+            in_both = self.subset() & other.subset()
+            self_sets = (i & in_both for i in self_sets)
+            self_sets = frozenset({i for i in self_sets if len(i) > 1})
+            other_sets = (i & in_both for i in other_sets)
+            other_sets = frozenset({i for i in other_sets if len(i) > 1})
+
+        total_subsets = len(self_sets) + len(other_sets)
+        intersection_length = len(self_sets & other_sets)
+
+        if not total_subsets:  # no common subsets after filtering, so max dist
+            return 1
+
+        return 1 - (2 * intersection_length / float(total_subsets))
+
+    def compare_tip_distances(self, other, sample=None, dist_f=distance_from_r,
+                              shuffle_f=np.random.shuffle):
+        """Compares self to other using tip-to-tip distance matrices.
+
+        Value returned is `dist_f(m1, m2)` for the two matrices. Default is
+        to use the Pearson correlation coefficient, with +1 giving a distance
+        of 0 and -1 giving a distance of +1 (the maximum possible value).
+        Depending on the application, you might instead want to use
+        distance_from_r_squared, which counts correlations of both +1 and -1
+        as identical (0 distance).
+
+        Note: automatically strips out the names that don't match (this is
+        necessary for this method because the distance between non-matching
+        names and matching names is undefined in the tree where they don't
+        match, and because we need to reorder the names in the two trees to
+        match up the distance matrices).
+
+        Parameters
+        ----------
+        other : TreeNode
+            The tree to compare
+        sample : int or None
+            Randomly subsample the tips in common between the trees to
+            compare. This is useful when comparing very large trees.
+        dist_f : function
+            The distance function used to compare two the tip-tip distance
+            matrices
+        shuffle_f : function
+            The shuffling function used if `sample` is not None
+
+        Returns
+        -------
+        float
+            The distance between the trees
+
+        Raises
+        ------
+        ValueError
+            A ValueError is raised if there does not exist common tips
+            between the trees
+
+        See Also
+        --------
+        compare_subsets
+        compare_rfd
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> # note, only three common taxa between the trees
+        >>> tree1 = TreeNode.read(StringIO("((a:1,b:1):2,(c:0.5,X:0.7):3);"))
+        >>> tree2 = TreeNode.read(StringIO("(((a:1,b:1,Y:1):2,c:3):1,Z:4);"))
+        >>> dist = tree1.compare_tip_distances(tree2)
+        >>> print("%.9f" % dist)
+        0.000133446
+
+        """
+        self_names = {i.name: i for i in self.tips()}
+        other_names = {i.name: i for i in other.tips()}
+        common_names = frozenset(self_names) & frozenset(other_names)
+        common_names = list(common_names)
+
+        if not common_names:
+            raise ValueError("No tip names in common between the two trees.")
+
+        if len(common_names) <= 2:
+            return 1  # the two trees must match by definition in this case
+
+        if sample is not None:
+            shuffle_f(common_names)
+            common_names = common_names[:sample]
+
+        self_nodes = [self_names[k] for k in common_names]
+        other_nodes = [other_names[k] for k in common_names]
+
+        self_matrix = self.tip_tip_distances(endpoints=self_nodes)
+        other_matrix = other.tip_tip_distances(endpoints=other_nodes)
+
+        return dist_f(self_matrix, other_matrix)
+
+    def index_tree(self):
+        """Index a tree for rapid lookups within a tree array
+
+        Indexes nodes in-place as `n._leaf_index`.
+
+        Returns
+        -------
+        dict
+            A mapping {node_id: TreeNode}
+        list of tuple of (int, int, int)
+            The first index in each tuple is the corresponding node_id. The
+            second index is the left most leaf index. The third index is the
+            right most leaf index
+        """
+        self.assign_ids()
+
+        id_index = {}
+        child_index = []
+
+        for n in self.postorder():
+            for c in n.children:
+                id_index[c.id] = c
+
+                if c:
+                    # c has children itself, so need to add to result
+                    child_index.append((c.id,
+                                        c.children[0].id,
+                                        c.children[-1].id))
+
+        # handle root, which should be t itself
+        id_index[self.id] = self
+
+        # only want to add to the child_index if self has children...
+        if self.children:
+            child_index.append((self.id,
+                                self.children[0].id,
+                                self.children[-1].id))
+
+        return id_index, child_index
+
+    def assign_ids(self):
+        """Assign topologically stable unique ids to self
+
+        Following the call, all nodes in the tree will have their id
+        attribute set
+        """
+        curr_index = 0
+        for n in self.postorder():
+            for c in n.children:
+                c.id = curr_index
+                curr_index += 1
+
+        self.id = curr_index
+
+    def descending_branch_length(self, tip_subset=None):
+        """Find total descending branch length from self or subset of self tips
+
+        Parameters
+        ----------
+        tip_subset : Iterable, or None
+            If None, the total descending branch length for all tips in the
+            tree will be returned. If a list of tips is provided then only the
+            total descending branch length associated with those tips will be
+            returned.
+
+        Returns
+        -------
+        float
+            The total descending branch length for the specified set of tips.
+
+        Raises
+        ------
+        ValueError
+            A ValueError is raised if the list of tips supplied to tip_subset
+            contains internal nodes or non-tips.
+
+        Notes
+        -----
+        This function replicates cogent's totalDescendingBranch Length method
+        and extends that method to allow the calculation of total descending
+        branch length of a subset of the tips if requested. The postorder
+        guarantees that the function will always be able to add the descending
+        branch length if the node is not a tip.
+
+        Nodes with no length will have their length set to 0. The root length
+        (if it exists) is ignored.
+
+        Examples
+        --------
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tr = TreeNode.read(StringIO("(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G"
+        ...                             ":2.4,(H:.4,I:.5)J:1.3)K;"))
+        >>> tdbl = tr.descending_branch_length()
+        >>> sdbl = tr.descending_branch_length(['A','E'])
+        >>> print(tdbl, sdbl)
+        8.9 2.2
+        """
+        self.assign_ids()
+        if tip_subset is not None:
+            all_tips = self.subset()
+            if not set(tip_subset).issubset(all_tips):
+                raise ValueError('tip_subset contains ids that arent tip '
+                                 'names.')
+
+            lca = self.lowest_common_ancestor(tip_subset)
+            ancestors = {}
+            for tip in tip_subset:
+                curr = self.find(tip)
+                while curr is not lca:
+                    ancestors[curr.id] = curr.length if curr.length is not \
+                        None else 0.0
+                    curr = curr.parent
+            return sum(ancestors.values())
+
+        else:
+            return sum(n.length for n in self.postorder(include_self=True) if
+                       n.length is not None)
+
+    def cache_attr(self, func, cache_attrname, cache_type=list):
+        """Cache attributes on internal nodes of the tree
+
+        Parameters
+        ----------
+        func : function
+            func will be provided the node currently being evaluated and must
+            return a list of item (or items) to cache from that node or an
+            empty list.
+        cache_attrname : str
+            Name of the attribute to decorate on containing the cached values
+        cache_type : {set, frozenset, list}
+            The type of the cache
+
+        Notes
+        -----
+        This method is particularly useful if you need to frequently look up
+        attributes that would normally require a traversal of the tree.
+
+        WARNING: any cache created by this method will be invalidated if the
+        topology of the tree changes (e.g., if `TreeNode.invalidate_caches` is
+        called).
+
+        Raises
+        ------
+        TypeError
+            If an cache_type that is not a `set` or a `list` is specified.
+
+        Examples
+        --------
+        Cache the tip names of the tree on its internal nodes
+
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
+        >>> f = lambda n: [n.name] if n.is_tip() else []
+        >>> tree.cache_attr(f, 'tip_names')
+        >>> for n in tree.traverse(include_self=True):
+        ...     print("Node name: %s, cache: %r" % (n.name, n.tip_names))
+        Node name: root, cache: ['a', 'b', 'c', 'd', 'g', 'h']
+        Node name: f, cache: ['a', 'b', 'c', 'd']
+        Node name: a, cache: ['a']
+        Node name: b, cache: ['b']
+        Node name: e, cache: ['c', 'd']
+        Node name: c, cache: ['c']
+        Node name: d, cache: ['d']
+        Node name: i, cache: ['g', 'h']
+        Node name: g, cache: ['g']
+        Node name: h, cache: ['h']
+
+        """
+        if cache_type in [set, frozenset]:
+            def reduce_f(a, b):
+                return a | b
+
+        elif cache_type == list:
+            def reduce_f(a, b):
+                return a + b
+
+        else:
+            raise TypeError("Only list, set and frozenset are supported!")
+
+        for node in self.postorder(include_self=True):
+            node._registered_caches.add(cache_attrname)
+
+            cached = [getattr(c, cache_attrname) for c in node.children]
+            cached.append(cache_type(func(node)))
+            setattr(node, cache_attrname, reduce(reduce_f, cached))
+
+    def shuffle(self, k=None, names=None, shuffle_f=np.random.shuffle, n=1):
+        """Yield trees with shuffled tip names
+
+        Parameters
+        ----------
+        k : int, optional
+            The number of tips to shuffle. If k is not `None`, k tips are
+            randomly selected, and only those names will be shuffled.
+        names : list, optional
+            The specific tip names to shuffle. k and names cannot be specified
+            at the same time.
+        shuffle_f : func
+            Shuffle method, this function must accept a list and modify
+            inplace.
+        n : int, optional
+            The number of iterations to perform. Value must be > 0 and `np.inf`
+            can be specified for an infinite number of iterations.
+
+        Notes
+        -----
+        Tip names are shuffled inplace. If neither `k` nor `names` are
+        provided, all tips are shuffled.
+
+        Returns
+        -------
+        GeneratorType
+            Yielding TreeNode
+
+        Raises
+        ------
+        ValueError
+            If `k` is < 2
+            If `n` is < 1
+        ValueError
+            If both `k` and `names` are specified
+        MissingNodeError
+            If `names` is specified but one of the names cannot be found
+
+        Examples
+        --------
+        Alternate the names on two of the tips, 'a', and 'b', and do this 5
+        times.
+
+        >>> from six import StringIO
+        >>> from skbio import TreeNode
+        >>> tree = TreeNode.read(StringIO("((a,b),(c,d));"))
+        >>> rev = lambda items: items.reverse()
+        >>> shuffler = tree.shuffle(names=['a', 'b'], shuffle_f=rev, n=5)
+        >>> for shuffled_tree in shuffler:
+        ...     print(shuffled_tree)
+        ((b,a),(c,d));
+        <BLANKLINE>
+        ((a,b),(c,d));
+        <BLANKLINE>
+        ((b,a),(c,d));
+        <BLANKLINE>
+        ((a,b),(c,d));
+        <BLANKLINE>
+        ((b,a),(c,d));
+        <BLANKLINE>
+
+        """
+        if k is not None and k < 2:
+            raise ValueError("k must be None or >= 2")
+        if k is not None and names is not None:
+            raise ValueError("n and names cannot be specified at the sametime")
+        if n < 1:
+            raise ValueError("n must be > 0")
+
+        self.assign_ids()
+
+        if names is None:
+            all_tips = list(self.tips())
+
+            if n is None:
+                n = len(all_tips)
+
+            shuffle_f(all_tips)
+            names = [tip.name for tip in all_tips[:k]]
+
+        nodes = [self.find(name) for name in names]
+
+        # Since the names are being shuffled, the association between ID and
+        # name is no longer reliable
+        self.invalidate_caches()
+
+        counter = 0
+        while counter < n:
+            shuffle_f(names)
+            for node, name in zip(nodes, names):
+                node.name = name
+
+            yield self
+            counter += 1
+
+
+def _dnd_tokenizer(data):
+    r"""Tokenizes data into a stream of punctuation, labels and lengths.
+
+    Parameters
+    ----------
+    data : str
+        a DND-like (e.g., newick) string
+
+    Returns
+    -------
+    GeneratorType
+        Yields successive DND tokens
+
+    See Also
+    --------
+    TreeNode.from_newick
+    TreeNode.to_newick
+
+    Examples
+    --------
+    >>> from skbio.tree._tree import _dnd_tokenizer
+    >>> for token in _dnd_tokenizer("((tip1, tip2)internal1)"):
+    ...     print(token)
+    (
+    (
+    tip1
+    ,
+    tip2
+    )
+    internal1
+    )
+
+    """
+    dnd_tokens = set('(:),;')
+
+    in_quotes = False
+    saved = []
+    sa = saved.append
+    for d in data:
+        if d == "'":
+            in_quotes = not in_quotes
+        if d in dnd_tokens and not in_quotes:
+            curr = ''.join(saved).strip()
+            if curr:
+                yield curr
+            yield d
+            saved = []
+            sa = saved.append
+        else:
+            sa(d)
diff --git a/skbio/tree/_trie.py b/skbio/tree/_trie.py
new file mode 100644
index 0000000..32a020c
--- /dev/null
+++ b/skbio/tree/_trie.py
@@ -0,0 +1,264 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.utils import viewitems
+
+
+class _CompressedNode(object):
+    """Represents a node in the compressed trie
+
+    Parameters
+    ----------
+    key : string
+        the key attached to the node
+
+    values : list of objects, optional
+        the values attached to this node
+
+    Attributes
+    ----------
+    values : list of objects
+        the values attached to this node
+    key : string
+        the key attached to the node
+    children : dict of {string: _CompressedNode}
+        the children nodes below this node
+    """
+
+    def __init__(self, key, values=None):
+        self.values = values or []
+        self.key = key
+        self.children = {}
+
+    def __nonzero__(self):
+        return (self.key != "" or len(self.values) > 0
+                or len(self.children.keys()) > 0)
+
+    def __len__(self):
+        """Returns the number of values attached to the node
+
+        .. warning:: This method is recursive
+        """
+        return sum(len(n) for n in self.children.values()) + len(self.values)
+
+    @property
+    def size(self):
+        """int with the number of nodes below the node
+
+        .. warning:: This method is recursive
+        """
+        return sum(n.size for n in self.children.values()) + 1
+
+    @property
+    def prefix_map(self):
+        """Dict with the prefix map
+
+        Dictionary of {values: list of values} containing the prefix map
+            of this node
+        """
+        mapping = {}
+
+        if len(self.children) == 0:
+            # we have a leaf
+            mapping = {self.values[0]: self.values[1:]}
+        else:
+            # we are at an internal node
+            for child in self.children.values():
+                mapping.update(child.prefix_map)
+            # get largest group
+            n = -1
+            key_largest = None
+            for key, value in viewitems(mapping):
+                if len(value) > n:
+                    n = len(value)
+                    key_largest = key
+            # append this node's values
+            mapping[key_largest].extend(self.values)
+
+        return mapping
+
+    def insert(self, key, value):
+        """Inserts key with value in the node
+
+        Parameters
+        ----------
+        key : string
+            The string key attached to the value
+
+        value : object
+            Object to attach to the key
+        """
+        node_key_len = len(self.key)
+        length = min(node_key_len, len(key))
+        # Follow the key into the tree
+        split_node = False
+        index = 0
+        while index < length and not split_node:
+            split_node = key[index] != self.key[index]
+            index += 1
+
+        if split_node:
+            # Index has been incremented after split_node was set to true,
+            # decrement it to make it work
+            index -= 1
+            # We need to split up the node pointed by index
+            # Get the key for the new node
+            new_key_node = _CompressedNode(key[index:], [value])
+            # Get a new node for the old key node
+            old_key_node = _CompressedNode(self.key[index:], self.values)
+            old_key_node.children = self.children
+            self.children = {key[index]: new_key_node,
+                             self.key[index]: old_key_node}
+            self.key = self.key[:index]
+            self.values = []
+        elif index == len(self.key) and index == len(key):
+            # The new key matches node key exactly
+            self.values.append(value)
+        elif index < node_key_len:
+            # Key shorter than node key
+            lower_node = _CompressedNode(self.key[index:], self.values)
+            lower_node.children = self.children
+            self.children = {self.key[index]: lower_node}
+            self.key = key
+            self.values = [value]
+        else:
+            # New key longer than current node key
+            node = self.children.get(key[index])
+            if node:
+                # insert into next node
+                node.insert(key[index:], value)
+            else:
+                # Create new node
+                new_node = _CompressedNode(key[index:], [value])
+                self.children[key[index]] = new_node
+
+    def find(self, key):
+        """Searches for key and returns values stored for the key.
+
+        Parameters
+        ----------
+        key : string
+            The key of the value to search for
+
+        Returns
+        -------
+        object
+            The value attached to the key
+        """
+        # key exhausted
+        if len(key) == 0:
+            return self.values
+
+        # find matching part of key and node_key
+        min_length = min(len(key), len(self.key))
+        keys_diff = False
+        index = 0
+        while index < min_length and not keys_diff:
+            keys_diff = key[index] != self.key[index]
+            index += 1
+
+        if keys_diff:
+            return []
+        elif index == len(key):
+            # key and node_key match exactly
+            return self.values
+        else:
+            node = self.children.get(key[index])
+            if node:
+                # descend to next node
+                return node.find(key[index:])
+        return []
+
+
class CompressedTrie(object):
    """ A compressed Trie for a list of (key, value) pairs

    Parameters
    ----------
    pair_list : list of tuples, optional
        List of (key, value) pairs to initialize the Trie

    Attributes
    ----------
    size
    prefix_map
    """

    def __init__(self, pair_list=None):
        # The root carries the empty key; every insertion hangs below it.
        self._root = _CompressedNode("")
        if pair_list:
            for key, value in pair_list:
                self.insert(key, value)

    def __nonzero__(self):
        return bool(self._root)

    # BUGFIX: Python 3 uses __bool__ for truth testing and ignores
    # __nonzero__; alias it so truthiness is consistent across Python
    # versions.
    __bool__ = __nonzero__

    def __len__(self):
        # Total number of values stored in the Trie.
        return len(self._root)

    @property
    def size(self):
        """int with the number of nodes in the Trie"""
        return self._root.size

    @property
    def prefix_map(self):
        """Dict with the prefix map

        Dictionary of {values: list of values} containing the prefix map
        """
        return self._root.prefix_map

    def insert(self, key, value):
        """Inserts key with value in Trie

        Parameters
        ----------
        key : string
            The string key attached to the value

        value : object
            Object to attach to the key
        """
        self._root.insert(key, value)

    def find(self, key):
        """Searches for key and returns values stored for the key.

        Parameters
        ----------
        key : string
            The key of the value to search for

        Returns
        -------
        object
            The value attached to the key
        """
        return self._root.find(key)
+
+
def fasta_to_pairlist(seqs):
    """Yields (key, value) pairs, useful for populating a Trie object

    Parameters
    ----------
    seqs : Iterable
        tuples of the form ``(label, seq)``, e.g., as obtained by
        skbio.parse.sequences.parse_fasta

    Returns
    -------
    GeneratorType
        yields tuples of the form ``(seq, label)``, i.e., each input pair
        with its elements swapped so the sequence can serve as the Trie key
    """
    for label, sequence in seqs:
        yield sequence, label
diff --git a/skbio/tree/tests/__init__.py b/skbio/tree/tests/__init__.py
new file mode 100644
index 0000000..0bf0c55
--- /dev/null
+++ b/skbio/tree/tests/__init__.py
@@ -0,0 +1,7 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/tree/tests/test_majority_rule.py b/skbio/tree/tests/test_majority_rule.py
new file mode 100644
index 0000000..4c9bb73
--- /dev/null
+++ b/skbio/tree/tests/test_majority_rule.py
@@ -0,0 +1,171 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+from six import StringIO
+import numpy as np
+
+from skbio import TreeNode
+from skbio.tree import majority_rule
+from skbio.tree._majority_rule import (_walk_clades, _filter_clades,
+                                       _build_trees)
+
+
class MajorityRuleTests(TestCase):
    """Tests for majority-rule consensus tree construction."""

    def test_majority_rule(self):
        """Consensus of nine trees has the expected topology and supports."""
        trees = [
            TreeNode.read(StringIO("(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));")),
            TreeNode.read(StringIO("(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));")),
            TreeNode.read(StringIO("(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));")),
            TreeNode.read(StringIO("(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));")),
            TreeNode.read(StringIO("(A,(B,(E,(G,((F,I),(((J,H),D),C))))));")),
            TreeNode.read(StringIO("(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));")),
            TreeNode.read(StringIO("(A,(B,(E,((F,I),(G,(((J,H),D),C))))));")),
            TreeNode.read(StringIO("(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));")),
            TreeNode.read(StringIO("(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));"))]

        exp = TreeNode.read(StringIO("(((E,(G,(F,I),(C,(D,J,H)))),B),A);"))
        obs = majority_rule(trees)
        self.assertEqual(exp.compare_subsets(obs[0]), 0.0)
        self.assertEqual(len(obs), 1)

        # Each internal node's support is the number of input trees that
        # contain the corresponding clade.
        tree = obs[0]
        exp_supports = sorted([9.0, 9.0, 9.0, 6.0, 6.0, 6.0])
        obs_supports = sorted([n.support for n in tree.non_tips()])
        self.assertEqual(obs_supports, exp_supports)

        # Doubling every tree's weight doubles the supports but must not
        # change the consensus topology.
        obs = majority_rule(trees, weights=np.ones(len(trees)) * 2)
        self.assertEqual(exp.compare_subsets(obs[0]), 0.0)
        self.assertEqual(len(obs), 1)

        tree = obs[0]
        exp_supports = sorted([18.0, 18.0, 12.0, 18.0, 12.0, 12.0])
        obs_supports = sorted([n.support for n in tree.non_tips()])
        # Bug fix: this assertion was missing, so the weighted supports were
        # computed but never actually verified.
        self.assertEqual(obs_supports, exp_supports)

        # A weights vector whose length does not match the number of trees
        # is an error.
        with self.assertRaises(ValueError):
            majority_rule(trees, weights=[1, 2])

    def test_majority_rule_multiple_trees(self):
        """Incompatible clades yield a forest rather than a single tree."""
        trees = [
            TreeNode.read(StringIO("((a,b),(c,d),(e,f));")),
            TreeNode.read(StringIO("(a,(c,d),b,(e,f));")),
            TreeNode.read(StringIO("((c,d),(e,f),b);")),
            TreeNode.read(StringIO("(a,(c,d),(e,f));"))]

        trees = majority_rule(trees)
        self.assertEqual(len(trees), 4)

        exp = set([
                  frozenset(['a']),
                  frozenset(['b']),
                  frozenset([None, 'c', 'd']),
                  frozenset([None, 'e', 'f'])])

        obs = set([frozenset([n.name for n in t.traverse()]) for t in trees])
        self.assertEqual(obs, exp)

    def test_walk_clades(self):
        """_walk_clades tallies weighted clade counts and branch lengths."""
        trees = [TreeNode.read(StringIO("((A,B),(D,E));")),
                 TreeNode.read(StringIO("((A,B),(D,(E,X)));"))]
        exp_clades = [
            (frozenset(['A']), 2.0),
            (frozenset(['B']), 2.0),
            (frozenset(['A', 'B']), 2.0),
            (frozenset(['D', 'E']), 1.0),
            (frozenset(['D', 'E', 'A', 'B']), 1.0),
            (frozenset(['D']), 2.0),
            (frozenset(['E']), 2.0),
            (frozenset(['X']), 1.0),
            (frozenset(['E', 'X']), 1.0),
            (frozenset(['D', 'E', 'X']), 1.0),
            (frozenset(['A', 'B', 'D', 'E', 'X']), 1.0)]

        # With no branch lengths on the input trees, every accumulated
        # length is None.
        exp_lengths_nolength = {
            frozenset(['A']): None,
            frozenset(['B']): None,
            frozenset(['A', 'B']): None,
            frozenset(['D', 'E']): None,
            frozenset(['D', 'E', 'A', 'B']): None,
            frozenset(['D']): None,
            frozenset(['E']): None,
            frozenset(['X']): None,
            frozenset(['E', 'X']): None,
            frozenset(['D', 'E', 'X']): None,
            frozenset(['A', 'B', 'D', 'E', 'X']): None}

        exp_lengths = {
            frozenset(['A']): 2.0,
            frozenset(['B']): 2.0,
            frozenset(['A', 'B']): 2.0,
            frozenset(['D', 'E']): 1.0,
            frozenset(['D', 'E', 'A', 'B']): 1.0,
            frozenset(['D']): 2.0,
            frozenset(['E']): 2.0,
            frozenset(['X']): 1.0,
            frozenset(['E', 'X']): 1.0,
            frozenset(['D', 'E', 'X']): 1.0,
            frozenset(['A', 'B', 'D', 'E', 'X']): 1.0}

        obs_clades, obs_lengths = _walk_clades(trees, np.ones(len(trees)))
        self.assertEqual(set(obs_clades), set(exp_clades))
        self.assertEqual(obs_lengths, exp_lengths_nolength)

        # Assign a uniform branch length and re-walk: lengths are now set.
        for t in trees:
            for n in t.traverse(include_self=True):
                n.length = 2.0

        obs_clades, obs_lengths = _walk_clades(trees, np.ones(len(trees)))

        self.assertEqual(set(obs_clades), set(exp_clades))
        self.assertEqual(obs_lengths, exp_lengths)

    def test_filter_clades(self):
        """_filter_clades keeps compatible clades above the cutoff."""
        clade_counts = [(frozenset(['A', 'B']), 8),
                        (frozenset(['A', 'C']), 7),
                        (frozenset(['A']), 6),
                        (frozenset(['B']), 5)]
        # {'A', 'C'} conflicts with the better-supported {'A', 'B'} and is
        # dropped even though it passes the cutoff.
        obs = _filter_clades(clade_counts, 2)
        exp = {frozenset(['A', 'B']): 8,
               frozenset(['A']): 6,
               frozenset(['B']): 5}
        self.assertEqual(obs, exp)

        clade_counts = [(frozenset(['A']), 8),
                        (frozenset(['B']), 7),
                        (frozenset(['C']), 7),
                        (frozenset(['A', 'B']), 6),
                        (frozenset(['A', 'B', 'C']), 5),
                        (frozenset(['D']), 2)]
        # {'D'} falls below the cutoff of 4 and is excluded.
        obs = _filter_clades(clade_counts, 4)
        exp = {frozenset(['A']): 8,
               frozenset(['B']): 7,
               frozenset(['C']): 7,
               frozenset(['A', 'B']): 6,
               frozenset(['A', 'B', 'C']): 5}
        self.assertEqual(obs, exp)

    def test_build_trees(self):
        """_build_trees attaches counts via the requested attribute name."""
        clade_counts = {frozenset(['A', 'B']): 6,
                        frozenset(['A']): 7,
                        frozenset(['B']): 8}
        edge_lengths = {frozenset(['A', 'B']): 1,
                        frozenset(['A']): 2,
                        frozenset(['B']): 3}
        tree = _build_trees(clade_counts, edge_lengths, 'foo')[0]
        self.assertEqual(tree.foo, 6)
        tree_foos = set([c.foo for c in tree.children])
        tree_lens = set([c.length for c in tree.children])
        self.assertEqual(tree_foos, set([7, 8]))
        self.assertEqual(tree_lens, set([2, 3]))
+
+
# Allow the tests to be executed directly as a script.
if __name__ == '__main__':
    main()
diff --git a/skbio/tree/tests/test_nj.py b/skbio/tree/tests/test_nj.py
new file mode 100644
index 0000000..554d6a7
--- /dev/null
+++ b/skbio/tree/tests/test_nj.py
@@ -0,0 +1,207 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from six import StringIO
+from unittest import TestCase, main
+
+from skbio import DistanceMatrix, TreeNode, nj
+from skbio.tree._nj import (
+    _compute_q, _compute_collapsed_dm, _lowest_index, _otu_to_new_node,
+    _pair_members_to_new_node)
+
+
class NjTests(TestCase):
    """Tests for neighbor joining (nj) tree construction and its helpers."""

    def setUp(self):
        """Build distance matrices and their expected neighbor-joining trees."""
        data1 = [[0,  5,  9,  9,  8],
                 [5,  0, 10, 10,  9],
                 [9, 10,  0,  8,  7],
                 [9, 10,  8,  0,  3],
                 [8,  9,  7,  3,  0]]
        ids1 = list('abcde')
        self.dm1 = DistanceMatrix(data1, ids1)
        # this newick string was confirmed against http://www.trex.uqam.ca/
        # which generated the following (isomorphic) newick string:
        # (d:2.0000,e:1.0000,(c:4.0000,(a:2.0000,b:3.0000):3.0000):2.0000);
        self.expected1_str = ("(d:2.000000, (c:4.000000, (b:3.000000,"
                              " a:2.000000):3.000000):2.000000, e:1.000000);")
        self.expected1_TreeNode = TreeNode.read(StringIO(self.expected1_str))

        # this example was pulled from the Phylip manual
        # http://evolution.genetics.washington.edu/phylip/doc/neighbor.html
        data2 = [[0.0000, 1.6866, 1.7198, 1.6606, 1.5243, 1.6043, 1.5905],
                 [1.6866, 0.0000, 1.5232, 1.4841, 1.4465, 1.4389, 1.4629],
                 [1.7198, 1.5232, 0.0000, 0.7115, 0.5958, 0.6179, 0.5583],
                 [1.6606, 1.4841, 0.7115, 0.0000, 0.4631, 0.5061, 0.4710],
                 [1.5243, 1.4465, 0.5958, 0.4631, 0.0000, 0.3484, 0.3083],
                 [1.6043, 1.4389, 0.6179, 0.5061, 0.3484, 0.0000, 0.2692],
                 [1.5905, 1.4629, 0.5583, 0.4710, 0.3083, 0.2692, 0.0000]]
        ids2 = ["Bovine", "Mouse", "Gibbon", "Orang", "Gorilla", "Chimp",
                "Human"]
        self.dm2 = DistanceMatrix(data2, ids2)
        self.expected2_str = ("(Mouse:0.76891, (Gibbon:0.35793, (Orang:0.28469"
                              ", (Gorilla:0.15393, (Chimp:0.15167, Human:0.117"
                              "53):0.03982):0.02696):0.04648):0.42027, Bovine:"
                              "0.91769);")
        self.expected2_TreeNode = TreeNode.read(StringIO(self.expected2_str))

        data3 = [[0, 5, 4, 7, 6, 8],
                 [5, 0, 7, 10, 9, 11],
                 [4, 7, 0, 7, 6, 8],
                 [7, 10, 7, 0, 5, 8],
                 [6, 9, 6, 5, 0, 8],
                 [8, 11, 8, 8, 8, 0]]
        # Bug fix: materialize the ids. On Python 3 ``map`` returns a
        # single-use iterator, which cannot safely be iterated more than
        # once by DistanceMatrix.
        ids3 = list(map(str, range(6)))
        self.dm3 = DistanceMatrix(data3, ids3)
        self.expected3_str = ("((((0:1.000000,1:4.000000):1.000000,2:2.000000"
                              "):1.250000,5:4.750000):0.750000,3:2.750000,4:2."
                              "250000);")
        self.expected3_TreeNode = TreeNode.read(StringIO(self.expected3_str))

        # this dm can yield negative branch lengths
        data4 = [[0,  5,  9,  9,  800],
                 [5,  0, 10, 10,  9],
                 [9, 10,  0,  8,  7],
                 [9, 10,  8,  0,  3],
                 [800,  9,  7,  3,  0]]
        ids4 = list('abcde')
        self.dm4 = DistanceMatrix(data4, ids4)

    def test_nj_dm1(self):
        """NJ of dm1 matches the precomputed newick string and tree."""
        self.assertEqual(nj(self.dm1, result_constructor=str),
                         self.expected1_str)
        # Trees are compared through their tip-to-tip distances. Use an
        # approximate comparison (consistent with test_nj_dm2/test_nj_dm3)
        # since the score is a floating point value.
        actual_TreeNode = nj(self.dm1)
        self.assertAlmostEqual(actual_TreeNode.compare_tip_distances(
            self.expected1_TreeNode), 0.0)

    def test_nj_dm2(self):
        """NJ reproduces the Phylip manual example."""
        actual_TreeNode = nj(self.dm2)
        self.assertAlmostEqual(actual_TreeNode.compare_tip_distances(
            self.expected2_TreeNode), 0.0)

    def test_nj_dm3(self):
        """NJ of dm3 matches the precomputed tree."""
        actual_TreeNode = nj(self.dm3)
        self.assertAlmostEqual(actual_TreeNode.compare_tip_distances(
            self.expected3_TreeNode), 0.0)

    def test_nj_zero_branch_length(self):
        """Negative branch lengths are clamped to zero unless disallowed."""
        # no nodes have negative branch length when we disallow negative
        # branch length. self is excluded as branch length is None
        tree = nj(self.dm4)
        for n in tree.postorder(include_self=False):
            self.assertTrue(n.length >= 0)
        # only tips associated with the large distance in the input
        # have positive branch lengths when we allow negative branch
        # length
        tree = nj(self.dm4, False)
        self.assertTrue(tree.find('a').length > 0)
        self.assertTrue(tree.find('b').length < 0)
        self.assertTrue(tree.find('c').length < 0)
        self.assertTrue(tree.find('d').length < 0)
        self.assertTrue(tree.find('e').length > 0)

    def test_nj_trivial(self):
        """Smallest valid input (three ids) produces the expected tree."""
        data = [[0, 3, 2],
                [3, 0, 3],
                [2, 3, 0]]
        dm = DistanceMatrix(data, list('abc'))
        expected_str = "(b:2.000000, a:1.000000, c:1.000000);"
        self.assertEqual(nj(dm, result_constructor=str), expected_str)

    def test_nj_error(self):
        """Fewer than three ids is an error."""
        data = [[0, 3],
                [3, 0]]
        dm = DistanceMatrix(data, list('ab'))
        self.assertRaises(ValueError, nj, dm)

    def test_compute_q(self):
        """_compute_q builds the Q matrix used to pick the pair to join."""
        expected_data = [[0, -50, -38, -34, -34],
                         [-50,   0, -38, -34, -34],
                         [-38, -38,   0, -40, -40],
                         [-34, -34, -40,   0, -48],
                         [-34, -34, -40, -48,   0]]
        expected_ids = list('abcde')
        expected = DistanceMatrix(expected_data, expected_ids)
        self.assertEqual(_compute_q(self.dm1), expected)

        data = [[0, 3, 2],
                [3, 0, 3],
                [2, 3, 0]]
        dm = DistanceMatrix(data, list('abc'))
        # computed this manually
        expected_data = [[0, -8, -8],
                         [-8,  0, -8],
                         [-8, -8,  0]]
        expected = DistanceMatrix(expected_data, list('abc'))
        self.assertEqual(_compute_q(dm), expected)

    def test_compute_collapsed_dm(self):
        """Joining a pair collapses it into a single new node in the dm."""
        expected_data = [[0,  7,  7,  6],
                         [7,  0,  8,  7],
                         [7,  8,  0,  3],
                         [6,  7,  3,  0]]
        expected_ids = ['x', 'c', 'd', 'e']
        expected1 = DistanceMatrix(expected_data, expected_ids)
        self.assertEqual(_compute_collapsed_dm(self.dm1, 'a', 'b', True, 'x'),
                         expected1)

        # computed manually
        expected_data = [[0, 4, 3],
                         [4, 0, 3],
                         [3, 3, 0]]
        expected_ids = ['yy', 'd', 'e']
        expected2 = DistanceMatrix(expected_data, expected_ids)
        self.assertEqual(
            _compute_collapsed_dm(expected1, 'x', 'c', True, 'yy'), expected2)

    def test_lowest_index(self):
        """_lowest_index locates the smallest off-diagonal entry."""
        self.assertEqual(_lowest_index(self.dm1), (4, 3))
        self.assertEqual(_lowest_index(_compute_q(self.dm1)), (1, 0))

    def test_otu_to_new_node(self):
        """Distance from an unjoined OTU to the newly created node."""
        self.assertEqual(_otu_to_new_node(self.dm1, 'a', 'b', 'c', True), 7)
        self.assertEqual(_otu_to_new_node(self.dm1, 'a', 'b', 'd', True), 7)
        self.assertEqual(_otu_to_new_node(self.dm1, 'a', 'b', 'e', True), 6)

    def test_otu_to_new_node_zero_branch_length(self):
        """Negative distances clamp to zero when disallowed."""
        data = [[0, 40, 3],
                [40, 0, 3],
                [3, 3, 0]]
        ids = ['a', 'b', 'c']
        dm = DistanceMatrix(data, ids)
        self.assertEqual(_otu_to_new_node(dm, 'a', 'b', 'c', True), 0)
        self.assertEqual(_otu_to_new_node(dm, 'a', 'b', 'c', False), -17)

    def test_pair_members_to_new_node(self):
        """Branch lengths from the joined pair to their new parent node."""
        self.assertEqual(_pair_members_to_new_node(self.dm1, 'a', 'b', True),
                         (2, 3))
        self.assertEqual(_pair_members_to_new_node(self.dm1, 'a', 'c', True),
                         (4, 5))
        self.assertEqual(_pair_members_to_new_node(self.dm1, 'd', 'e', True),
                         (2, 1))

    def test_pair_members_to_new_node_zero_branch_length(self):
        # the values in this example don't really make sense
        # (I'm not sure how you end up with these distances between
        # three sequences), but that doesn't really matter for the sake
        # of this test
        data = [[0, 4, 2],
                [4, 0, 38],
                [2, 38, 0]]
        ids = ['a', 'b', 'c']
        dm = DistanceMatrix(data, ids)
        self.assertEqual(_pair_members_to_new_node(dm, 'a', 'b', True), (0, 4))
        # this makes it clear why negative branch lengths don't make sense...
        self.assertEqual(
            _pair_members_to_new_node(dm, 'a', 'b', False), (-16, 20))
+
# Allow the tests to be executed directly as a script.
if __name__ == "__main__":
    main()
diff --git a/skbio/tree/tests/test_tree.py b/skbio/tree/tests/test_tree.py
new file mode 100644
index 0000000..f125e77
--- /dev/null
+++ b/skbio/tree/tests/test_tree.py
@@ -0,0 +1,1350 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import warnings
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as nptest
+from scipy.stats import pearsonr
+from six import StringIO
+
+from skbio import DistanceMatrix, TreeNode
+from skbio.tree._tree import _dnd_tokenizer
+from skbio.tree import (DuplicateNodeError, NoLengthError,
+                        TreeError, MissingNodeError, NoParentError)
+from skbio.io import RecordError
+
+
+class TreeTests(TestCase):
+
+    def setUp(self):
+        """Prep the self"""
+        self.simple_t = TreeNode.from_newick("((a,b)i1,(c,d)i2)root;")
+        nodes = dict([(x, TreeNode(x)) for x in 'abcdefgh'])
+        nodes['a'].append(nodes['b'])
+        nodes['b'].append(nodes['c'])
+        nodes['c'].append(nodes['d'])
+        nodes['c'].append(nodes['e'])
+        nodes['c'].append(nodes['f'])
+        nodes['f'].append(nodes['g'])
+        nodes['a'].append(nodes['h'])
+        self.TreeNode = nodes
+        self.TreeRoot = nodes['a']
+
+        def rev_f(items):
+            items.reverse()
+
+        def rotate_f(items):
+            tmp = items[-1]
+            items[1:] = items[:-1]
+            items[0] = tmp
+
+        self.rev_f = rev_f
+        self.rotate_f = rotate_f
+        self.complex_tree = TreeNode.from_newick("(((a,b)int1,(x,y,(w,z)int2,"
+                                                 "(c,d)int3)int4),(e,f)int5);")
+
+    def test_count(self):
+        """Get node counts"""
+        exp = 7
+        obs = self.simple_t.count()
+        self.assertEqual(obs, exp)
+
+        exp = 4
+        obs = self.simple_t.count(tips=True)
+        self.assertEqual(obs, exp)
+
+    def test_copy(self):
+        """copy a tree"""
+        self.simple_t.children[0].length = 1.2
+        self.simple_t.children[1].children[0].length = 0.5
+        cp = self.simple_t.copy()
+        gen = zip(cp.traverse(include_self=True),
+                  self.simple_t.traverse(include_self=True))
+
+        for a, b in gen:
+            self.assertIsNot(a, b)
+            self.assertEqual(a.name, b.name)
+            self.assertEqual(a.length, b.length)
+
+    def test_append(self):
+        """Append a node to a tree"""
+        second_tree = TreeNode.from_newick("(x,y)z;")
+        self.simple_t.append(second_tree)
+
+        self.assertEqual(self.simple_t.children[0].name, 'i1')
+        self.assertEqual(self.simple_t.children[1].name, 'i2')
+        self.assertEqual(self.simple_t.children[2].name, 'z')
+        self.assertEqual(len(self.simple_t.children), 3)
+        self.assertEqual(self.simple_t.children[2].children[0].name, 'x')
+        self.assertEqual(self.simple_t.children[2].children[1].name, 'y')
+        self.assertEqual(second_tree.parent, self.simple_t)
+
+    def test_extend(self):
+        """Extend a few nodes"""
+        second_tree = TreeNode.from_newick("(x1,y1)z1;")
+        third_tree = TreeNode.from_newick("(x2,y2)z2;")
+        self.simple_t.extend([second_tree, third_tree])
+
+        self.assertEqual(self.simple_t.children[0].name, 'i1')
+        self.assertEqual(self.simple_t.children[1].name, 'i2')
+        self.assertEqual(self.simple_t.children[2].name, 'z1')
+        self.assertEqual(self.simple_t.children[3].name, 'z2')
+        self.assertEqual(len(self.simple_t.children), 4)
+        self.assertEqual(self.simple_t.children[2].children[0].name, 'x1')
+        self.assertEqual(self.simple_t.children[2].children[1].name, 'y1')
+        self.assertEqual(self.simple_t.children[3].children[0].name, 'x2')
+        self.assertEqual(self.simple_t.children[3].children[1].name, 'y2')
+        self.assertIs(second_tree.parent, self.simple_t)
+        self.assertIs(third_tree.parent, self.simple_t)
+
+    def test_extend_empty(self):
+        """Extend on the empty case should work"""
+        self.simple_t.extend([])
+        self.assertEqual(self.simple_t.children[0].name, 'i1')
+        self.assertEqual(self.simple_t.children[1].name, 'i2')
+        self.assertEqual(len(self.simple_t.children), 2)
+
+    def test_iter(self):
+        """iter wraps children"""
+        exp = ['i1', 'i2']
+        obs = [n.name for n in self.simple_t]
+        self.assertEqual(obs, exp)
+
+    def test_gops(self):
+        """Basic TreeNode operations should work as expected"""
+        p = TreeNode()
+        self.assertEqual(str(p), ';\n')
+        p.name = 'abc'
+        self.assertEqual(str(p), 'abc;\n')
+        p.length = 3
+        self.assertEqual(str(p), 'abc:3;\n')  # don't suppress branch from root
+        q = TreeNode()
+        p.append(q)
+        self.assertEqual(str(p), '()abc:3;\n')
+        r = TreeNode()
+        q.append(r)
+        self.assertEqual(str(p), '(())abc:3;\n')
+        r.name = 'xyz'
+        self.assertEqual(str(p), '((xyz))abc:3;\n')
+        q.length = 2
+        self.assertEqual(str(p), '((xyz):2)abc:3;\n')
+
+    def test_pop(self):
+        """Pop off a node"""
+        second_tree = TreeNode.from_newick("(x1,y1)z1;")
+        third_tree = TreeNode.from_newick("(x2,y2)z2;")
+        self.simple_t.extend([second_tree, third_tree])
+
+        i1 = self.simple_t.pop(0)
+        z2 = self.simple_t.pop()
+
+        self.assertEqual(i1.name, 'i1')
+        self.assertEqual(z2.name, 'z2')
+        self.assertEqual(i1.children[0].name, 'a')
+        self.assertEqual(i1.children[1].name, 'b')
+        self.assertEqual(z2.children[0].name, 'x2')
+        self.assertEqual(z2.children[1].name, 'y2')
+
+        self.assertEqual(self.simple_t.children[0].name, 'i2')
+        self.assertEqual(self.simple_t.children[1].name, 'z1')
+        self.assertEqual(len(self.simple_t.children), 2)
+
+    def test_remove(self):
+        """Remove nodes"""
+        self.assertTrue(self.simple_t.remove(self.simple_t.children[0]))
+        self.assertEqual(len(self.simple_t.children), 1)
+        n = TreeNode()
+        self.assertFalse(self.simple_t.remove(n))
+
+    def test_remove_deleted(self):
+        """Remove nodes by function"""
+        def f(node):
+            return node.name in ['b', 'd']
+
+        self.simple_t.remove_deleted(f)
+        exp = "((a)i1,(c)i2)root;\n"
+        obs = str(self.simple_t)
+        self.assertEqual(obs, exp)
+
+    def test_adopt(self):
+        """Adopt a node!"""
+        n1 = TreeNode(name='n1')
+        n2 = TreeNode(name='n2')
+        n3 = TreeNode(name='n3')
+
+        self.simple_t._adopt(n1)
+        self.simple_t.children[-1]._adopt(n2)
+        n2._adopt(n3)
+
+        # adopt doesn't update .children
+        self.assertEqual(len(self.simple_t.children), 2)
+
+        self.assertIs(n1.parent, self.simple_t)
+        self.assertIs(n2.parent, self.simple_t.children[-1])
+        self.assertIs(n3.parent, n2)
+
+    def test_remove_node(self):
+        """Remove a node by index"""
+        n = self.simple_t._remove_node(-1)
+        self.assertEqual(n.parent, None)
+        self.assertEqual(len(self.simple_t.children), 1)
+        self.assertEqual(len(n.children), 2)
+        self.assertNotIn(n, self.simple_t.children)
+
+    def test_prune(self):
+        """Collapse single descendent nodes"""
+        # check the identity case
+        cp = self.simple_t.copy()
+        self.simple_t.prune()
+
+        gen = zip(cp.traverse(include_self=True),
+                  self.simple_t.traverse(include_self=True))
+
+        for a, b in gen:
+            self.assertIsNot(a, b)
+            self.assertEqual(a.name, b.name)
+            self.assertEqual(a.length, b.length)
+
+        # create a single descendent by removing tip 'a'
+        n = self.simple_t.children[0]
+        n.remove(n.children[0])
+        self.simple_t.prune()
+
+        self.assertEqual(len(self.simple_t.children), 2)
+        self.assertEqual(self.simple_t.children[0].name, 'i2')
+        self.assertEqual(self.simple_t.children[1].name, 'b')
+
+    def test_prune_length(self):
+        """Collapse single descendent nodes"""
+        # check the identity case
+        cp = self.simple_t.copy()
+        self.simple_t.prune()
+
+        gen = zip(cp.traverse(include_self=True),
+                  self.simple_t.traverse(include_self=True))
+
+        for a, b in gen:
+            self.assertIsNot(a, b)
+            self.assertEqual(a.name, b.name)
+            self.assertEqual(a.length, b.length)
+
+        for n in self.simple_t.traverse():
+            n.length = 1.0
+
+        # create a single descendent by removing tip 'a'
+        n = self.simple_t.children[0]
+        n.remove(n.children[0])
+        self.simple_t.prune()
+
+        self.assertEqual(len(self.simple_t.children), 2)
+        self.assertEqual(self.simple_t.children[0].name, 'i2')
+        self.assertEqual(self.simple_t.children[1].name, 'b')
+        self.assertEqual(self.simple_t.children[1].length, 2.0)
+
+    def test_subset(self):
+        """subset should return set of leaves that descends from node"""
+        t = self.simple_t
+        self.assertEqual(t.subset(), frozenset('abcd'))
+        c = t.children[0]
+        self.assertEqual(c.subset(), frozenset('ab'))
+        leaf = c.children[1]
+        self.assertEqual(leaf.subset(), frozenset(''))
+
+    def test_subsets(self):
+        """subsets should return all subsets descending from a set"""
+        t = self.simple_t
+        self.assertEqual(t.subsets(), frozenset(
+            [frozenset('ab'), frozenset('cd')]))
+
+    def test_is_tip(self):
+        """see if we're a tip or not"""
+        self.assertFalse(self.simple_t.is_tip())
+        self.assertFalse(self.simple_t.children[0].is_tip())
+        self.assertTrue(self.simple_t.children[0].children[0].is_tip())
+
+    def test_is_root(self):
+        """see if we're at the root or not"""
+        self.assertTrue(self.simple_t.is_root())
+        self.assertFalse(self.simple_t.children[0].is_root())
+        self.assertFalse(self.simple_t.children[0].children[0].is_root())
+
+    def test_root(self):
+        """Get the root!"""
+        root = self.simple_t
+        self.assertIs(root, self.simple_t.root())
+        self.assertIs(root, self.simple_t.children[0].root())
+        self.assertIs(root, self.simple_t.children[1].children[1].root())
+
+    def test_invalidate_lookup_caches(self):
+        root = self.simple_t
+        root.create_caches()
+        self.assertNotEqual(root._tip_cache, {})
+        self.assertNotEqual(root._non_tip_cache, {})
+        root.invalidate_caches()
+        self.assertEqual(root._tip_cache, {})
+        self.assertEqual(root._non_tip_cache, {})
+
+    def test_invalidate_attr_caches(self):
+        tree = TreeNode.from_newick("((a,b,(c,d)e)f,(g,h)i)root;")
+
+        def f(n):
+            return [n.name] if n.is_tip() else []
+
+        tree.cache_attr(f, 'tip_names')
+        tree.invalidate_caches()
+        for n in tree.traverse(include_self=True):
+            self.assertFalse(hasattr(n, 'tip_names'))
+
+    def test_create_caches_duplicate_tip_names(self):
+        with self.assertRaises(DuplicateNodeError):
+            TreeNode.from_newick('(a, a)').create_caches()
+
+    def test_find_all(self):
+        t = TreeNode.from_newick("((a,b)c,((d,e)c)c,(f,(g,h)c)a)root;")
+        exp = [t.children[0],
+               t.children[1].children[0],
+               t.children[1],
+               t.children[2].children[1]]
+        obs = t.find_all('c')
+        self.assertEqual(obs, exp)
+
+        identity = t.find_all(t)
+        self.assertEqual(len(identity), 1)
+        self.assertEqual(identity[0], t)
+
+        identity_name = t.find_all('root')
+        self.assertEqual(len(identity_name), 1)
+        self.assertEqual(identity_name[0], t)
+
+        exp = [t.children[2],
+               t.children[0].children[0]]
+        obs = t.find_all('a')
+        self.assertEqual(obs, exp)
+
+        with self.assertRaises(MissingNodeError):
+            t.find_all('missing')
+
+    def test_find(self):
+        """Find a node in a tree"""
+        t = TreeNode.from_newick("((a,b)c,(d,e)f);")
+        exp = t.children[0]
+        obs = t.find('c')
+        self.assertEqual(obs, exp)
+
+        exp = t.children[0].children[1]
+        obs = t.find('b')
+        self.assertEqual(obs, exp)
+
+        with self.assertRaises(MissingNodeError):
+            t.find('does not exist')
+
+    def test_find_cache_bug(self):
+        """First implementation did not force the cache to be at the root"""
+        t = TreeNode.from_newick("((a,b)c,(d,e)f,(g,h)f);")
+        exp_tip_cache_keys = set(['a', 'b', 'd', 'e', 'g', 'h'])
+        exp_non_tip_cache_keys = set(['c', 'f'])
+        tip_a = t.children[0].children[0]
+        tip_a.create_caches()
+        self.assertEqual(tip_a._tip_cache, {})
+        self.assertEqual(set(t._tip_cache), exp_tip_cache_keys)
+        self.assertEqual(set(t._non_tip_cache), exp_non_tip_cache_keys)
+        self.assertEqual(t._non_tip_cache['f'], [t.children[1], t.children[2]])
+
+    def test_find_by_id(self):
+        """Find a node by id"""
+        t1 = TreeNode.from_newick("((,),(,,));")
+        t2 = TreeNode.from_newick("((,),(,,));")
+
+        exp = t1.children[1]
+        obs = t1.find_by_id(6)  # right inner node with 3 children
+        self.assertEqual(obs, exp)
+
+        exp = t2.children[1]
+        obs = t2.find_by_id(6)  # right inner node with 3 children
+        self.assertEqual(obs, exp)
+
+        with self.assertRaises(MissingNodeError):
+            t1.find_by_id(100)
+
+    def test_find_by_func(self):
+        """Find nodes by a function"""
+        t = TreeNode.from_newick("((a,b)c,(d,e)f);")
+
+        def func(x):
+            return x.parent == t.find('c')
+
+        exp = ['a', 'b']
+        obs = [n.name for n in t.find_by_func(func)]
+        self.assertEqual(obs, exp)
+
+    def test_ancestors(self):
+        """Get all the ancestors"""
+        exp = ['i1', 'root']
+        obs = self.simple_t.children[0].children[0].ancestors()
+        self.assertEqual([o.name for o in obs], exp)
+
+        exp = ['root']
+        obs = self.simple_t.children[0].ancestors()
+        self.assertEqual([o.name for o in obs], exp)
+
+        exp = []
+        obs = self.simple_t.ancestors()
+        self.assertEqual([o.name for o in obs], exp)
+
+    def test_siblings(self):
+        """Get the siblings"""
+        exp = []
+        obs = self.simple_t.siblings()
+        self.assertEqual(obs, exp)
+
+        exp = ['i2']
+        obs = self.simple_t.children[0].siblings()
+        self.assertEqual([o.name for o in obs], exp)
+
+        exp = ['c']
+        obs = self.simple_t.children[1].children[1].siblings()
+        self.assertEqual([o.name for o in obs], exp)
+
+        self.simple_t.append(TreeNode(name="foo"))
+        self.simple_t.append(TreeNode(name="bar"))
+        exp = ['i1', 'foo', 'bar']
+        obs = self.simple_t.children[1].siblings()
+        self.assertEqual([o.name for o in obs], exp)
+
+    def test_ascii_art(self):
+        """Make some ascii trees"""
+        # unlabeled internal node
+        tr = TreeNode.from_newick("(B:0.2,(C:0.3,D:0.4):0.6)F;")
+        obs = tr.ascii_art(show_internal=True, compact=False)
+        exp = "          /-B\n-F-------|\n         |          /-C\n         "\
+              " \\--------|\n                    \\-D"
+        self.assertEqual(obs, exp)
+        obs = tr.ascii_art(show_internal=True, compact=True)
+        exp = "-F------- /-B\n          \-------- /-C\n                    \-D"
+        self.assertEqual(obs, exp)
+        obs = tr.ascii_art(show_internal=False, compact=False)
+        exp = "          /-B\n---------|\n         |          /-C\n         "\
+              " \\--------|\n                    \\-D"
+        self.assertEqual(obs, exp)
+
+    def test_ascii_art_three_children(self):
+        obs = TreeNode.from_newick('(a,(b,c,d));').ascii_art()
+        self.assertEqual(obs, exp_ascii_art_three_children)
+
+    def test_accumulate_to_ancestor(self):
+        """Get the distance from a node to its ancestor"""
+        t = TreeNode.from_newick("((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;")
+        a = t.find('a')
+        b = t.find('b')
+        exp_to_root = 0.1 + 0.3
+        obs_to_root = a.accumulate_to_ancestor(t)
+        self.assertEqual(obs_to_root, exp_to_root)
+
+        with self.assertRaises(NoParentError):
+            a.accumulate_to_ancestor(b)
+
+    def test_distance(self):
+        """Get the distance between two nodes"""
+        t = TreeNode.from_newick("((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;")
+        tips = sorted([n for n in t.tips()], key=lambda x: x.name)
+
+        nptest.assert_almost_equal(tips[0].distance(tips[0]), 0.0)
+        nptest.assert_almost_equal(tips[0].distance(tips[1]), 0.3)
+        nptest.assert_almost_equal(tips[0].distance(tips[2]), 1.3)
+        with self.assertRaises(NoLengthError):
+            tips[0].distance(tips[3])
+
+        nptest.assert_almost_equal(tips[1].distance(tips[0]), 0.3)
+        nptest.assert_almost_equal(tips[1].distance(tips[1]), 0.0)
+        nptest.assert_almost_equal(tips[1].distance(tips[2]), 1.4)
+        with self.assertRaises(NoLengthError):
+            tips[1].distance(tips[3])
+
+        self.assertEqual(tips[2].distance(tips[0]), 1.3)
+        self.assertEqual(tips[2].distance(tips[1]), 1.4)
+        self.assertEqual(tips[2].distance(tips[2]), 0.0)
+        with self.assertRaises(NoLengthError):
+            tips[2].distance(tips[3])
+
+    def test_lowest_common_ancestor(self):
+        """TreeNode lowestCommonAncestor should return LCA for set of tips"""
+        t1 = TreeNode.from_newick("((a,(b,c)d)e,f,(g,h)i)j;")
+        t2 = t1.copy()
+        t3 = t1.copy()
+        t4 = t1.copy()
+        input1 = ['a']  # return self
+        input2 = ['a', 'b']  # return e
+        input3 = ['b', 'c']  # return d
+        input4 = ['a', 'h', 'g']  # return j
+        exp1 = t1.find('a')
+        exp2 = t2.find('e')
+        exp3 = t3.find('d')
+        exp4 = t4
+        obs1 = t1.lowest_common_ancestor(input1)
+        obs2 = t2.lowest_common_ancestor(input2)
+        obs3 = t3.lowest_common_ancestor(input3)
+        obs4 = t4.lowest_common_ancestor(input4)
+        self.assertEqual(obs1, exp1)
+        self.assertEqual(obs2, exp2)
+        self.assertEqual(obs3, exp3)
+        self.assertEqual(obs4, exp4)
+
+        # verify multiple calls work
+        t_mul = t1.copy()
+        exp_1 = t_mul.find('d')
+        exp_2 = t_mul.find('i')
+        obs_1 = t_mul.lowest_common_ancestor(['b', 'c'])
+        obs_2 = t_mul.lowest_common_ancestor(['g', 'h'])
+        self.assertEqual(obs_1, exp_1)
+        self.assertEqual(obs_2, exp_2)
+
+        # empty case
+        with self.assertRaises(ValueError):
+            t1.lowest_common_ancestor([])
+
+    def test_get_max_distance(self):
+        """get_max_distance should get max tip distance across tree"""
+        tree = TreeNode.from_newick(
+            "((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;")
+        dist, nodes = tree.get_max_distance()
+        nptest.assert_almost_equal(dist, 1.6)
+        self.assertEqual(sorted([n.name for n in nodes]), ['b', 'e'])
+
+    def test_set_max_distance(self):
+        """set_max_distance sets MaxDistTips across tree"""
+        tree = TreeNode.from_newick(
+            "((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;")
+        tree._set_max_distance()
+        tip_a, tip_b = tree.MaxDistTips
+        self.assertEqual(tip_a[0] + tip_b[0], 1.6)
+        self.assertEqual(sorted([tip_a[1].name, tip_b[1].name]), ['b', 'e'])
+
+    def test_shear(self):
+        """Shear the nodes"""
+        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
+        obs = str(t.shear(['G', 'M']))
+        exp = '(G:3.0,M:3.7);\n'
+        self.assertEqual(obs, exp)
+
+    def test_compare_tip_distances(self):
+        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
+        t2 = TreeNode.from_newick('(((H:1,G:1,O:1):2,R:3):1,X:4);')
+        obs = t.compare_tip_distances(t2)
+        # note: common taxa are H, G, R (only)
+        m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
+        m2 = np.array([[0, 2, 6], [2, 0, 6], [6, 6, 0]])
+        r = pearsonr(m1.flat, m2.flat)[0]
+        self.assertAlmostEqual(obs, (1 - r) / 2)
+
+    def test_compare_tip_distances_sample(self):
+        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
+        t2 = TreeNode.from_newick('(((H:1,G:1,O:1):2,R:3):1,X:4);')
+        obs = t.compare_tip_distances(t2, sample=3, shuffle_f=sorted)
+        # note: common taxa are H, G, R (only)
+        m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
+        m2 = np.array([[0, 2, 6], [2, 0, 6], [6, 6, 0]])
+        r = pearsonr(m1.flat, m2.flat)[0]
+        self.assertAlmostEqual(obs, (1 - r) / 2)
+
+        # 4 common taxa, still picking H, G, R
+        s = '((H:1,G:1):2,(R:0.5,M:0.7,Q:5):3);'
+        t = TreeNode.from_newick(s, TreeNode)
+        s3 = '(((H:1,G:1,O:1):2,R:3,Q:10):1,X:4);'
+        t3 = TreeNode.from_newick(s3, TreeNode)
+        obs = t.compare_tip_distances(t3, sample=3, shuffle_f=sorted)
+
+    def test_compare_tip_distances_no_common_tips(self):
+        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
+        t2 = TreeNode.from_newick('(((Z:1,Y:1,X:1):2,W:3):1,V:4);')
+
+        with self.assertRaises(ValueError):
+            t.compare_tip_distances(t2)
+
+    def test_compare_tip_distances_single_common_tip(self):
+        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
+        t2 = TreeNode.from_newick('(((R:1,Y:1,X:1):2,W:3):1,V:4);')
+
+        self.assertEqual(t.compare_tip_distances(t2), 1)
+        self.assertEqual(t2.compare_tip_distances(t), 1)
+
+    def test_tip_tip_distances_endpoints(self):
+        """Test getting specifc tip distances  with tipToTipDistances"""
+        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
+        nodes = [t.find('H'), t.find('G'), t.find('M')]
+        names = ['H', 'G', 'M']
+        exp = DistanceMatrix(np.array([[0, 2.0, 6.7],
+                                       [2.0, 0, 6.7],
+                                       [6.7, 6.7, 0.0]]), ['H', 'G', 'M'])
+
+        obs = t.tip_tip_distances(endpoints=names)
+        self.assertEqual(obs, exp)
+
+        obs = t.tip_tip_distances(endpoints=nodes)
+        self.assertEqual(obs, exp)
+
+    def test_tip_tip_distances_non_tip_endpoints(self):
+        t = TreeNode.from_newick('((H:1,G:1)foo:2,(R:0.5,M:0.7):3);')
+        with self.assertRaises(ValueError):
+            t.tip_tip_distances(endpoints=['foo'])
+
+    def test_tip_tip_distances_no_length(self):
+        t = TreeNode.from_newick("((a,b)c,(d,e)f);")
+        with self.assertRaises(NoLengthError):
+            t.tip_tip_distances()
+
+    def test_neighbors(self):
+        """Get neighbors of a node"""
+        t = TreeNode.from_newick("((a,b)c,(d,e)f);")
+        exp = t.children
+        obs = t.neighbors()
+        self.assertEqual(obs, exp)
+
+        exp = t.children[0].children + [t]
+        obs = t.children[0].neighbors()
+        self.assertEqual(obs, exp)
+
+        exp = [t.children[0].children[0]] + [t]
+        obs = t.children[0].neighbors(ignore=t.children[0].children[1])
+        self.assertEqual(obs, exp)
+
+        exp = [t.children[0]]
+        obs = t.children[0].children[0].neighbors()
+        self.assertEqual(obs, exp)
+
+    def test_has_children(self):
+        """Test if has children"""
+        t = TreeNode.from_newick("((a,b)c,(d,e)f);")
+        self.assertTrue(t.has_children())
+        self.assertTrue(t.children[0].has_children())
+        self.assertTrue(t.children[1].has_children())
+        self.assertFalse(t.children[0].children[0].has_children())
+        self.assertFalse(t.children[0].children[1].has_children())
+        self.assertFalse(t.children[1].children[0].has_children())
+        self.assertFalse(t.children[1].children[1].has_children())
+
+    def test_tips(self):
+        """Tip traversal of tree"""
+        exp = ['a', 'b', 'c', 'd']
+        obs = [n.name for n in self.simple_t.tips()]
+        self.assertEqual(obs, exp)
+        obs2 = [n.name for n in self.simple_t.traverse(False, False)]
+        self.assertEqual(obs2, exp)
+
+    def test_pre_and_postorder(self):
+        """Pre and post order traversal of the tree"""
+        exp = ['root', 'i1', 'a', 'b', 'i1', 'i2', 'c', 'd', 'i2', 'root']
+        obs = [n.name for n in self.simple_t.pre_and_postorder()]
+        self.assertEqual(obs, exp)
+        obs2 = [n.name for n in self.simple_t.traverse(True, True)]
+        self.assertEqual(obs2, exp)
+
+    def test_pre_and_postorder_no_children(self):
+        t = TreeNode('brofist')
+
+        # include self
+        exp = ['brofist']
+        obs = [n.name for n in t.pre_and_postorder()]
+        self.assertEqual(obs, exp)
+
+        # do not include self
+        obs = list(t.pre_and_postorder(include_self=False))
+        self.assertEqual(obs, [])
+
+    def test_levelorder(self):
+        """Test level order traversal of the tree"""
+        exp = ['root', 'i1', 'i2', 'a', 'b', 'c', 'd']
+        obs = [n.name for n in self.simple_t.levelorder()]
+        self.assertEqual(obs, exp)
+
    def test_index_tree(self):
        """index_tree should produce correct index and node map"""
        # test for first tree: contains singleton outgroup
        t1 = TreeNode.from_newick('(((a,b),c),(d,e))')
        t2 = TreeNode.from_newick('(((a,b),(c,d)),(e,f))')
        t3 = TreeNode.from_newick('(((a,b,c),(d)),(e,f))')

        # The expected tuples below are consistent with each child-index
        # entry being (node_id, first_child_id, last_child_id) over
        # postorder-assigned ids -- TODO confirm against index_tree docs.
        id_1, child_1 = t1.index_tree()
        nodes_1 = [n.id for n in t1.traverse(self_before=False,
                   self_after=True)]
        self.assertEqual(nodes_1, [0, 1, 2, 3, 6, 4, 5, 7, 8])
        self.assertEqual(child_1, [(2, 0, 1), (6, 2, 3), (7, 4, 5), (8, 6, 7)])

        # test for second tree: strictly bifurcating
        id_2, child_2 = t2.index_tree()
        nodes_2 = [n.id for n in t2.traverse(self_before=False,
                   self_after=True)]
        self.assertEqual(nodes_2, [0, 1, 4, 2, 3, 5, 8, 6, 7, 9, 10])
        self.assertEqual(child_2, [(4, 0, 1), (5, 2, 3), (8, 4, 5), (9, 6, 7),
                                   (10, 8, 9)])

        # test for third tree: contains trifurcation and single-child parent
        # (node 4 spans children 0..2; node 5's only child is 3, hence (5,3,3))
        id_3, child_3 = t3.index_tree()
        nodes_3 = [n.id for n in t3.traverse(self_before=False,
                   self_after=True)]
        self.assertEqual(nodes_3, [0, 1, 2, 4, 3, 5, 8, 6, 7, 9, 10])
        self.assertEqual(child_3, [(4, 0, 2), (5, 3, 3), (8, 4, 5), (9, 6, 7),
                                   (10, 8, 9)])
+
+    def test_root_at(self):
+        """Form a new root"""
+        t = TreeNode.from_newick("(((a,b)c,(d,e)f)g,h)i;")
+        with self.assertRaises(TreeError):
+            t.root_at(t.find('h'))
+
+        exp = "(a,b,((d,e)f,(h)g)c)root;\n"
+        rooted = t.root_at('c')
+        obs = str(rooted)
+        self.assertEqual(obs, exp)
+
+    def test_root_at_midpoint(self):
+        """Root at the midpoint"""
+        tree1 = self.TreeRoot
+        for n in tree1.traverse():
+            n.length = 1
+
+        result = tree1.root_at_midpoint()
+        self.assertEqual(result.distance(result.find('e')), 1.5)
+        self.assertEqual(result.distance(result.find('g')), 2.5)
+        exp_dist = tree1.tip_tip_distances()
+        obs_dist = result.tip_tip_distances()
+        self.assertEqual(obs_dist, exp_dist)
+
+    def test_root_at_midpoint_no_lengths(self):
+        # should get same tree back (a copy)
+        nwk = '(a,b)c;\n'
+        t = TreeNode.from_newick(nwk)
+        obs = t.root_at_midpoint()
+        self.assertEqual(str(obs), nwk)
+
+    def test_compare_subsets(self):
+        """compare_subsets should return the fraction of shared subsets"""
+        t = TreeNode.from_newick('((H,G),(R,M));')
+        t2 = TreeNode.from_newick('(((H,G),R),M);')
+        t4 = TreeNode.from_newick('(((H,G),(O,R)),X);')
+
+        result = t.compare_subsets(t)
+        self.assertEqual(result, 0)
+
+        result = t2.compare_subsets(t2)
+        self.assertEqual(result, 0)
+
+        result = t.compare_subsets(t2)
+        self.assertEqual(result, 0.5)
+
+        result = t.compare_subsets(t4)
+        self.assertEqual(result, 1 - 2. / 5)
+
+        result = t.compare_subsets(t4, exclude_absent_taxa=True)
+        self.assertEqual(result, 1 - 2. / 3)
+
+        result = t.compare_subsets(self.TreeRoot, exclude_absent_taxa=True)
+        self.assertEqual(result, 1)
+
+        result = t.compare_subsets(self.TreeRoot)
+        self.assertEqual(result, 1)
+
+    def test_compare_rfd(self):
+        """compare_rfd should return the Robinson Foulds distance"""
+        t = TreeNode.from_newick('((H,G),(R,M));')
+        t2 = TreeNode.from_newick('(((H,G),R),M);')
+        t4 = TreeNode.from_newick('(((H,G),(O,R)),X);')
+
+        obs = t.compare_rfd(t2)
+        exp = 2.0
+        self.assertEqual(obs, exp)
+
+        self.assertEqual(t.compare_rfd(t2), t2.compare_rfd(t))
+
+        obs = t.compare_rfd(t2, proportion=True)
+        exp = 0.5
+        self.assertEqual(obs, exp)
+
+        with self.assertRaises(ValueError):
+            t.compare_rfd(t4)
+
+    def test_assign_ids(self):
+        """Assign IDs to the tree"""
+        t1 = TreeNode.from_newick("(((a,b),c),(e,f),(g));")
+        t2 = TreeNode.from_newick("(((a,b),c),(e,f),(g));")
+        t3 = TreeNode.from_newick("((g),(e,f),(c,(a,b)));")
+        t1_copy = t1.copy()
+
+        t1.assign_ids()
+        t2.assign_ids()
+        t3.assign_ids()
+        t1_copy.assign_ids()
+
+        self.assertEqual([(n.name, n.id) for n in t1.traverse()],
+                         [(n.name, n.id) for n in t2.traverse()])
+        self.assertEqual([(n.name, n.id) for n in t1.traverse()],
+                         [(n.name, n.id) for n in t1_copy.traverse()])
+        self.assertNotEqual([(n.name, n.id) for n in t1.traverse()],
+                            [(n.name, n.id) for n in t3.traverse()])
+
+    def test_assign_ids_index_tree(self):
+        """assign_ids and index_tree should assign the same IDs"""
+        t1 = TreeNode.from_newick('(((a,b),c),(d,e))')
+        t2 = TreeNode.from_newick('(((a,b),(c,d)),(e,f))')
+        t3 = TreeNode.from_newick('(((a,b,c),(d)),(e,f))')
+        t1_copy = t1.copy()
+        t2_copy = t2.copy()
+        t3_copy = t3.copy()
+
+        t1.assign_ids()
+        t1_copy.index_tree()
+        t2.assign_ids()
+        t2_copy.index_tree()
+        t3.assign_ids()
+        t3_copy.index_tree()
+
+        self.assertEqual([n.id for n in t1.traverse()],
+                         [n.id for n in t1_copy.traverse()])
+        self.assertEqual([n.id for n in t2.traverse()],
+                         [n.id for n in t2_copy.traverse()])
+        self.assertEqual([n.id for n in t3.traverse()],
+                         [n.id for n in t3_copy.traverse()])
+
+    def test_unrooted_deepcopy(self):
+        """Do an unrooted_copy"""
+        t = TreeNode.from_newick("((a,(b,c)d)e,(f,g)h)i;")
+        exp = "(b,c,(a,((f,g)h)e)d)root;\n"
+        obs = t.find('d').unrooted_deepcopy()
+        self.assertEqual(str(obs), exp)
+
+        t_ids = {id(n) for n in t.traverse()}
+        obs_ids = {id(n) for n in obs.traverse()}
+
+        self.assertEqual(t_ids.intersection(obs_ids), set())
+
    def test_descending_branch_length(self):
        """Calculate descending branch_length"""
        tr = TreeNode.from_newick("(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H"
                                  ":.4,I:.5)J:1.3)K;")
        # total branch length of the whole tree vs. only the subtree spanned
        # by the named tips
        tdbl = tr.descending_branch_length()
        sdbl = tr.descending_branch_length(['A', 'E'])
        nptest.assert_almost_equal(tdbl, 8.9)
        nptest.assert_almost_equal(sdbl, 2.2)
        # unknown names and non-tip names are rejected
        self.assertRaises(ValueError, tr.descending_branch_length,
                          ['A', 'DNE'])
        self.assertRaises(ValueError, tr.descending_branch_length, ['A', 'C'])

        # tip A now has no branch length, so it contributes nothing (8.9-0.1)
        tr = TreeNode.from_newick("(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4"
                                  ",I:.5)J:1.3)K;")
        tdbl = tr.descending_branch_length()
        nptest.assert_almost_equal(tdbl, 8.8)

        # internal node F also lacks a length (8.8 - 0.9)
        tr = TreeNode.from_newick("(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4,I:"
                                  ".5)J:1.3)K;")
        tdbl = tr.descending_branch_length()
        nptest.assert_almost_equal(tdbl, 7.9)

        # lengthless nodes on the spanning path of a tip subset
        tr = TreeNode.from_newick("(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4,I:"
                                  ".5)J:1.3)K;")
        tdbl = tr.descending_branch_length(['A', 'D', 'E'])
        nptest.assert_almost_equal(tdbl, 2.1)

        # a subset whose spanning tree crosses the root
        tr = TreeNode.from_newick("(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:."
                                  "4,I:.5)J:1.3)K;")
        tdbl = tr.descending_branch_length(['I', 'D', 'E'])
        nptest.assert_almost_equal(tdbl, 6.6)

        # test with a situation where we have unnamed internal nodes
        tr = TreeNode.from_newick("(((A,B:1.2):.6,(D:.9,E:.6)F):2.4,(H:.4,I:"
                                  ".5)J:1.3);")
        tdbl = tr.descending_branch_length()
        nptest.assert_almost_equal(tdbl, 7.9)
+
+    def test_to_array(self):
+        """Convert a tree to arrays"""
+        t = TreeNode.from_newick(
+            '(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10)')
+        id_index, child_index = t.index_tree()
+        arrayed = t.to_array()
+
+        self.assertEqual(id_index, arrayed['id_index'])
+        self.assertEqual(child_index, arrayed['child_index'])
+
+        exp = np.array([1, 2, 3, 5, 4, 6, 8, 9, 7, 10, np.nan])
+        obs = arrayed['length']
+        nptest.assert_equal(obs, exp)
+
+        exp = np.array(['a', 'b', 'c', 'd', 'x',
+                        'y', 'e', 'f', 'z', 'z', None])
+        obs = arrayed['name']
+        nptest.assert_equal(obs, exp)
+
+        exp = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+        obs = arrayed['id']
+        nptest.assert_equal(obs, exp)
+
+    def test_to_array_attrs(self):
+        t = TreeNode.from_newick(
+            '(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10)')
+        id_index, child_index = t.index_tree()
+        arrayed = t.to_array(attrs=[('name', object)])
+
+        # should only have id_index, child_index, and name since we specified
+        # attrs
+        self.assertEqual(len(arrayed), 3)
+
+        self.assertEqual(id_index, arrayed['id_index'])
+        self.assertEqual(child_index, arrayed['child_index'])
+
+        exp = np.array(['a', 'b', 'c', 'd', 'x',
+                        'y', 'e', 'f', 'z', 'z', None])
+        obs = arrayed['name']
+        nptest.assert_equal(obs, exp)
+
+        # invalid attrs
+        with self.assertRaises(AttributeError):
+            t.to_array(attrs=[('name', object), ('brofist', int)])
+
+    def test_from_taxonomy(self):
+        input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
+                          '2': ['a', 'b', 'c', None, None, 'x', 'y'],
+                          '3': ['h', 'i', 'j', 'k', 'l', 'm', 'n'],
+                          '4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
+                          '5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
+        exp = TreeNode.from_newick("((((((((1)g)f)e)d,((((2)y)x)))c)b)a,"
+                                   "(((((((3,5)n,(4)q)m)l)k)j)i)h);")
+
+        root = TreeNode.from_taxonomy(input_lineages.items())
+
+        self.assertEqual(root.compare_subsets(exp), 0.0)
+
+    def test_to_taxonomy(self):
+        input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
+                          '2': ['a', 'b', 'c', None, None, 'x', 'y'],
+                          '3': ['h', 'i', 'j', 'k', 'l', 'm', 'n'],
+                          '4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
+                          '5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
+        tree = TreeNode.from_taxonomy(input_lineages.items())
+        exp = sorted(input_lineages.items())
+        obs = [(n.name, lin) for n, lin in tree.to_taxonomy(allow_empty=True)]
+        self.assertEqual(sorted(obs), exp)
+
+    def test_to_taxonomy_filter(self):
+        input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
+                          '2': ['a', 'b', 'c', None, None, 'x', 'y'],
+                          '3': ['h', 'i', 'j', 'k', 'l'],  # test jagged
+                          '4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
+                          '5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
+        tree = TreeNode.from_taxonomy(input_lineages.items())
+
+        def f(node, lin):
+            return 'k' in lin or 'x' in lin
+
+        exp = [('2', ['a', 'b', 'c', 'x', 'y']),
+               ('3', ['h', 'i', 'j', 'k', 'l']),
+               ('4', ['h', 'i', 'j', 'k', 'l', 'm', 'q']),
+               ('5', ['h', 'i', 'j', 'k', 'l', 'm', 'n'])]
+        obs = [(n.name, lin) for n, lin in tree.to_taxonomy(filter_f=f)]
+        self.assertEqual(sorted(obs), exp)
+
+    def test_from_file(self):
+        """Parse a tree from a file"""
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("ignore")
+            t_io = StringIO("((a,b)c,(d,e)f)g;")
+            t = TreeNode.from_file(t_io)
+            self.assertEqual(list('abcdefg'), [n.name for n in t.postorder()])
+
    def test_linkage_matrix(self):
        """from_linkage_matrix should rebuild the UPGMA tree."""
        # Ensure matches: http://www.southampton.ac.uk/~re1u06/teaching/upgma/
        # Each row appears to follow the scipy linkage convention
        # (child_a, child_b, height, cluster_size): ids >= len(id_list)
        # refer to clusters created by earlier rows -- TODO confirm against
        # from_linkage_matrix docs.
        id_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
        linkage = np.asarray([[1.0,  5.0,  1.0,  2.0],
                              [0.0,  3.0,  8.0,  2.0],
                              [6.0,  7.0, 12.5,  3.0],
                              [8.0,  9.0, 16.5,  5.0],
                              [2.0, 10.0, 29.0,  6.0],
                              [4.0, 11.0, 34.0,  7.0]])

        tree = TreeNode.from_linkage_matrix(linkage, id_list)
        self.assertEqual("(E:17.0,(C:14.5,((A:4.0,D:4.0):4.25,(G:6.25,(B:0.5,"
                         "F:0.5):5.75):2.0):6.25):2.5);\n",
                         str(tree))
+
+    def test_from_newick_empty(self):
+        obs = TreeNode.from_newick('')
+        self.assertTrue(obs.name is None)
+        self.assertTrue(obs.length is None)
+        self.assertTrue(obs.parent is None)
+        self.assertEqual(obs.children, [])
+        self.assertTrue(obs.id is None)
+
+    def test_from_newick_embedded_semicolon(self):
+        with self.assertRaises(RecordError):
+            TreeNode.from_newick('(a,(c,;b))')
+
+    def test_to_newick_single_node(self):
+        # single node, no name, with semicolon
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("ignore")
+            obs = TreeNode().to_newick()
+            self.assertEqual(obs, ';')
+
+            # single node, no name, without semicolon
+            obs = TreeNode().to_newick(semicolon=False)
+            self.assertEqual(obs, '')
+
+            # single node, with name, with semicolon
+            obs = TreeNode(name='brofist').to_newick()
+            self.assertEqual(obs, 'brofist;')
+
+            # single node, with name, without semicolon
+            obs = TreeNode(name='brofist').to_newick(semicolon=False)
+            self.assertEqual(obs, 'brofist')
+
+    def test_to_newick_multi_node(self):
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("ignore")
+            t = TreeNode.from_newick(double)
+
+            # with semicolon
+            obs = t.to_newick()
+            self.assertEqual(obs, '(abc,def);')
+
+            # without semicolon
+            obs = t.to_newick(semicolon=False)
+            self.assertEqual(obs, '(abc,def)')
+
+    def test_shuffle_invalid_iter(self):
+        shuffler = self.simple_t.shuffle(n=-1)
+        with self.assertRaises(ValueError):
+            next(shuffler)
+
+    def test_shuffle_n_2(self):
+        exp = ["((a,b)i1,(d,c)i2)root;\n",
+               "((a,b)i1,(c,d)i2)root;\n",
+               "((a,b)i1,(d,c)i2)root;\n",
+               "((a,b)i1,(c,d)i2)root;\n",
+               "((a,b)i1,(d,c)i2)root;\n"]
+
+        obs_g = self.simple_t.shuffle(k=2, shuffle_f=self.rev_f, n=np.inf)
+        obs = [str(next(obs_g)) for i in range(5)]
+        self.assertEqual(obs, exp)
+
+    def test_shuffle_n_none(self):
+        exp = ["((d,c)i1,(b,a)i2)root;\n",
+               "((a,b)i1,(c,d)i2)root;\n",
+               "((d,c)i1,(b,a)i2)root;\n",
+               "((a,b)i1,(c,d)i2)root;\n"]
+        obs_g = self.simple_t.shuffle(shuffle_f=self.rev_f, n=4)
+        obs = [str(next(obs_g)) for i in range(4)]
+        self.assertEqual(obs, exp)
+
+    def test_shuffle_complex(self):
+        exp = ["(((a,b)int1,(x,y,(w,z)int2,(f,e)int3)int4),(d,c)int5);\n",
+               "(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);\n",
+               "(((a,b)int1,(x,y,(w,z)int2,(f,e)int3)int4),(d,c)int5);\n",
+               "(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);\n"]
+
+        obs_g = self.complex_tree.shuffle(shuffle_f=self.rev_f,
+                                          names=['c', 'd', 'e', 'f'], n=4)
+        obs = [str(next(obs_g)) for i in range(4)]
+        self.assertEqual(obs, exp)
+
+    def test_shuffle_names(self):
+        exp = ["((c,a)i1,(b,d)i2)root;\n",
+               "((b,c)i1,(a,d)i2)root;\n",
+               "((a,b)i1,(c,d)i2)root;\n",
+               "((c,a)i1,(b,d)i2)root;\n"]
+
+        obs_g = self.simple_t.shuffle(names=['a', 'b', 'c'],
+                                      shuffle_f=self.rotate_f, n=np.inf)
+        obs = [str(next(obs_g)) for i in range(4)]
+        self.assertEqual(obs, exp)
+
+    def test_shuffle_raises(self):
+        with self.assertRaises(ValueError):
+            next(self.simple_t.shuffle(k=1))
+
+        with self.assertRaises(ValueError):
+            next(self.simple_t.shuffle(k=5, names=['a', 'b']))
+
+        with self.assertRaises(MissingNodeError):
+            next(self.simple_t.shuffle(names=['x', 'y']))
+
+
class DndTokenizerTests(TestCase):

    """Tests of the DndTokenizer factory function."""

    def test_gdata(self):
        """_dnd_tokenizer should tokenize a realistic newick string"""
        exp = \
            ['(', '(', 'xyz', ':', '0.28124', ',', '(', 'def', ':', '0.24498',
             ',', 'mno', ':', '0.03627', ')', ':', '0.17710', ')', ':',
             '0.04870', ',', 'abc', ':', '0.05925', ',', '(', 'ghi', ':',
             '0.06914', ',', 'jkl', ':', '0.13776', ')', ':', '0.09853', ')',
             ';']
        # compare token-by-token first to ease debugging on failure
        obs = list(_dnd_tokenizer(sample))
        self.assertEqual(len(obs), len(exp))
        for observed_tok, expected_tok in zip(obs, exp):
            self.assertEqual(observed_tok, expected_tok)
        # then the whole stream in one comparison
        self.assertEqual(list(_dnd_tokenizer(sample)), exp)

    def test_nonames(self):
        """Trees without names reduce to pure punctuation tokens"""
        self.assertEqual(list(_dnd_tokenizer(no_names)),
                         ['(', '(', ',', ')', ',', '(', ',', ')', ')', ';'])

    def test_missing_tip_name(self):
        """A missing tip name simply yields no token for that tip"""
        self.assertEqual(
            list(_dnd_tokenizer(missing_tip_name)),
            ['(', '(', 'a', ',', 'b', ')', ',', '(', 'c', ',', ')', ')',
             ';'])

    def test_minimal(self):
        """The minimal tree tokenizes to parens plus the terminator"""
        self.assertEqual(list(_dnd_tokenizer(minimal)), ['(', ')', ';'])
+
+
+class DndParserTests(TestCase):
+
+    """Tests of the DndParser factory function."""
+
+    def test_nonames(self):
+        """DndParser should produce the correct tree when there are no names"""
+        obs = TreeNode.from_newick(no_names)
+        exp = TreeNode()
+        exp.append(TreeNode())
+        exp.append(TreeNode())
+        exp.children[0].append(TreeNode())
+        exp.children[0].append(TreeNode())
+        exp.children[1].append(TreeNode())
+        exp.children[1].append(TreeNode())
+        self.assertEqual(str(obs), str(exp))
+
+    def test_minimal(self):
+        """DndParser should produce the correct minimal tree"""
+        obs = TreeNode.from_newick(minimal)
+        exp = TreeNode()
+        exp.append(TreeNode())
+        self.assertEqual(str(obs), str(exp))
+
+    def test_missing_tip_name(self):
+        """DndParser should produce the correct tree when missing a name"""
+        obs = TreeNode.from_newick(missing_tip_name)
+        exp = TreeNode()
+        exp.append(TreeNode())
+        exp.append(TreeNode())
+        exp.children[0].append(TreeNode(name='a'))
+        exp.children[0].append(TreeNode(name='b'))
+        exp.children[1].append(TreeNode(name='c'))
+        exp.children[1].append(TreeNode())
+        self.assertEqual(str(obs), str(exp))
+
+    def test_gsingle(self):
+        """DndParser should produce a single-child TreeNode on minimal data"""
+        t = TreeNode.from_newick(single)
+        self.assertEqual(len(t), 1)
+        child = t[0]
+        self.assertEqual(child.name, 'abc')
+        self.assertEqual(child.length, 3)
+        self.assertEqual(str(t), '(abc:3.0);\n')
+
+    def test_gdouble(self):
+        """DndParser should produce a double-child TreeNode from data"""
+        t = TreeNode.from_newick(double)
+        self.assertEqual(len(t), 2)
+        self.assertEqual(str(t), '(abc:3.0,def:4.0);\n')
+
+    def test_gonenest(self):
+        """DndParser should work correctly with nested data"""
+        t = TreeNode.from_newick(onenest)
+        self.assertEqual(len(t), 2)
+        self.assertEqual(len(t[0]), 0)  # first child is terminal
+        self.assertEqual(len(t[1]), 2)  # second child has two children
+        self.assertEqual(str(t), '(abc:3.0,(def:4.0,ghi:5.0):6.0);\n')
+
+    def test_gnodedata(self):
+        """DndParser should assign name to internal nodes correctly"""
+        t = TreeNode.from_newick(nodedata)
+        self.assertEqual(len(t), 2)
+        self.assertEqual(len(t[0]), 0)  # first child is terminal
+        self.assertEqual(len(t[1]), 2)  # second child has two children
+        self.assertEqual(str(t), '(abc:3.0,(def:4.0,ghi:5.0)jkl:6.0);\n')
+        info_dict = {}
+        for node in t.traverse():
+            info_dict[node.name] = node.length
+        self.assertEqual(info_dict['abc'], 3.0)
+        self.assertEqual(info_dict['def'], 4.0)
+        self.assertEqual(info_dict['ghi'], 5.0)
+        self.assertEqual(info_dict['jkl'], 6.0)
+
+    def test_data(self):
+        """DndParser should work as expected on real data"""
+        t = TreeNode.from_newick(sample)
+        self.assertEqual(
+            str(t), '((xyz:0.28124,(def:0.24498,mno:0.03627):0.1771):0.0487,'
+                    'abc:0.05925,(ghi:0.06914,jkl:0.13776):0.09853);\n')
+        tdata = TreeNode.from_newick(node_data_sample, unescape_name=True)
+        self.assertEqual(
+            str(tdata), "((xyz:0.28124,(def:0.24498,mno:0.03627)A:0.1771)"
+                        "B:0.0487,abc:0.05925,(ghi:0.06914,jkl:0.13776)"
+                        "C:0.09853);\n")
+
+    def test_gbad(self):
+        """DndParser should fail if parens unbalanced"""
+        left = '((abc:3)'
+        right = '(abc:3))'
+        self.assertRaises(RecordError, TreeNode.from_newick, left)
+        self.assertRaises(RecordError, TreeNode.from_newick, right)
+
+    def test_DndParser(self):
+        """DndParser tests"""
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("ignore")
+            t_str = "(A_a,(B:1.0,C),'D_e':0.5)E;"
+            tree_unesc = TreeNode.from_newick(t_str, unescape_name=True)
+            tree_esc = TreeNode.from_newick(t_str, unescape_name=False)
+
+            self.assertEqual(tree_unesc.name, 'E')
+            self.assertEqual(tree_unesc.children[0].name, 'A a')
+            self.assertEqual(tree_unesc.children[1].children[0].name, 'B')
+            self.assertEqual(tree_unesc.children[1].children[0].length, 1.0)
+            self.assertEqual(tree_unesc.children[1].children[1].name, 'C')
+            self.assertEqual(tree_unesc.children[2].name, 'D_e')
+            self.assertEqual(tree_unesc.children[2].length, 0.5)
+
+            self.assertEqual(tree_esc.name, 'E')
+            self.assertEqual(tree_esc.children[0].name, 'A_a')
+            self.assertEqual(tree_esc.children[1].children[0].name, 'B')
+            self.assertEqual(tree_esc.children[1].children[0].length, 1.0)
+            self.assertEqual(tree_esc.children[1].children[1].name, 'C')
+            self.assertEqual(tree_esc.children[2].name, "'D_e'")
+            self.assertEqual(tree_esc.children[2].length, 0.5)
+
+            reload_test = tree_esc.to_newick(with_distances=True,
+                                             escape_name=False)
+            obs = TreeNode.from_newick(reload_test, unescape_name=False)
+            self.assertEqual(obs.to_newick(with_distances=True),
+                             tree_esc.to_newick(with_distances=True))
+            reload_test = tree_unesc.to_newick(with_distances=True,
+                                               escape_name=False)
+            obs = TreeNode.from_newick(reload_test, unescape_name=False)
+            self.assertEqual(obs.to_newick(with_distances=True),
+                             tree_unesc.to_newick(with_distances=True))
+
+    def test_DndParser_list(self):
+        """Make sure TreeNode.from_newick can handle list of strings"""
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("ignore")
+
+            t_str = ["(A_a,(B:1.0,C)", ",'D_e':0.5)E;"]
+            tree_unesc = TreeNode.from_newick(t_str, unescape_name=True)
+
+            self.assertEqual(tree_unesc.name, 'E')
+            self.assertEqual(tree_unesc.children[0].name, 'A a')
+            self.assertEqual(tree_unesc.children[1].children[0].name, 'B')
+            self.assertEqual(tree_unesc.children[1].children[0].length, 1.0)
+            self.assertEqual(tree_unesc.children[1].children[1].name, 'C')
+            self.assertEqual(tree_unesc.children[2].name, 'D_e')
+            self.assertEqual(tree_unesc.children[2].length, 0.5)
+
+    def test_cache_attr_tip_list(self):
+        tree = TreeNode.read(StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
+
+        def f(n):
+            return [n.name] if n.is_tip() else []
+
+        tree.cache_attr(f, 'tip_names')
+        self.assertEqual(tree.tip_names, ['a', 'b', 'c', 'd', 'g', 'h'])
+        self.assertEqual(tree.children[0].tip_names, ['a', 'b', 'c', 'd'])
+        self.assertEqual(tree.children[1].tip_names, ['g', 'h'])
+        self.assertEqual(tree.children[0].children[2].tip_names, ['c', 'd'])
+
+    def test_cache_attr_nontip_set(self):
+        tree = TreeNode.read(StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
+
+        def f(n):
+            return [n.name] if not n.is_tip() else []
+
+        tree.cache_attr(f, 'nontip_names')
+        self.assertEqual(tree.nontip_names, ['e', 'f', 'i', 'root'])
+        self.assertEqual(tree.children[0].nontip_names, ['e', 'f'])
+        self.assertEqual(tree.children[1].nontip_names, ['i'])
+        self.assertEqual(tree.children[0].children[2].nontip_names, ['e'])
+
+    def test_cache_attr_bad_type(self):
+        tree = TreeNode.read(StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
+
+        def f(n):
+            return [n.name] if not n.is_tip() else []
+
+        with self.assertRaises(TypeError):
+            tree.cache_attr(f, 'nontip_names', TreeNode)
+
+
+sample = """
+(
+(
+xyz:0.28124,
+(
+def:0.24498,
+mno:0.03627)
+:0.17710)
+:0.04870,
+
+abc:0.05925,
+(
+ghi:0.06914,
+jkl:0.13776)
+:0.09853);
+"""
+
+node_data_sample = """
+(
+(
+xyz:0.28124,
+(
+def:0.24498,
+mno:0.03627)
+'A':0.17710)
+B:0.04870,
+
+abc:0.05925,
+(
+ghi:0.06914,
+jkl:0.13776)
+C:0.09853);
+"""
+
+minimal = "();"
+no_names = "((,),(,));"
+missing_tip_name = "((a,b),(c,));"
+
+empty = '();'
+single = '(abc:3);'
+double = '(abc:3, def:4);'
+onenest = '(abc:3, (def:4, ghi:5):6 );'
+nodedata = '(abc:3, (def:4, ghi:5)jkl:6 );'
+
+exp_ascii_art_three_children = """\
+          /-a
+         |
+---------|          /-b
+         |         |
+          \--------|--c
+                   |
+                    \-d\
+"""
+
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/tree/tests/test_trie.py b/skbio/tree/tests/test_trie.py
new file mode 100644
index 0000000..3df417d
--- /dev/null
+++ b/skbio/tree/tests/test_trie.py
@@ -0,0 +1,216 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import zip
+
+from unittest import TestCase, main
+
+from skbio.tree import CompressedTrie, fasta_to_pairlist
+from skbio.tree._trie import _CompressedNode
+
+
+class CompressedNodeTests(TestCase):
+    """Tests for the _CompressedNode class"""
+
+    def setUp(self):
+        """Set up test data for use in compressed node unit tests"""
+        self.key = "aba"
+        self.values = [1, 2]
+        self.node = _CompressedNode(self.key, self.values)
+
+    def test_init(self):
+        """Node init should construct the right structure"""
+        # With no values should create a node with an empty list for values,
+        # the provided key as key, and an empty dictionary as children
+        n = _CompressedNode(self.key)
+        self.assertEqual(n.values, [])
+        self.assertEqual(n.key, self.key)
+        self.assertEqual(n.children, {})
+        # With values should create a node with the provided values list as
+        # values, the provided key as key, and an empty dictionary as children
+        n = _CompressedNode(self.key, self.values)
+        self.assertEqual(n.values, self.values)
+        self.assertEqual(n.key, self.key)
+        self.assertEqual(n.children, {})
+
+    def test_truth_value(self):
+        """Non zero should check for any data on the node"""
+        n = _CompressedNode("")
+        self.assertFalse(bool(n))
+        self.assertTrue(bool(self.node))
+
+    def test_len(self):
+        """Should return the number of values attached to the node"""
+        self.assertEqual(len(self.node), 2)
+
+    def test_size(self):
+        """Should return the number of nodes attached to the node"""
+        self.assertEqual(self.node.size, 1)
+
+    def test_prefix_map(self):
+        """Should return the prefix map of the node"""
+        exp = {1: [2]}
+        self.assertEqual(self.node.prefix_map, exp)
+
+    def test_insert(self):
+        """Correctly inserts a new key in the node"""
+        n = _CompressedNode(self.key, self.values)
+        n.insert("abb", [3])
+
+        # A new node has been created with the common prefix
+        self.assertEqual(n.key, "ab")
+        self.assertEqual(n.values, [])
+        # Test that the old node and the new one have been correctly added
+        # as children
+        exp_keys = set(["b", "a"])
+        self.assertEqual(set(n.children.keys()), exp_keys)
+        # Check that the children have the current values
+        self.assertEqual(n.children["b"].key, "b")
+        self.assertEqual(n.children["b"].values, [[3]])
+        self.assertEqual(n.children["b"].children, {})
+
+        self.assertEqual(n.children["a"].key, "a")
+        self.assertEqual(n.children["a"].values, [1, 2])
+        self.assertEqual(n.children["a"].children, {})
+
+    def test_find(self):
+        """The key could be found"""
+        # Correctly retrieves the key stored in the calling node
+        self.assertEqual(self.node.find("aba"), [1, 2])
+
+        # Correctly retrieves the key stored in a node attached to calling one
+        n = _CompressedNode(self.key, self.values)
+        n.insert("abb", [3])
+        self.assertEqual(n.find("aba"), [1, 2])
+        self.assertEqual(n.find("abb"), [[3]])
+        self.assertEqual(n.find("ab"), [])
+
+        # Correctly retrieves an empty list for a non existent key
+        self.assertEqual(n.find("cd"), [])
+
+
+class CompressedTrieTests(TestCase):
+    """Tests for the CompressedTrie class"""
+
+    def setUp(self):
+        """Set up test data for use in compressed trie unit tests"""
+        self.data = [("ab",  "0"),
+                     ("abababa", "1"),
+                     ("abab", "2"),
+                     ("baba", "3"),
+                     ("ababaa", "4"),
+                     ("a", "5"),
+                     ("abababa", "6"),
+                     ("bab", "7"),
+                     ("babba", "8")]
+        self.empty_trie = CompressedTrie()
+        self.trie = CompressedTrie(self.data)
+
+    def test_init(self):
+        """Trie init should construct the right structure"""
+        # If no pair_list is provided, it should create an empty Trie
+        t = CompressedTrie()
+        self.assertEqual(t._root.key, "")
+        self.assertEqual(t._root.values, [])
+        self.assertEqual(t._root.children, {})
+        # If a pair_list is provided, it should insert all the data
+        t = CompressedTrie(self.data)
+        self.assertEqual(t._root.key, "")
+        self.assertEqual(t._root.values, [])
+        self.assertEqual(set(t._root.children.keys()), set(["a", "b"]))
+
+    def test_non_zero(self):
+        """Non zero should check for any data on the trie"""
+        self.assertFalse(self.empty_trie)
+        self.assertTrue(self.trie)
+
+    def test_len(self):
+        """Should return the number of values attached to the trie"""
+        self.assertEqual(len(self.empty_trie), 0)
+        self.assertEqual(len(self.trie), 9)
+
+    def test_size(self):
+        """Should return the number of nodes attached to the trie"""
+        self.assertEqual(self.empty_trie.size, 1)
+        self.assertEqual(self.trie.size, 10)
+
+    def test_prefix_map(self):
+        """Should map prefix to values"""
+        exp1 = {"1": ["6", "2", "0", "5"],
+                "8": ["7"],
+                "3": [],
+                "4": []}
+        exp2 = {"1": ["6", "2", "0", "5"],
+                "8": [],
+                "3": ["7"],
+                "4": []}
+        self.assertTrue(self.trie.prefix_map in (exp1, exp2))
+
+    def test_insert(self):
+        """Correctly inserts a new key into the trie"""
+        t = CompressedTrie(self.data)
+        t.insert("babc", "9")
+        self.assertTrue("9" in t.find("babc"))
+
+        exp1 = {"1": ["6", "2", "0", "5"],
+                "9": ["7"],
+                "3": [],
+                "4": [],
+                "8": []}
+        exp2 = {"1": ["6", "2", "0", "5"],
+                "9": [],
+                "3": ["7"],
+                "4": [],
+                "8": []}
+        exp3 = {"1": ["6", "2", "0", "5"],
+                "9": [],
+                "3": [],
+                "4": [],
+                "8": ["7"]}
+        self.assertTrue(t.prefix_map in (exp1, exp2, exp3))
+
+    def test_find(self):
+        """Correctly finds the values present in the trie"""
+        for key, value in self.data:
+            self.assertTrue(value in self.trie.find(key))
+        self.assertEqual(self.trie.find("cac"), [])
+        self.assertEqual(self.trie.find("abababa"), ["1", "6"])
+
+
+class FastaToPairlistTests(TestCase):
+    """Tests for the fasta_to_pairlist function"""
+
+    def setUp(self):
+        self.seqs = [("sid_0", "AC"),
+                     ("sid_1", "ACAGTC"),
+                     ("sid_2", "ACTA"),
+                     ("sid_3", "CAGT"),
+                     ("sid_4", "CATGAA"),
+                     ("sid_5", "A"),
+                     ("sid_6", "CATGTA"),
+                     ("sid_7", "CAA"),
+                     ("sid_8", "CACCA")]
+
+    def test_fasta_to_pairlist(self):
+        """Correctly returns a list of (seq, label)"""
+        exp = [("AC", "sid_0"),
+               ("ACAGTC", "sid_1"),
+               ("ACTA", "sid_2"),
+               ("CAGT", "sid_3"),
+               ("CATGAA", "sid_4"),
+               ("A", "sid_5"),
+               ("CATGTA", "sid_6"),
+               ("CAA", "sid_7"),
+               ("CACCA", "sid_8")]
+
+        for obs, exp in zip(fasta_to_pairlist(self.seqs), exp):
+            self.assertEqual(obs, exp)
+
+if __name__ == '__main__':
+    main()
diff --git a/skbio/util/__init__.py b/skbio/util/__init__.py
new file mode 100644
index 0000000..86c2699
--- /dev/null
+++ b/skbio/util/__init__.py
@@ -0,0 +1,76 @@
+"""
+Utility functionality (:mod:`skbio.util`)
+=========================================
+
+.. currentmodule:: skbio.util
+
+This package provides general exception/warning definitions used throughout
+scikit-bio, as well as various utility functionality, including I/O and
+unit-testing convenience functions.
+
+Testing functionality
+---------------------
+
+Common functionality to support testing in skbio.
+
+.. autosummary::
+   :toctree: generated/
+
+   get_data_path
+
+Miscellaneous functionality
+---------------------------
+
+Generally useful functions that don't fit in more specific locations.
+
+.. autosummary::
+   :toctree: generated/
+
+   cardinal_to_ordinal
+   create_dir
+   find_duplicates
+   flatten
+   is_casava_v180_or_later
+   remove_files
+   safe_md5
+
+Exceptions
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   TestingUtilError
+
+Warnings
+--------
+
+.. autosummary::
+   :toctree: generated/
+
+   EfficiencyWarning
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from numpy.testing import Tester
+
+from ._warning import EfficiencyWarning
+from ._exception import TestingUtilError
+from ._misc import (cardinal_to_ordinal, create_dir, find_duplicates, flatten,
+                    is_casava_v180_or_later, remove_files, safe_md5)
+from ._testing import get_data_path
+
+__all__ = ['EfficiencyWarning', 'TestingUtilError',
+           'cardinal_to_ordinal', 'create_dir', 'find_duplicates', 'flatten',
+           'is_casava_v180_or_later', 'remove_files', 'safe_md5',
+           'get_data_path']
+
+test = Tester().test
diff --git a/skbio/util/_exception.py b/skbio/util/_exception.py
new file mode 100644
index 0000000..328087a
--- /dev/null
+++ b/skbio/util/_exception.py
@@ -0,0 +1,14 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+
+class TestingUtilError(Exception):
+    """Raised when an exception is needed to test exception handling."""
+    pass
diff --git a/skbio/util/_misc.py b/skbio/util/_misc.py
new file mode 100644
index 0000000..90259f4
--- /dev/null
+++ b/skbio/util/_misc.py
@@ -0,0 +1,334 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import hashlib
+from os import remove, makedirs
+from os.path import exists, isdir
+from functools import partial
+
+
+def cardinal_to_ordinal(n):
+    """Return ordinal string version of cardinal int `n`.
+
+    Parameters
+    ----------
+    n : int
+        Cardinal to convert to ordinal. Must be >= 0.
+
+    Returns
+    -------
+    str
+        Ordinal version of cardinal `n`.
+
+    Raises
+    ------
+    ValueError
+        If `n` is less than 0.
+
+    Notes
+    -----
+    This function can be useful when writing human-readable error messages.
+
+    Examples
+    --------
+    >>> from skbio.util import cardinal_to_ordinal
+    >>> cardinal_to_ordinal(0)
+    '0th'
+    >>> cardinal_to_ordinal(1)
+    '1st'
+    >>> cardinal_to_ordinal(2)
+    '2nd'
+    >>> cardinal_to_ordinal(3)
+    '3rd'
+
+    """
+    # Taken and modified from http://stackoverflow.com/a/20007730/3776794
+    # Originally from http://codegolf.stackexchange.com/a/4712 by Gareth
+    if n < 0:
+        raise ValueError("Cannot convert negative integer %d to ordinal "
+                         "string." % n)
+    return "%d%s" % (n, "tsnrhtdd"[(n//10 % 10 != 1)*(n % 10 < 4)*n % 10::4])
+
+
+def is_casava_v180_or_later(header_line):
+    """Check if the header looks like it is Illumina software post-casava v1.8
+
+    Parameters
+    ----------
+    header_line : bytes
+        A header line
+
+    Returns
+    -------
+    bool
+        ``True`` if casava v1.8+, otherwise ``False``
+
+    Examples
+    --------
+    >>> from skbio.util import is_casava_v180_or_later
+    >>> print(is_casava_v180_or_later(b'@foo'))
+    False
+    >>> id_ = b'@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
+    >>> print(is_casava_v180_or_later(id_))
+    True
+
+    """
+    if not header_line.startswith(b'@'):
+        raise ValueError("Non-header line passed in!")
+    fields = header_line.split(b':')
+
+    return len(fields) == 10 and fields[7] in b'YN'
+
+
+def safe_md5(open_file, block_size=2 ** 20):
+    """Computes an md5 sum without loading the file into memory
+
+    Parameters
+    ----------
+    open_file : file object
+        open file handle to the archive to compute the checksum. It
+        must be open as a binary file
+    block_size : int, optional
+        size of the block taken per iteration
+
+    Returns
+    -------
+    md5 : md5 object from the hashlib module
+        object with the loaded file
+
+    Notes
+    -----
+    This method is based on the answers given in:
+    http://stackoverflow.com/a/1131255/379593
+
+    Examples
+    --------
+    >>> from io import BytesIO
+    >>> from skbio.util import safe_md5
+    >>> fd = BytesIO(b"foo bar baz") # open file-like object
+    >>> x = safe_md5(fd)
+    >>> x.hexdigest()
+    'ab07acbb1e496801937adfa772424bf7'
+    >>> fd.close()
+
+    """
+    md5 = hashlib.md5()
+    data = True
+    while data:
+        data = open_file.read(block_size)
+        if data:
+            md5.update(data)
+    return md5
+
+
+def remove_files(list_of_filepaths, error_on_missing=True):
+    """Remove list of filepaths, optionally raising an error if any are missing
+
+    Parameters
+    ----------
+    list_of_filepaths : list of strings
+        list with filepaths to remove
+    error_on_missing : bool, optional
+        whether or not the function should raise an ``OSError`` if a file is
+        not found
+
+    Raises
+    ------
+    OSError
+        If a filepath in the list does not exist
+
+    Examples
+    --------
+    >>> from tempfile import NamedTemporaryFile
+    >>> from os.path import exists
+    >>> from skbio.util import remove_files
+    >>> h = NamedTemporaryFile(delete=False)
+    >>> exists(h.name) # it exists
+    True
+    >>> remove_files([h.name])
+    >>> exists(h.name) # and now it's gone
+    False
+
+    """
+    missing = []
+    for fp in list_of_filepaths:
+        try:
+            remove(fp)
+        except OSError:
+            missing.append(fp)
+
+    if error_on_missing and missing:
+        raise OSError("Some filepaths were not accessible: %s" %
+                      '\t'.join(missing))
+
+
+def create_dir(dir_name, fail_on_exist=False, handle_errors_externally=False):
+    """Create a directory safely and fail meaningfully
+
+    Parameters
+    ----------
+    dir_name: string
+        name of directory to create
+
+    fail_on_exist: bool, optional
+        if true raise an error if ``dir_name`` already exists
+
+    handle_errors_externally: bool, optional
+        if True do not raise errors, but return failure codes instead. This
+        allows errors to be handled locally and e.g. to hint the user at a
+        --force_overwrite option.
+
+    Returns
+    -------
+    return_value : int
+        These values are only returned if no error is raised:
+
+        - ``0``:  directory was safely created
+        - ``1``:  directory already existed
+        - ``2``:  a file with the same name exists
+        - ``3``:  any other unspecified ``OSError``
+
+    Notes
+    -----
+    Depending on how thorough we want to be we could add tests, e.g. for
+    testing actual write permission in an existing dir.
+
+    Examples
+    --------
+    >>> from skbio.util import create_dir
+    >>> from os.path import exists, join
+    >>> from tempfile import gettempdir
+    >>> from os import rmdir
+    >>> new_dir = join(gettempdir(), 'scikitbio')
+    >>> create_dir(new_dir)
+    0
+    >>> exists(new_dir)
+    True
+    >>> rmdir(new_dir)
+
+    """
+    error_code_lookup = _get_create_dir_error_codes()
+    # pre-instantiate the error handler with dir_name and the handling mode
+    ror = partial(_handle_error_codes, dir_name, handle_errors_externally)
+
+    if exists(dir_name):
+        if isdir(dir_name):
+            # dir is there
+            if fail_on_exist:
+                return ror(error_code_lookup['DIR_EXISTS'])
+            else:
+                return error_code_lookup['DIR_EXISTS']
+        else:
+            # must be file with same name
+            return ror(error_code_lookup['FILE_EXISTS'])
+    else:
+        # no dir there, try making it
+        try:
+            makedirs(dir_name)
+        except OSError:
+            return ror(error_code_lookup['OTHER_OS_ERROR'])
+
+    return error_code_lookup['NO_ERROR']
+
+
+def find_duplicates(iterable):
+    """Find duplicate elements in an iterable.
+
+    Parameters
+    ----------
+    iterable : iterable
+        Iterable to be searched for duplicates (i.e., elements that are
+        repeated).
+
+    Returns
+    -------
+    set
+        Repeated elements in `iterable`.
+
+    """
+    # modified from qiita.qiita_db.util.find_repeated
+    # https://github.com/biocore/qiita
+    # see licenses/qiita.txt
+    seen, repeated = set(), set()
+    for e in iterable:
+        if e in seen:
+            repeated.add(e)
+        else:
+            seen.add(e)
+    return repeated
+
+
+def flatten(items):
+    """Removes one level of nesting from items
+
+    Parameters
+    ----------
+    items : iterable
+        list of items to flatten one level
+
+    Returns
+    -------
+    flattened_items : list
+        list of flattened items, items can be any sequence, but flatten always
+        returns a list.
+
+    Examples
+    --------
+    >>> from skbio.util import flatten
+    >>> h = [['a', 'b', 'c', 'd'], [1, 2, 3, 4, 5], ['x', 'y'], ['foo']]
+    >>> print(flatten(h))
+    ['a', 'b', 'c', 'd', 1, 2, 3, 4, 5, 'x', 'y', 'foo']
+
+    """
+    result = []
+    for i in items:
+        try:
+            result.extend(i)
+        except TypeError:
+            result.append(i)
+    return result
+
+
+def _get_create_dir_error_codes():
+    return {'NO_ERROR':      0,
+            'DIR_EXISTS':    1,
+            'FILE_EXISTS':   2,
+            'OTHER_OS_ERROR': 3}
+
+
+def _handle_error_codes(dir_name, suppress_errors=False,
+                        error_code=None):
+    """Wrapper function for error_handling.
+
+    dir_name: name of directory that raised the error
+    suppress_errors: if True return the error code, otherwise raise an OSError
+    error_code: the code for the error
+
+    """
+    error_code_lookup = _get_create_dir_error_codes()
+
+    if error_code is None:
+        error_code = error_code_lookup['NO_ERROR']
+
+    error_strings = \
+        {error_code_lookup['DIR_EXISTS']:
+         "Directory already exists: %s" % dir_name,
+         error_code_lookup['FILE_EXISTS']:
+         "File with same name exists: %s" % dir_name,
+         error_code_lookup['OTHER_OS_ERROR']:
+         "Could not create output directory: %s. " % dir_name +
+         "Check the permissions."}
+
+    if error_code == error_code_lookup['NO_ERROR']:
+        return error_code_lookup['NO_ERROR']
+    if suppress_errors:
+        return error_code
+    else:
+        raise OSError(error_strings[error_code])
diff --git a/skbio/util/_testing.py b/skbio/util/_testing.py
new file mode 100644
index 0000000..b9495bd
--- /dev/null
+++ b/skbio/util/_testing.py
@@ -0,0 +1,47 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import os
+import inspect
+
+
+def get_data_path(fn, subfolder='data'):
+    """Return path to filename ``fn`` in the data folder.
+
+    During testing it is often necessary to load data files. This
+    function returns the full path to files in the ``data`` subfolder
+    by default.
+
+    Parameters
+    ----------
+    fn : str
+        File name.
+
+    subfolder : str, defaults to ``data``
+        Name of the subfolder that contains the data.
+
+
+    Returns
+    -------
+    str
+        Inferred absolute path to the test data for the module where
+        ``get_data_path(fn)`` is called.
+
+    Notes
+    -----
+    The requested path may not point to an existing file, as its
+    existence is not checked.
+
+    """
+    # getouterframes returns a list of frame records; the record at index 1
+    # describes the caller, and element 1 of that record is the caller's
+    # filename
+    callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
+    path = os.path.dirname(os.path.abspath(callers_filename))
+    data_path = os.path.join(path, subfolder, fn)
+    return data_path
diff --git a/skbio/util/_warning.py b/skbio/util/_warning.py
new file mode 100644
index 0000000..91cb141
--- /dev/null
+++ b/skbio/util/_warning.py
@@ -0,0 +1,22 @@
+from __future__ import absolute_import, division, print_function
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+
+class EfficiencyWarning(Warning):
+    """Warn about potentially accidental use of inefficient code.
+
+    For example, if a user doesn't have an optimized version of a
+    function/algorithm available in their scikit-bio installation, a slower,
+    pure-Python implementation may be used instead. This warning can be used to
+    let the user know they are using a version of the function that could be
+    potentially orders of magnitude slower.
+
+    """
+    pass
diff --git a/skbio/util/tests/__init__.py b/skbio/util/tests/__init__.py
new file mode 100644
index 0000000..c99682c
--- /dev/null
+++ b/skbio/util/tests/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
diff --git a/skbio/util/tests/test_misc.py b/skbio/util/tests/test_misc.py
new file mode 100644
index 0000000..9e5b5fb
--- /dev/null
+++ b/skbio/util/tests/test_misc.py
@@ -0,0 +1,160 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import range
+from six import BytesIO
+
+from tempfile import NamedTemporaryFile, mkdtemp
+from os.path import exists, join
+from unittest import TestCase, main
+from shutil import rmtree
+from uuid import uuid4
+
+from skbio.util import (cardinal_to_ordinal, safe_md5, remove_files,
+                        create_dir, find_duplicates, flatten,
+                        is_casava_v180_or_later)
+from skbio.util._misc import _handle_error_codes
+
+
class MiscTests(TestCase):
    """Tests for assorted helpers exposed through ``skbio.util``."""

    def setUp(self):
        # Directories created by a test; cleaned up in tearDown.
        self.dirs_to_remove = []

    def tearDown(self):
        for element in self.dirs_to_remove:
            rmtree(element)

    def test_is_casava_v180_or_later(self):
        # A minimal pre-1.8-style header is not CASAVA >= 1.8.
        self.assertFalse(is_casava_v180_or_later(b'@foo'))
        id_ = b'@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
        self.assertTrue(is_casava_v180_or_later(id_))

        # Input must look like a sequence header (start with '@').
        with self.assertRaises(ValueError):
            is_casava_v180_or_later(b'foo')

    def test_safe_md5(self):
        # Known md5 hex digest of b'foo bar baz'.
        exp = 'ab07acbb1e496801937adfa772424bf7'

        fd = BytesIO(b'foo bar baz')
        obs = safe_md5(fd)
        self.assertEqual(obs.hexdigest(), exp)

        fd.close()

    def test_remove_files(self):
        # create list of temp file paths
        test_fds = [NamedTemporaryFile(delete=False) for i in range(5)]
        test_filepaths = [element.name for element in test_fds]

        # should work just fine
        remove_files(test_filepaths)

        # check that an error is raised on trying to remove the files...
        self.assertRaises(OSError, remove_files, test_filepaths)

        # touch one of the filepaths so it exists
        extra_file = NamedTemporaryFile(delete=False).name
        test_filepaths.append(extra_file)

        # no error is raised on trying to remove the files
        # (although 5 don't exist)...
        remove_files(test_filepaths, error_on_missing=False)
        # ... and the existing file was removed
        self.assertFalse(exists(extra_file))

        # try to remove them with remove_files and verify that an OSError is
        # raised
        self.assertRaises(OSError, remove_files, test_filepaths)

        # now get no error when error_on_missing=False
        remove_files(test_filepaths, error_on_missing=False)

    def test_create_dir(self):
        # create a directory
        tmp_dir_path = mkdtemp()

        # create a random temporary directory name
        tmp_dir_path2 = join(mkdtemp(), str(uuid4()))
        tmp_dir_path3 = join(mkdtemp(), str(uuid4()))

        self.dirs_to_remove += [tmp_dir_path, tmp_dir_path2, tmp_dir_path3]

        # create on existing dir raises OSError if fail_on_exist=True
        self.assertRaises(OSError, create_dir, tmp_dir_path,
                          fail_on_exist=True)
        self.assertEqual(create_dir(tmp_dir_path, fail_on_exist=True,
                                    handle_errors_externally=True), 1)

        # return should be 1 if dir exist and fail_on_exist=False
        self.assertEqual(create_dir(tmp_dir_path, fail_on_exist=False), 1)

        # if dir not there make it and return always 0
        self.assertEqual(create_dir(tmp_dir_path2), 0)
        self.assertEqual(create_dir(tmp_dir_path3, fail_on_exist=True), 0)

    def test_handle_error_codes_no_error(self):
        # No error flags supplied -> 0 ("no error") is returned.
        obs = _handle_error_codes('/foo/bar/baz')
        self.assertEqual(obs, 0)

    def test_flatten(self):
        # flatten removes exactly one level of nesting.
        self.assertEqual(flatten(['aa', 'bb', 'cc']), list('aabbcc'))
        self.assertEqual(flatten([1, [2, 3], [[4, [5]]]]), [1, 2, 3, [4, [5]]])
+
+
class CardinalToOrdinalTests(TestCase):
    """Tests for ``cardinal_to_ordinal``."""

    def test_valid_range(self):
        # Expected values taken and modified from
        # http://stackoverflow.com/a/20007730/3776794
        expected = ['0th', '1st', '2nd', '3rd', '4th', '5th', '6th', '7th',
                    '8th', '9th', '10th', '11th', '12th', '13th', '14th',
                    '15th', '16th', '17th', '18th', '19th', '20th', '21st',
                    '22nd', '23rd', '24th', '25th', '26th', '27th', '28th',
                    '29th', '30th', '31st', '32nd', '100th', '101st',
                    '42042nd']
        inputs = list(range(0, 33)) + [100, 101, 42042]
        observed = [cardinal_to_ordinal(cardinal) for cardinal in inputs]
        self.assertEqual(observed, expected)

    def test_invalid_n(self):
        # Negative input is rejected; the message names the bad value.
        with self.assertRaisesRegexp(ValueError, '-1'):
            cardinal_to_ordinal(-1)
+
+
class TestFindDuplicates(TestCase):
    """Tests for ``find_duplicates``."""

    def test_empty_input(self):
        def empty_gen():
            # End the generator with a bare ``return`` instead of
            # ``raise StopIteration()``: under PEP 479 (the default in
            # Python 3.7+), a StopIteration raised inside a generator
            # body is converted into a RuntimeError.
            return
            yield  # unreachable; makes this function a generator

        # Every flavor of "empty" yields an empty duplicate set.
        for empty in [], (), '', set(), {}, empty_gen():
            self.assertEqual(find_duplicates(empty), set())

    def test_no_duplicates(self):
        # Comparison is case-sensitive: 'a' and 'A' are distinct.
        self.assertEqual(find_duplicates(['a', 'bc', 'def', 'A']), set())

    def test_one_duplicate(self):
        self.assertEqual(find_duplicates(['a', 'bc', 'def', 'a']), set(['a']))

    def test_many_duplicates(self):
        self.assertEqual(find_duplicates(['a', 'bc', 'bc', 'def', 'a']),
                         set(['a', 'bc']))

    def test_all_duplicates(self):
        self.assertEqual(
            find_duplicates(('a', 'bc', 'bc', 'def', 'a', 'def', 'def')),
            set(['a', 'bc', 'def']))

    def test_mixed_types(self):
        # Hashable values of mixed types from a generator input.
        def gen():
            for e in 'a', 1, 'bc', 2, 'a', 2, 2, 3.0:
                yield e

        self.assertEqual(find_duplicates(gen()), set(['a', 2]))
+
+
# Allow running this test module directly: ``python test_misc.py``.
if __name__ == '__main__':
    main()
diff --git a/skbio/util/tests/test_testing.py b/skbio/util/tests/test_testing.py
new file mode 100644
index 0000000..e0b6edd
--- /dev/null
+++ b/skbio/util/tests/test_testing.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import os
+
+import numpy.testing as npt
+
+from skbio.util import get_data_path
+
+
def test_get_data_path():
    """``get_data_path`` resolves a filename under this module's data dir."""
    fn = 'parrot'
    here = os.path.dirname(os.path.abspath(__file__))
    expected = os.path.join(here, 'data', fn)
    observed = get_data_path(fn)
    npt.assert_string_equal(observed, expected)
+
+
# Allow running this test module directly via nose.
if __name__ == '__main__':
    import nose
    nose.runmodule()
diff --git a/skbio/workflow.py b/skbio/workflow.py
new file mode 100644
index 0000000..fb68387
--- /dev/null
+++ b/skbio/workflow.py
@@ -0,0 +1,550 @@
+#!/usr/bin/env python
+r"""
+Constructing workflows (:mod:`skbio.workflow`)
+==============================================
+
+.. currentmodule:: skbio.workflow
+
+Construct arbitrarily complex workflows in which the specific methods run are
+determined at runtime. This module supports short circuiting a workflow if an
+item fails, supports ordering methods, callbacks for processed items, and
+deciding what methods are executed based on state or runtime options.
+
+Classes
+-------
+
+.. autosummary::
+    :toctree: generated/
+
+    Workflow
+
+Decorators
+----------
+
+.. autosummary::
+    :toctree: generated/
+
+    requires
+    method
+
+Examples
+--------
+>>> from skbio.workflow import Workflow
+
+As an example of the ``Workflow`` object, let's construct a sequence processor
+that will filter sequences that are < 10 nucleotides, reverse the sequence
+if the runtime options indicate to, and truncate if a specific nucleotide
+pattern is observed. The ``Workflow`` object will only short circuit, and
+evaluate requirements on methods decorated by ``method``. Developers are free
+to define as many methods as they'd like within the object definition, and
+which can be called from workflow methods, but they will not be subjected
+directly to workflow checks.
+
+>>> nuc_pattern = 'AATTG'
+>>> has_nuc_pattern = lambda s: s[:len(nuc_pattern)] == nuc_pattern
+>>> class SequenceProcessor(Workflow):
+...    def initialize_state(self, item):
+...        # Setup the state for a new item (e.g., a new sequence)
+...        self.state = item
+...    @method(priority=100)
+...    def check_length(self):
+...        # Always make sure the sequence is at least 10 nucleotides
+...        if len(self.state) < 10:
+...            self.failed = True
+...    @method(priority=90)
+...    @requires(state=has_nuc_pattern)
+...    def truncate(self):
+...        # Truncate if a specific starting nucleotide pattern is observed
+...        self.state = self.state[len(nuc_pattern):]
+...    @method(priority=80)
+...    @requires(option='reverse', values=True)
+...    def reverse(self):
+...        # Reverse the sequence if indicated at runtime
+...        self.state = self.state[::-1]
+
+An instance of a ``Workflow`` must be passed a ``state`` object and any runtime
+options. There are a few other useful parameters that can be specified but are
+out of scope for the purposes of this example. We also do not need to provide
+a state object as our ``initialize_state`` method overrides ``self.state``.
+Now, let's create the instance.
+
+>>> wf = SequenceProcessor(state=None, options={'reverse': False})
+
+To run items through the ``SequenceProcessor``, we need to pass in an
+iterable. So, let's create a ``list`` of sequences.
+
+>>> seqs = ['AAAAAAATTTTTTT', 'ATAGACC', 'AATTGCCGGAC', 'ATATGAACAAA']
+
+Before we run these sequences through, we're going to also define callbacks
+that are applied to the result of a single pass through the ``Workflow``.
+Callbacks are optional -- by default, a success will simply yield the state
+member variable while failures are ignored -- but, depending on your workflow,
+it can be useful to handle failures or potentially do something fun and
+exciting on success.
+
+>>> def success_f(obj):
+...     return "SUCCESS: %s" % obj.state
+>>> def fail_f(obj):
+...     return "FAIL: %s" % obj.state
+
+Now, let's process some data!
+
+>>> for result in wf(seqs, success_callback=success_f, fail_callback=fail_f):
+...     print result
+SUCCESS: AAAAAAATTTTTTT
+FAIL: ATAGACC
+SUCCESS: CCGGAC
+SUCCESS: ATATGAACAAA
+
+A few things of note just happened. First off, none of the sequences were
+reversed as the ``SequenceProcessor`` did not have option "reverse"
+set to ``True``. Second, you'll notice that the 3rd sequence was truncated,
+which is expected as it matched our nucleotide pattern of interest. Finally,
+of the sequences we processed, only a single sequence failed.
+
+To assist in constructing workflows, debug information is available but it
+must be turned on at instantiation. Let's do that, and while we're at it, let's
+go ahead and enable the reversal method. This time through though, we're going
+to walk through an item at a time so we can examine the debug information.
+
+>>> wf = SequenceProcessor(state=None, options={'reverse':True}, debug=True)
+>>> gen = wf(seqs, fail_callback=lambda x: x.state)
+>>> gen.next()
+'TTTTTTTAAAAAAA'
+>>> print wf.failed
+False
+>>> print wf.debug_trace
+set([('check_length', 0), ('reverse', 2)])
+
+The ``debug_trace`` specifies the methods executed, and the order of their
+execution where closer to zero indicates earlier in the execution order. Gaps
+indicate there was a method evaluated but not executed. Each of the items in
+the ``debug_trace`` is a key into a few other ``dict`` of debug information
+which we'll discuss in a moment. Did you see that the sequence was reversed
+this time through the workflow?
+
+Now, let's take a look at the next item, which on our prior run through the
+workflow was a failed item.
+
+>>> gen.next()
+'ATAGACC'
+>>> print wf.failed
+True
+>>> print wf.debug_trace
+set([('check_length', 0)])
+
+What we can see is that the failed sequence only executed the check_length
+method. Since the sequence didn't pass our length filter of 10 nucleotides,
+it was marked as failed within the ``check_length`` method. As a result, none
+of the other methods were evaluated (note: this short circuiting behavior can
+be disabled if desired).
+
+This third item previously matched our nucleotide pattern of interest for
+truncation. Let's see what that looks like in the debug output.
+
+>>> gen.next()
+'CAGGCC'
+>>> print wf.failed
+False
+>>> wf.debug_trace
+set([('check_length', 0), ('truncate', 1), ('reverse', 2)])
+
+In this last example, we can see that the ``truncate`` method was executed
+prior to the ``reverse`` method and following the ``check_length`` method. This
+is as anticipated given the priorities we specified for these methods. Since
+the ``truncate`` method is doing something interesting, let's take a closer
+look at how the ``state`` is changing. First, we're going to dump out the
+state of the workflow prior to the call to ``truncate`` and then we're going
+to dump out the ``state`` following the call to ``truncate``, which will allow
+us to rapidly see what is going on.
+
+>>> wf.debug_pre_state[('truncate', 1)]
+'AATTGCCGGAC'
+>>> wf.debug_post_state[('truncate', 1)]
+'CCGGAC'
+
+As we expect, we have our original sequence going into ``truncate``, and
+following the application of ``truncate``, our sequence is missing our
+nucleotide pattern of interest. Awesome, right?
+
+There is one final piece of debug output, ``wf.debug_runtime``, which can
+be useful when diagnosing the amount of time required for individual methods
+on a particular piece of state (as opposed to the aggregate as provided by
+cProfile).
+
+Three final components of the workflow that are quite handy are objects that
+allow you to indicate ``anything`` as an option value, anything that is
+``not_none``, and a mechanism to define a range of valid values.
+
+>>> from skbio.workflow import not_none, anything
+>>> class Ex(Workflow):
+...     @method()
+...     @requires(option='foo', values=not_none)
+...     def do_something(self):
+...         pass
+...     @method()
+...     @requires(option='bar', values=anything)
+...     def do_something_else(self):
+...         pass
+...     @method()
+...     @requires(option='foobar', values=[1,2,3])
+...     def do_something_awesome(self):
+...         pass
+...
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from future.utils import viewitems
+
+import sys
+from copy import deepcopy
+from time import time
+from functools import update_wrapper
+from collections import Iterable
+from types import MethodType
+
+
+class NotExecuted(object):
+    """Helper object to track if a method was executed"""
+    def __init__(self):
+        self.msg = None
+
+    def __call__(self, msg):
+        self.msg = msg
+        return self
+_not_executed = NotExecuted()
+
+
class Exists(object):
    """Container-like stub whose membership test always succeeds.

    Used by ``requires`` so that the mere presence of an option, with
    any value at all, satisfies the requirement.
    """
    def __contains__(self, item):
        return True
anything = Exists()  # external, for when a value can be anything
+
+
class NotNone(object):
    """Container-like stub matching every value except ``None``."""
    def __contains__(self, item):
        # Membership succeeds for anything that is not None.
        return item is not None
not_none = NotNone()
+
+
class Workflow(object):
    """Arbitrary workflow support structure

    Methods that are considered to be directly part of the workflow must
    be decorated with ``method``. The workflow methods offer a mechanism to
    logically group functionality together, and are free to make subsequent
    calls to other methods.

    All methods of a subclass of Workflow (those with and without the
    ``method`` decoration) can take advantage of the ``requires`` decorator
    to specify any option or state requirements for the decorated function.

    Parameters
    ----------
    state : object
        State can be anything or nothing. This is dependent on the
        workflow as in some cases, it is useful to preallocate state
        while in other workflows state may be ignored.
    short_circuit : bool
        if True, enables ignoring function methods when a given item
        has failed
    debug : bool
        Enable debug mode
    options : dict
        runtime options, {'option':values}, that the ``requires``
        decorator can interrogate.
    kwargs : dict
        Additional arguments will be added as member variables to self.
        This is handy if additional contextual information is needed by a
        workflow method (e.g., a lookup table).

    Attributes
    ----------
    state
    short_circuit
    debug
    options
    failed

    """

    def __init__(self, state, short_circuit=True, debug=False, options=None,
                 **kwargs):
        r"""Build thy workflow of self"""
        if options is None:
            self.options = {}
        else:
            self.options = options

        self.short_circuit = short_circuit
        self.failed = False
        self.debug = debug
        self.state = state
        self.iter_ = None  # set only while __call__ is consuming an iterable

        # Attach any extra context as member variables, refusing to clobber
        # attributes that already exist on the instance.
        for k, v in viewitems(kwargs):
            if hasattr(self, k):
                raise AttributeError("'%s' already exists in self." % k)
            setattr(self, k, v)

        if self.debug:
            self._setup_debug()

    def initialize_state(self, item):
        """Initialize state

        This method is called first prior to any other defined workflow method
        with the exception of _setup_debug_trace if self.debug is True

        Parameters
        ----------
        item : anything
            Workflow dependent

        Raises
        ------
        NotImplementedError
            Always; subclasses must override this method.
        """
        raise NotImplementedError("Must implement this method")

    def _setup_debug(self):
        """Wrap all methods with debug trace support"""
        # ignore all members of the baseclass so that only methods defined
        # by the subclass get wrapped
        ignore = set(dir(Workflow))

        for attrname in dir(self):
            if attrname in ignore:
                continue

            attr = getattr(self, attrname)

            # only bound methods are wrapped; plain attributes are skipped
            if isinstance(attr, MethodType):
                setattr(self, attrname, self._debug_trace_wrapper(attr))

    def _all_wf_methods(self):
        """Get all workflow methods

        Methods are sorted by priority
        """
        # Workflow methods are discovered by the ``priority`` attribute
        # that the ``method`` decorator tags onto a function.
        methods = []
        for item in dir(self):
            obj = getattr(self, item)
            if hasattr(obj, 'priority'):
                methods.append(obj)

        def key(x):
            return getattr(x, 'priority')

        # Higher priority executes earlier.
        methods_sorted = sorted(methods, key=key, reverse=True)

        if self.debug:
            # Runs first for every item, resetting the per-item trace.
            methods_sorted.insert(0, self._setup_debug_trace)

        return methods_sorted

    def _setup_debug_trace(self):
        """Setup a trace

        The trace is per item iterated over by the workflow. Information about
        each method executed is tracked and keyed by::

            (function name, order of execution)

        Order of execution starts from zero. Multiple calls to the same
        function are independent in the trace.

        The following information is tracked::

            debug_trace : set([key])
            debug_runtime : {key: runtime}
            debug_pre_state : {key: deepcopy(Workflow.state)}, state prior to
                method execution
            debug_post_state : {key: deepcopy(Workflow.state)}, state following
                method execution
        """
        self.debug_counter = 0
        self.debug_trace = set()
        self.debug_runtime = {}
        self.debug_pre_state = {}
        self.debug_post_state = {}

    def __call__(self, iter_, success_callback=None, fail_callback=None):
        """Operate on all the data

        This is the processing engine of the workflow. Callbacks are executed
        following applying all workflow methods to an item from ``iter_``
        (unless ``short_circuit=True`` in which case method execution for an
        item is stopped if ``failed=True``). Callbacks are provided ``self``
        which allows them to examine any aspect of the workflow.

        Parameters
        ----------
        iter_ : an iterator of items to process
        success_callback : method to call on a successful item prior to
            yielding. By default, ``self.state`` is yielded.
        fail_callback : method to call on a failed item prior to yielding. By
            default, failures are ignored.

        .. shownumpydoc
        """
        if success_callback is None:
            # Default success behavior: yield the current state.
            def success_callback(x):
                return x.state

        self.iter_ = iter_
        # Resolve the method execution order once, not per item.
        workflow = self._all_wf_methods()

        for item in self.iter_:
            self.failed = False  # each item starts fresh

            self.initialize_state(item)
            for func in workflow:
                # Skip remaining methods once an item has failed, unless
                # short circuiting was disabled at construction.
                if self.short_circuit and self.failed:
                    break
                else:
                    func()

            if self.failed:
                # Failures are silently dropped unless a callback is given.
                if fail_callback is not None:
                    yield fail_callback(self)
            else:
                yield success_callback(self)

        self.iter_ = None

    def _debug_trace_wrapper(self, func):
        """Trace a function call"""
        def wrapped():
            """Track debug information about a method execution"""
            if not hasattr(self, 'debug_trace'):
                raise AttributeError(
                    "%s doesn't have debug_trace!" % self.__class__)

            # Key uniquely identifies this call: (name, execution index).
            exec_order = self.debug_counter
            name = func.__name__
            key = (name, exec_order)
            pre_state = deepcopy(self.state)

            # Optimistically record the call; removed below if the method
            # declined to execute (e.g., a ``requires`` check failed).
            self.debug_trace.add(key)
            self.debug_counter += 1

            start_time = time()
            if func() is _not_executed:
                self.debug_trace.remove(key)
            else:
                self.debug_runtime[key] = time() - start_time
                self.debug_pre_state[key] = pre_state
                self.debug_post_state[key] = deepcopy(self.state)

        return update_wrapper(wrapped, func)
+
+
class method(object):
    """Decorator marking a function as a workflow method.

    ``Workflow._all_wf_methods`` discovers workflow methods by the
    presence of the ``priority`` attribute this decorator attaches.

    Parameters
    ----------
    priority : int
        Specify a priority for the method, the higher the value the higher
        the priority. Priorities are relative to a given workflow

    """
    # Convenience constant for "run this before everything else".
    highest_priority = sys.maxsize

    def __init__(self, priority=0):
        self.priority = priority

    def __call__(self, func):
        # Tag the function in place and return it unchanged otherwise.
        setattr(func, 'priority', self.priority)
        return func
+
+
class requires(object):
    """Decorator that executes a function if requirements are met

    Parameters
    ----------
    option : any Hashable object
        An option that is required for the decorated method to execute.
        This option will be looked up within the containing ``Workflow``s'
        ``options``.
    values : object
        A required value. This defaults to ``anything`` indicating that
        the only requirement is that the ``option`` exists. It can be
        useful to specify ``not_none`` which indicates that the
        requirement is satisfied if the ``option`` exists and it holds
        a value that is not ``None``. Values also supports iterables
        or singular values.
    state : Function
        A requirement on workflow state. This must be a function that
        accepts a single argument, and returns ``True`` to indicate
        the requirement is satisfied, or ``False`` to indicate the
        requirement is not satisfied. This method will be passed the
        containing ``Workflow``s' ``state`` member variable.
    """
    def __init__(self, option=None, values=anything, state=None):
        # self here is the requires object
        self.option = option
        self.required_state = state

        # Normalize ``values`` into something supporting ``in`` checks.
        # The sentinels (anything / not_none) and sets pass through as-is.
        if values is anything:
            self.values = anything
        elif values is not_none:
            self.values = not_none
        elif isinstance(values, set):
            self.values = values
        else:
            if isinstance(values, str):
                # NOTE(review): a str is kept whole (not exploded into a
                # set of characters), but ``val in self.values`` then
                # performs a substring test -- confirm this is intended.
                self.values = values
            elif isinstance(values, Iterable):
                self.values = set(values)
            else:
                self.values = set([values])

    def __call__(self, func):
        """Wrap a function

        func : the function to wrap
        """
        def decorated(dec_self):
            """A decorated function that has requirements

            dec_self : this is "self" for the decorated function
            """
            # The state requirement is checked first; failing it skips
            # the option checks entirely.
            if self.required_state is not None:
                if not self.required_state(dec_self.state):
                    return _not_executed

            s_opt = self.option
            ds_opts = dec_self.options

            # if this is a function that doesn't have an option to validate
            if s_opt is None:
                func(dec_self)

            # if the option exists in the Workflow
            elif s_opt in ds_opts:
                val = ds_opts[s_opt]

                # if the value just needs to be not None
                if self.values is not_none and val is not None:
                    func(dec_self)

                # otherwise make sure the value is acceptable
                elif val in self.values:
                    func(dec_self)

                else:
                    return _not_executed

            else:
                return _not_executed

        # NOTE: on success ``decorated`` implicitly returns None (never
        # ``func``'s return value); the Workflow debug wrapper only tests
        # the result with ``is _not_executed``.
        return update_wrapper(decorated, func)

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-skbio.git



More information about the debian-med-commit mailing list